repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---|
oxtopus/nupic | examples/opf/tools/MirrorImageViz/mirrorImageViz.py | 9 | 7336 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
import sys
import numpy as np
import matplotlib.pylab as pyl
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
'''Mirror Image Visualization: Shows the encoding space juxtaposed against the
coincidence space. The encoding space is the bottom-up sensory encoding and
the coincidence space depicts the corresponding activation of coincidences in
the SP. Hence, the mirror image visualization is a visual depiction of the
mapping of SP cells to the input representations.
Note:
* The files spBUOut and sensorBUOut are assumed to be in the output format
used for LPF experiment outputs.
* BU outputs for some sample datasets are provided. Specify the name of the
dataset as an option while running this script.
'''
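# Expected file layout (inferred from the parsing below): every line of the
# spBUOut/sensorBUOut files is space separated, beginning with the size of the
# space followed by the indices of the active cells/bits, e.g. a line of the
# form "2048 7 19 316 ..." (illustrative values only).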
lines = activeCoincsFile.readlines()
inputs = encodingsFile.readlines()
w = len(inputs[0].split(' '))-1
patterns = set([])
encodings = set([])
coincs = [] #The set of all coincidences that have won at least once
reUsedCoincs = []
firstLine = inputs[0].split(' ')
size = int(firstLine.pop(0))
spOutput = np.zeros((len(lines),40))
inputBits = np.zeros((len(lines),w))
print 'Total n:', size
print 'Total number of records in the file:', len(lines), '\n'
print 'w:', w
count = 0
for x in xrange(len(lines)):
inputSpace = [] #Encoded representation for each input
spBUout = [int(z) for z in lines[x].split(' ')]
spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
temp = set(spBUout)
spOutput[x]=spBUout
input = [int(z) for z in inputs[x].split(' ')]
input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
tempInput = set(input)
inputBits[x]=input
#Creating the encoding space
for m in xrange(size):
if m in tempInput:
inputSpace.append(m)
else:
inputSpace.append('|') #A non-active bit
repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active
reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active
#Dividing the coincidences into two different categories.
if len(reUsed)==0:
coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
else:
reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once
encodings = encodings.union(tempInput)
count +=1
overlap = {}
overlapVal = 0
seen = []
seen = (printOverlaps(coincs, coincs, seen))
print len(seen), 'sets of 40 cells'
seen = printOverlaps(reUsedCoincs, coincs, seen)
Summ=[]
for z in coincs:
c=0
for y in reUsedCoincs:
c += len(z[1].intersection(y[1]))
Summ.append(c)
print 'Sum: ', Summ
for m in xrange(3):
displayLimit = min(51, len(spOutput[m*200:]))
if displayLimit>0:
drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
else:
print 'No more records to display'
pyl.show()
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
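# Lower triangle (matrix[p+1:, p]): overlap of the input encodings, expressed
# as a percentage of w. Upper triangle (matrix[p, p+1:]): overlap of the SP
# outputs, scaled by 5/2 so that a full overlap of 40 cells also maps to 100
# and the two halves are directly comparable.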
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
""" Compare the results and return True if success, False if failure
Parameters:
--------------------------------------------------------------------
coincs: Which cells are we comparing?
comparedTo: The set of 40 cells we being compared to (they have no overlap with seen)
seen: Which of the cells we are comparing to have already been encountered.
This helps glue together the unique and reused coincs
"""
inputOverlap = 0
cellOverlap = 0
for y in comparedTo:
closestInputs = []
closestCells = []
if len(seen)>0:
inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
for m in xrange( len(seen) ):
if len(seen[m][1].intersection(y[4]))==inputOverlap:
closestInputs.append(seen[m][2])
if len(seen[m][0].intersection(y[1]))==cellOverlap:
closestCells.append(seen[m][2])
seen.append((y[1], y[4], y[0]))
print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)
return seen
################################################################################################################
if __name__=='__main__':
if len(sys.argv)<2: #A dataset name must be supplied on the command line
print ('Input files required. Read documentation for details.')
else:
dataset = sys.argv[1]
activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt'
encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt'
activeCoincsFile=open(activeCoincsPath, 'r')
encodingsFile=open(encodingsPath, 'r')
analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
| gpl-3.0 |
microsoft/LightGBM | examples/python-guide/sklearn_example.py | 1 | 2787 | # coding: utf-8
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import lightgbm as lgb
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
print('Starting training...')
# train
gbm = lgb.LGBMRegressor(num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
early_stopping_rounds=5)
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')
# feature importances
print(f'Feature importances: {list(gbm.feature_importances_)}')
# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: string, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
return 'RMSLE', np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False
print('Starting training with custom eval function...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=rmsle,
early_stopping_rounds=5)
# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: string, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
return 'RAE', np.sum(np.abs(y_pred - y_true)) / np.sum(np.abs(np.mean(y_true) - y_true)), False
print('Starting training with multiple custom eval functions...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=[rmsle, rae],
early_stopping_rounds=5)
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmsle_test = rmsle(y_test, y_pred)[1]
rae_test = rae(y_test, y_pred)[1]
print(f'The RMSLE of prediction is: {rmsle_test}')
print(f'The RAE of prediction is: {rae_test}')
# other scikit-learn modules
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)
print(f'Best parameters found by grid search are: {gbm.best_params_}')
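# Note: newer LightGBM releases replace the `early_stopping_rounds` argument of
# `fit()` with callbacks (the exact version boundary is not pinned down here);
# a roughly equivalent sketch using the callback API would be:
# gbm.fit(X_train, y_train,
#         eval_set=[(X_test, y_test)],
#         eval_metric='l1',
#         callbacks=[lgb.early_stopping(stopping_rounds=5),
#                    lgb.log_evaluation(period=1)])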
| mit |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/numpy/lib/function_base.py | 30 | 124613 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
from ._compiled_base import add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
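# The data is processed in blocks of 65536 elements to bound memory use: each
# block is sorted and counted against the bin edges with searchsorted (the
# weighted path builds a cumulative weight array instead), and the final
# np.diff below converts the accumulated cumulative counts into per-bin counts.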
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
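# xy now holds one flat (mixed-radix) index per sample into the flattened
# histogram, built from the per-axis bin numbers in the axis order given by
# ni; bincount on xy then counts how many samples share each flattened bin.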
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sided (forward or backward) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
# Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
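# (The coefficient sets (-3, 4, -1)/2 and (3, -4, 1)/2 used above are the
# standard second-order accurate one-sided difference stencils.)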
# divide by step size
out /= dx[axis]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
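# ddmod wraps each difference into a band of width 2*pi around zero;
# ph_correct is the per-step correction (zero wherever the jump is below
# `discont`), and its cumulative sum below shifts every sample after the first.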
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `a`.
vals : 1-D sequence
Values to put into `a`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
X -= X.mean(axis=1-axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
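# Illustrative sketch (not part of the original module): corrcoef has no
# doctest above, so here is a minimal usage example.  Two perfectly
# anti-correlated variables yield off-diagonal coefficients of -1.
def _corrcoef_usage_sketch():
    import numpy as np
    x = np.array([[0, 1, 2], [2, 1, 0]])
    return np.corrcoef(x)  # -> array([[ 1., -1.], [-1.,  1.]])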
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning window was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming window was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
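# Illustrative sketch (not part of the original module): the two Chebyshev
# expansions above cover the domains [0, 8] and (8, inf); i0() below stitches
# them together at x == 8, where both branches should agree closely
# (I0(8) is approximately 427.56).
def _i0_split_sketch():
    return _i0_1(8.0), _i0_2(8.0)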
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser window was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
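# Replace zeros with a tiny value so that sin(y)/y below evaluates to ~1.0
# (the limit value) at x == 0 instead of dividing by zero.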
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
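# Illustrative sketch (not part of the original module): msort has no doctest
# above, so a minimal example -- each column is sorted independently.
def _msort_usage_sketch():
    import numpy as np
    a = np.array([[3, 1], [2, 4]])
    return msort(a)  # -> array([[2, 1], [3, 4]])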
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
Reduction function capable of receiving an axis argument.
It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
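# Illustrative sketch (not part of the original module): how _ureduce maps a
# tuple of axes onto a single trailing axis.  Reducing axes (0, 2) of a
# (2, 3, 4) array leaves shape (3,) and keepdim == [1, 3, 1].
def _ureduce_usage_sketch():
    import numpy as np
    a = np.arange(24).reshape(2, 3, 4)
    r, keepdim = _ureduce(a, func=np.sum, axis=(0, 2))
    return r.shape, keepdim  # -> ((3,), [1, 3, 1])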
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
# can't reasonably be implemented in terms of percentile, as we have to
# call mean in order not to break astropy
a = np.asanyarray(a)
if axis is not None and axis >= a.ndim:
raise IndexError(
"axis %d out of bounds (%d)" % (axis, a.ndim))
if overwrite_input:
if axis is None:
part = a.ravel()
sz = part.size
if sz % 2 == 0:
szh = sz // 2
part.partition((szh - 1, szh))
else:
part.partition((sz - 1) // 2)
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
a.partition((szh - 1, szh), axis=axis)
else:
a.partition((sz - 1) // 2, axis=axis)
part = a
else:
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis)
else:
part = partition(a, (sz - 1) // 2, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
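# Illustrative sketch (not part of the original module): for an even number
# of elements _median partitions around the two middle positions and mean()
# averages them.
def _median_even_sketch():
    import numpy as np
    a = np.array([3, 1, 4, 2])
    return _median(a)  # -> 2.5, i.e. (2 + 3) / 2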
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
array([ 3.5])
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
# prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
if indices.dtype == intp: # take the points along axis
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
return r
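# Illustrative sketch (not part of the original module): the arithmetic behind
# the default 'linear' interpolation above.  For q = 25 on 4 samples the
# fractional index is 0.25 * (4 - 1) = 0.75, so the result interpolates
# between the first two order statistics: 1 + 0.75 * (2 - 1) = 1.75.
def _percentile_interp_sketch():
    import numpy as np
    return np.percentile(np.array([1.0, 2.0, 3.0, 4.0]), 25)  # -> 1.75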
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
are taken from the `y` array; by default the x-axis distances between
points are 1.0, but they can alternatively be provided with the `x` array
or with the `dx` scalar. The return value equals the combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
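# Illustrative sketch (not part of the original module): the slicing trick in
# trapz is equivalent to summing d * (y[1:] + y[:-1]) / 2 for a 1-D input.
def _trapz_slices_sketch():
    import numpy as np
    y = np.array([1.0, 2.0, 3.0])
    x = np.array([4.0, 6.0, 8.0])
    d = np.diff(x)
    manual = (d * (y[1:] + y[:-1]) / 2.0).sum()
    return manual, np.trapz(y, x)  # both -> 8.0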
# always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays together.
delete : Delete elements from an array.
Notes
-----
Note that for higher dimensional inserts `obj=0` behaves very differently
from `obj=[0]`, just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very differently from a[:,[0],:] = ...! This changes values so that
# it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| apache-2.0 |
fernandezcuesta/pySMSCMon | t4mon/gen_report.py | 2 | 8298 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Report generator module based on **Jinja2**
"""
import codecs
import datetime as dt
from os import path
import six
import tqdm
import jinja2
from t4mon import gen_plot, arguments
from matplotlib import pyplot as plt
from t4mon.logger import DEFAULT_LOGLEVEL, init_logger
# from ast import literal_eval # TODO: is literal_eval working in Linux?
class Report(object):
"""Generate an HTML report, drawing all the items defined in a
``graphs_definition_file``.
Arguments:
container (t4mon.Orchestrator):
Object whose attributes (data, settings file, logs, collection
timestamp) are passed transparently to this report, as described
in the Attributes section below.
system (str):
Identifier of the system for which the report will be
generated.
It must be a valid identifier present in ``container.data``,
more specifically matching one of
``container.data.index.levels[-1]``.
logger (Optional[logging.Logger]):
logging object optionally passed directly
Note:
Attributes in container are passed transparently to Report
Attributes:
data (pandas.DataFrame):
MultiIndex dataframe that will be used as data source
settings_file (str):
Settings filename where ``graphs_definition_file`` and
``html_template`` are defined
logs (dict):
log output (value) corresponding for each system (key)
date_time (str):
collection timestamp in the format ``%d/%m/%Y %H:%M:%S``
Note:
**Graphs definition file format** ::
###################################################################
# #
# Syntax (all lines starting with # will be treated as comments): #
# var_names;title;plot_options #
# #
# Where: #
# var_names: list of regular expressions matching column names #
# separated with commas #
# title: string containing graph's title #
# plot_option: [optional] comma-separated options passed #
# transparently to matplotlib #
###################################################################
# This is just a comment. No inline comments allowed.
message_buffered;Test 1
successful_FDA;Test 2 (percentage);ylim=(0.0,100.0),linewidth=2
"""
def __init__(self, container, system, logger=None):
self.system = system
# Transparently pass all container items
for item in container.__dict__:
setattr(self, item, getattr(container, item))
if 'loglevel' not in self.__dict__:
self.loglevel = logger.DEFAULT_LOGLEVEL
self.logger = logger or init_logger(self.loglevel)
current_date = dt.datetime.strptime(self.date_time,
"%d/%m/%Y %H:%M:%S")
self.year = current_date.year
# populate self.html_template and self.graphs_definition_file
conf = arguments.read_config(self.settings_file)
for item in ['html_template', 'graphs_definition_file']:
setattr(self,
item,
arguments.get_absolute_path(conf.get('MISC', item),
self.settings_file))
def render(self):
"""
Create the Jinja2 environment.
Notice the use of `trim_blocks`, which greatly helps control
whitespace.
"""
try:
assert not self.data.empty # Check that data isn't empty
assert self.system # Check a system was specified
env_dir = path.dirname(path.abspath(self.html_template))
j2_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(env_dir),
trim_blocks=True
)
j2_tpl = j2_env.get_template(path.basename(self.html_template))
j2_tpl.globals['render_graphs'] = self.render_graphs
self.logger.info('{0} | Generating graphics and rendering report '
'(may take a while)'.format(self.system))
return j2_tpl.generate(data=self)
except AssertionError:
self.logger.error('{0} | No data, no report'.format(self.system)
if self.system
else 'Not a valid system, report skipped')
except IOError:
self.logger.error('Template file ({0}) not found.'
.format(self.html_template))
except jinja2.TemplateError as msg:
self.logger.error('{0} | Error in html template ({1}): {2}'
.format(self.system,
self.html_template,
repr(msg)))
# Stop the generator in case of exception
raise StopIteration
def render_graphs(self):
""" Produce base64 encoded graphs for the selected system
(``self.system``).
Yield:
tuple: (``graph_title``, ``graph_encoded_in_b64``)
"""
try:
progressbar_prefix = 'Rendering report for {}'.format(self.system)
with open(self.graphs_definition_file, 'r') as graphs_txt:
graphs_txt_contents = graphs_txt.readlines()
for line in tqdm.tqdm(graphs_txt_contents,
leave=True,
desc=progressbar_prefix,
unit='Graphs'):
line = line.strip()
if not len(line) or line[0] == '#':
continue
info = line.split(';')
# info[0] contains a comma-separated list of parameters to
# be drawn
# info[1] contains the title
# info[2] contains the plot options
if len(info) == 1:
self.logger.warning('Bad format in current line: '
'"{0}"...'.format(line[1:20]))
continue
try:
optional_kwargs = eval(
"dict({0})".format(info[2])
) if len(info) == 3 else {'ylim': 0.0}
except ValueError:
optional_kwargs = {'ylim': 0.0}
self.logger.debug('{0} | Plotting {1}'.format(self.system,
info[0]))
# Generate figure and encode to base64
plot_axis = gen_plot.plot_var(
self.data,
*[x.strip() for x in info[0].split(',')],
system=self.system,
logger=self.logger,
**optional_kwargs
)
_b64figure = gen_plot.to_base64(plot_axis)
plt.close(plot_axis.get_figure()) # close figure
if _b64figure:
yield (six.u(info[1].strip()),
codecs.decode(_b64figure, 'utf-8'))
except IOError:
self.logger.error('Graphs definition file not found: {0}'
.format(self.graphs_definition_file))
except Exception as unexpected:
self.logger.error('{0} | Unexpected exception found while '
'creating graphs: {1}'.format(self.system,
repr(unexpected)))
yield None
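# Editor's sketch (not part of t4mon): shows how one line of the graphs
# definition file described in the Report docstring is split into its three
# fields. The helper name and the sample line are illustrative only.
def _parse_graph_line_example():
    line = 'successful_FDA;Test 2 (percentage);ylim=(0.0,100.0),linewidth=2'
    info = [field.strip() for field in line.split(';')]
    var_names = [name.strip() for name in info[0].split(',')]  # column regexes
    title = info[1]                                            # graph title
    # Report.render_graphs() builds the kwargs with eval("dict({0})".format(info[2]));
    # for this sample line that is equivalent to:
    plot_kwargs = {'ylim': (0.0, 100.0), 'linewidth': 2}
    return var_names, title, plot_kwargs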
def gen_report(container, system):
"""
Convenience function for calling :meth:`.Report.render()` method
Arguments:
container (t4mon.Orchestrator):
object containing all the information required to render the report
system (str):
system for which the report will be generated
Return:
str
"""
_report = Report(container, system)
return _report.render()
| mit |
brianlorenz/COSMOS_IMACS_Redshifts | PlotCodes/Plot_bpt_paper.py | 1 | 11835 | #Creates a BPT diagram for all objects, and a second figure that shows objects for which single lines are low
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
from cycler import cycler
from matplotlib import cm
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux_red.txt'
#Location of the equivalent width data
ewdata = '/Users/blorenz/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
ew_df = ascii.read(ewdata).to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
d = {'True': True, 'False': False}
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/errs.txt'
#Read in the scale of the lines
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#Fontsizes for plotting
axisfont = 24
ticksize = 18
ticks = 8
titlefont = 24
legendfont = 16
textfont = 16
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
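#Editor's note (sketch): divz is a "safe divide"; elements where Y == 0 come
#out as 0 instead of inf, e.g.
#divz(np.array([1.,2.]), np.array([2.,0.])) -> array([0.5, 0.])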
#BPT
#Strings of all of the lines needed for bpt
lines = ['6563_fix','6583_fix','4861','5007']
Ha = lines[0]
N2 = lines[1]
Hb = lines[2]
O3 = lines[3]
#Set low lines to upper limits
#for line in lines:
# fluxdata.loc[dataqual[line+'_low'],line+'_flux'] = err_df[dataqual[line+'_low']][line+'_err']
#fig2,axarr2 = plt.subplots(2,2,figsize=(15,12))
#ax1,ax2,ax3,ax4 = axarr2[0,0],axarr2[0,1],axarr2[1,0],axarr2[1,1]
#Takes the dataframe and the four lines to combine into the bpt
def getbpt(pd_df,err_df,Ha,Hb,N2,O3):
errHa = err_df[Ha]
errN2 = err_df[N2]
errHb = err_df[Hb]
errO3 = err_df[O3]
#Divide by the scale to calibrate the flux
calHa = divz(pd_df[Ha+'_flux'],pd_df[Ha+'_scale'])
calHb = divz(pd_df[Hb+'_flux'],pd_df[Hb+'_scale'])
calN2 = divz(pd_df[N2+'_flux'],pd_df[N2+'_scale'])
calO3 = divz(pd_df[O3+'_flux'],pd_df[O3+'_scale'])
#Find the ratios
Harat = np.log10(divz(calN2,calHa))
Hbrat = np.log10(divz(calO3,calHb))
#Find the errors
eHarat = (1/np.log(10))*divz(calHa,calN2)*np.sqrt((divz(1,calHa) * errN2)**2 + (divz(-calN2,(calHa**2)) * errHa)**2)
eHbrat = (1/np.log(10))*divz(calHb,calO3)*np.sqrt((divz(1,calHb) * errO3)**2 + (divz(-calO3,(calHb**2)) * errHb)**2)
return (Harat,Hbrat,eHarat,eHbrat)
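#Editor's note (sketch, not part of the original analysis): the error terms
#returned above reduce to the standard propagation for the log10 of a ratio,
#e.g. for x = log10(N2/Ha): sigma_x = (1/ln10)*sqrt((errN2/N2)**2+(errHa/Ha)**2)
#Quick numerical check with made-up values (Ha=10, N2=3, errHa=0.5, errN2=0.3):
def _check_bpt_error_form():
    simple = (1/np.log(10))*np.sqrt((0.3/3.0)**2 + (0.5/10.0)**2)
    coded = (1/np.log(10))*(10.0/3.0)*np.sqrt(((1/10.0)*0.3)**2 + ((-3.0/(10.0**2))*0.5)**2)
    return np.isclose(simple, coded) #True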
#Plotting parameters
ms = 3
lw=0.5
mark='o'
#Filter the data
goodlines = [dataqual[line+'_good'].map(d) for line in lines]
#Needs to be good in all lines to be good
allgood = np.logical_and.reduce(goodlines)
#Needs to be bad in any line to be bad
badlines = [dataqual[line+'_bad'].map(d) for line in lines]
baddata = np.logical_or.reduce(badlines)
lowlines = [dataqual[line+'_low'].map(d) for line in lines]
#Needs to be low in any line to be low, and also not bad in a line
somelow = np.logical_and(np.logical_or.reduce(lowlines),np.logical_not(baddata))
plotframe = pd.DataFrame()
plotframe['fluxfile'] = fluxdata['fluxfile']
plotframe['OBJID'] = fluxdata['OBJID']
plotframe['Mask'] = fluxdata['Mask']
#Get the x and y axes for the good, low, and bad data
for i in range(0,3):
if i==0:
filt = allgood
colname = 'good'
elif i==1:
filt = somelow
colname = 'low'
else:
filt = baddata
colname = 'bad'
bpts = getbpt(fluxdata[filt],err_df[filt],Ha,Hb,N2,O3)
plotframe[colname+'x'] = bpts[0]
plotframe[colname+'y'] = bpts[1]
plotframe[colname+'ex'] = bpts[2]
plotframe[colname+'ey'] = bpts[3]
#Find the duplicates
dupids = [item for item, count in collections.Counter(fluxdata[allgood]['OBJID']).items() if count > 1]
dupobjsarr = []
for obj in dupids:
dupobjsarr.append(fluxdata[fluxdata.OBJID == obj].OBJID)
#Line that defines the agn cutoff
xline = np.arange(-3.0,0.469,0.001)
yline = 0.61/(xline-0.47)+1.19 #Kewley (2001)
xlineemp = np.arange(-3.0,0.049,0.001)
ylineemp = 0.61/(xlineemp-0.05)+1.3 #Kauffman (2003)
#If duplicates, set to 1, otherwise 0
dup = 1
#Make the figures
if dup:
fig,dupax = plt.subplots(1,1,figsize=(8.5,7),sharex=True,sharey=True)
axarr = [1,2]
else: fig,axarr = plt.subplots(1,1,figsize=(8,7),sharex=True,sharey=True)
fig2,axarr2 = plt.subplots(1,1,figsize=(9,7),sharex=True,sharey=True)
#Color wheel if looking at duplicates
prop_iter = iter(plt.rcParams['axes.prop_cycle'])
colorsmask=['r','b','g','y','orange','purple','cyan','lime','gold','brown','pink']
masks=['a6','b6','d6','e6','f6','g6','h6','i6','b7','e7','j7']
#Plot the data with error bars
#Counter
c = 0
#for ax in axarr:
if 1==1:
ax=axarr
if ax==2:
pass
if c==0:
col = 'good'
color = 'blue'
elif c==1:
col = 'low'
color = 'orange'
else:
col = 'bad'
color = 'red'
p = plotframe
if (c==0 and dup):
ax = dupax
'''
for j in range(0,len(colorsmask)):
ax.plot(-100,-100,color=colorsmask[j],ms=ms,marker=mark,label=masks[j])
ax.legend()
'''
if dup: ax.errorbar(p[col+'x'],p[col+'y'],xerr=p[col+'ex'],yerr=p[col+'ey'],color='grey',ls='None',ms=ms,marker=mark,lw=lw,label=None)
else: ax.errorbar(p[col+'x'],p[col+'y'],xerr=p[col+'ex'],yerr=p[col+'ey'],color=color,ls='None',ms=ms,marker=mark,lw=lw,label=None)
#Titles, axes, legends
if c==0:
ax.set_ylabel('log(O[III] 5007 / H$\\beta$)',fontsize = axisfont)
ax.set_xlabel('log(N[II] 6583 / H$\\alpha$)',fontsize = axisfont)
ax.tick_params(labelsize = ticksize, size=ticks)
ax.set_xlim(-2,1)
ax.set_ylim(-1.2,1.5)
#Plot the bpt line
ax.plot(xline,yline,color='black',lw=1,ls='--',label='Theoretical, Kewley+ (2001)')
ax.plot(xlineemp,ylineemp,color='black',lw=1,ls='-',label='Empirical, Kauffmann+ (2003)')
ax.text(0.06,0.18,'Star-Forming',fontsize = textfont+6, transform=ax.transAxes)
ax.text(0.7,0.66,'AGN',fontsize = textfont+6, transform=ax.transAxes)
if c==0: ax.legend(fontsize=legendfont)
#Draw lines between duplicates:
if (c==0 and dup):
for i in range(0,len(dupobjsarr)):
pdup = p.iloc[dupobjsarr[i].index]
#colo = [colorsmask[masks.index(pdup.iloc[0]['Mask'])],colorsmask[masks.index(pdup.iloc[1]['Mask'])]]
#ax.errorbar(pdup[col+'x'],pdup[col+'y'],xerr=pdup[col+'ex'],yerr=pdup[col+'ey'],color=cm.jet(1.*i/len(dupobjsarr)),ls='-',ms=ms,marker=mark,lw=1,zorder=2)
ax.plot(pdup[col+'x'],pdup[col+'y'],color='red',ls='-',ms=ms,marker=mark,lw=1,zorder=2,label=None)
#ax.errorbar(pdup[col+'x'].iloc[0],pdup[col+'y'].iloc[0],xerr=pdup[col+'ex'].iloc[0],yerr=pdup[col+'ey'].iloc[0],color=colorsmask[masks.index(pdup.iloc[0]['Mask'])],ls='-',ms=ms,marker=mark,lw=1,zorder=2,label=None)
#ax.errorbar(pdup[col+'x'].iloc[1],pdup[col+'y'].iloc[1],xerr=pdup[col+'ex'].iloc[1],yerr=pdup[col+'ey'].iloc[1],color=colorsmask[masks.index(pdup.iloc[1]['Mask'])],ls='-',ms=ms,marker=mark,lw=1,zorder=2,label=None)
ax.errorbar(pdup[col+'x'].iloc[0],pdup[col+'y'].iloc[0],xerr=pdup[col+'ex'].iloc[0],yerr=pdup[col+'ey'].iloc[0],color='blue',ls='-',ms=ms,marker=mark,lw=1,zorder=2,label=None)
ax.errorbar(pdup[col+'x'].iloc[1],pdup[col+'y'].iloc[1],xerr=pdup[col+'ex'].iloc[1],yerr=pdup[col+'ey'].iloc[1],color='blue',ls='-',ms=ms,marker=mark,lw=1,zorder=2,label=None)
c = c+1
fig.tight_layout()
if dup: fig.savefig(figout + 'bpt_dup_paper.pdf')
else: fig.savefig(figout + 'bpt_paper.pdf')
plt.close(fig)
#Plot the data with error bars
c = 0
#for ax in axarr2:
if 1==1:
ax=axarr2
if c==0:
col = 'good'
filt = allgood
elif c==1:
col = 'low'
filt = somelow
else:
col = 'bad'
filt = baddata
#filt = np.logical_and(filt,plotframe['goodex']<0.5)
#filt = np.logical_and(filt,plotframe['goodey']<0.5)
colors = fluxdata[filt]['LMASS']
p = plotframe
ax.errorbar(p[filt][col+'x'],p[filt][col+'y'],xerr=p[filt][col+'ex'],yerr=p[filt][col+'ey'],c='black',ls='None',ms=ms,marker=mark,lw=lw,zorder=1,label=None)
colordata = ax.scatter(p[filt][col+'x'],p[filt][col+'y'],c=colors,marker=mark,zorder=3,label=None)
print len(p[filt][col+'x'])
#Titles, axes, legends
if c==0:
ax.set_ylabel('log(O[III] 5007 / H$\\beta$)',fontsize = axisfont)
ax.set_xlabel('log(N[II] 6583 / H$\\alpha$)',fontsize = axisfont)
ax.tick_params(labelsize = ticksize, size=ticks)
ax.set_xlim(-2,1)
ax.set_ylim(-1.2,1.5)
#Plot the bpt line
ax.plot(xline,yline,color='black',lw=1,ls='--',label='Theoretical, Kewley+ (2001)')
ax.plot(xlineemp,ylineemp,color='black',lw=1,ls='-',label='Empirical, Kauffmann+ (2003)')
ax.text(0.06,0.18,'Star-Forming',fontsize = textfont+6, transform=ax.transAxes)
ax.text(0.7,0.66,'AGN',fontsize = textfont+6, transform=ax.transAxes)
if c==0: ax.legend(fontsize=legendfont)
c = c+1
cb = fig2.colorbar(colordata)
cb.ax.tick_params(labelsize = ticksize)
fig2.text(0.96,0.76,'log(Stellar Mass) ($M_{sun}$)',fontsize = axisfont,rotation=90)
fig2.tight_layout()
fig2.savefig(figout + 'bpt_mass_paper.pdf')
plt.close(fig2)
'''
alllowerr = geterrarr(fluxdata[alllow],lines,alllowew)
Halowerr = geterrarr(fluxdata[Halow],lines,Halowew)
Hblowerr = geterrarr(fluxdata[Hblow],lines,Hblowew)
N2lowerr = geterrarr(fluxdata[N2low],lines,N2lowew)
O3lowerr = geterrarr(fluxdata[O3low],lines,O3lowew)
alllowx,alllowy,ealllowx,ealllowy = getbpt(fluxdata[alllow],alllowerr)
#Second figure:
counter = 0
for elmt in [ax1,ax2,ax3,ax4]:
#Plot the bpt line
elmt.plot(xline,yline,color='black',lw=1)
elmt.set_ylabel('Log(O[III] 5007 / H$\\beta$)',fontsize = axisfont)
elmt.set_xlabel('Log(N[II] 6583 / H$\\alpha$)',fontsize = axisfont)
elmt.tick_params(labelsize = ticksize)
elmt.set_xlim(-2,1)
elmt.set_ylim(-1.2,1.5)
#Depending on which loop it is, set the x and y data to a particular line
if counter == 0:
titlestring = 'H$\\alpha$'
lowlinex,lowliney,elowlinex,elowliney = getbpt(fluxdata[Halow],Halowerr)
if counter == 1:
titlestring = 'H$\\beta$'
lowlinex,lowliney,elowlinex,elowliney = getbpt(fluxdata[Hblow],Hblowerr)
if counter == 2:
titlestring = 'N[II]'
lowlinex,lowliney,elowlinex,elowliney = getbpt(fluxdata[N2low],N2lowerr)
if counter == 3:
        titlestring = 'O[III]'
lowlinex,lowliney,elowlinex,elowliney = getbpt(fluxdata[O3low],O3lowerr)
elmt.set_title('BPT Diagram, low ' + titlestring,fontsize = titlefont)
#Plot the good data
elmt.errorbar(goodx,goody,xerr=egoodx,yerr=egoody,color='cornflowerblue',ls='None',ms=msg,marker=mark,label='Good in all lines ('+ str(len(goodflux)) + ')',lw=lw)
#Plot the ones that are low in this line
elmt.errorbar(lowlinex,lowliney,xerr=elowlinex,yerr=elowliney,color='orange',ls='None',ms=msg,marker=mark,label='Low ' + titlestring + ' ('+ str(len(lowlinex)-len(alllowx)) + ')',lw=lw)
counter = counter+1
#highlight the ones that are low in all lines, so they probably should not be trusted
elmt.errorbar(alllowx,alllowy,xerr=ealllowx,yerr=ealllowy,color='red',ls='None',ms=msg,marker=mark,label='Low in all lines' + ' ('+ str(len(alllowx)) + ')',lw=lw)
elmt.legend(fontsize = axisfont-6)
fig2.tight_layout()
#plt.show()
fig2.savefig(figout + 'bpt_low_err.pdf')
#plt.show()
plt.close(fig2)
'''
| mit |
nvoron23/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
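# Editor's note (sketch, not part of the original example): BIC trades fit
# quality against complexity, BIC = -2*log-likelihood + n_params*log(n_samples),
# and the lowest value wins. The scores collected below are appended covariance
# type by covariance type, so they can also be viewed as a table with, e.g.,
#   np.array(bic).reshape(len(cv_types), len(n_components_range))
# where rows follow cv_types and columns follow n_components_range.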
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
kiyoto/statsmodels | statsmodels/genmod/generalized_estimating_equations.py | 19 | 97130 | """
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
KY Liang and S Zeger. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
S Zeger and KY Liang. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
A Rotnitzky and NP Jewell (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
LA Mancl, TA DeRouen (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from statsmodels.compat.python import iterkeys, range, lrange, lzip, zip
import numpy as np
from scipy import stats
import pandas as pd
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
lhs : ndarray
A q x p matrix which is the left hand side of the
constraint lhs * param = rhs. The number of constraints is
q >= 1 and p is the dimension of the parameter vector.
rhs : ndarray
A 1-dimensional vector of length q which is the right hand
side of the constraint equation.
exog : ndarray
            The n x p exogenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = np.atleast_1d(rhs.squeeze())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
# [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = np.hstack((self.lhs0, self.lhs1))
# param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = np.dot(exog, self.param0)
self.orig_exog = exog
self.exog_fulltrans = np.dot(exog, self.lhsf)
def offset_increment(self):
"""
Returns a vector that should be added to the offset vector to
accommodate the constraint.
Parameters
----------
exog : array-like
            The exogenous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
exog : array-like
            The exogenous data for the model.
"""
return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
Returns the full exog matrix before it was reduced to
satisfy the constraint.
"""
return self.orig_exog
def unpack_param(self, params):
"""
Converts the parameter vector `params` from reduced to full
coordinates.
"""
return self.param0 + np.dot(self.lhs0, params)
def unpack_cov(self, bcov):
"""
Converts the covariance matrix `bcov` from reduced to full
coordinates.
"""
return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
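# Editor's sketch (not part of statsmodels): a minimal check of the
# reparameterization above. Any full parameter vector of the form
# param0 + lhs0.dot(b) satisfies lhs * param = rhs, because the columns of
# lhs0 span the null space of lhs. The helper name and values are made up.
def _constraint_reparam_example():
    exog = np.arange(60.0).reshape(20, 3)          # placeholder design matrix
    lhs = np.array([[1., -1., 0.]])                # constrain beta0 == beta1
    rhs = np.array([0.])
    pc = ParameterConstraint(lhs, rhs, exog)
    full = pc.unpack_param(np.array([0.5, -2.0]))  # reduced -> full coordinates
    return np.allclose(np.dot(lhs, full), rhs)     # True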
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
endog : array-like
1d array of endogenous values (i.e. responses, outcomes,
dependent variables, or 'Y' values).
exog : array-like
        2d array of exogenous values (i.e. covariates, predictors,
independent variables, regressors, or 'X' values). A `nobs x
k` array where `nobs` is the number of observations and `k` is
the number of regressors. An intercept is not included by
default and should be added by the user. See
`statsmodels.tools.add_constant`.
groups : array-like
A 1d array of length `nobs` containing the group labels.
time : array-like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
%(family_doc)s
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.cov_struct.CovStruct for more
information.
offset : array-like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array-like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
update_dep : bool
If true, the dependence parameters are optimized, otherwise
they are held fixed at their starting values.
weights : array-like
An array of weights to use in the analysis. The weights must
be constant within each group. These correspond to
probability weights (pweights) in Stata.
%(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
+ ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian | x x x
inv Gaussian | x x x
binomial | x x x x x x x x x
    Poisson      |      x    x                        x
neg binomial | x x x x
gamma | x x x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
    DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
where N is the total sample size, and g is the average group size.
Examples
--------
%(example)s
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
distribution use `family=sm.family.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.family.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_nominal_family_doc = """\
The default value `None` uses a multinomial logit family
specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
maxiter : integer
The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : integer
The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : integer
No dependence structure updates occur before this
iteration number.
cov_type : string
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
The scale parameter is estimated as the sum of squared
Pearson residuals divided by `N - ddof_scale`, where N
is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
where N is the total sample size and g is the average group
size.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
`params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Returns
-------
**Attributes**
cov_params_default : ndarray
default covariance of the parameter estimates. Is chosen among one
of the following three based on `cov_type`
cov_robust : ndarray
covariance of the parameter estimates that is robust
cov_naive : ndarray
covariance of the parameter estimates that is not robust to
correlation or variance misspecification
cov_robust_bc : ndarray
covariance of the parameter estimates that is robust and bias
reduced
converged : bool
indicator for convergence of the optimization.
True if the norm of the score is smaller than a threshold
cov_type : string
string indicating whether a "robust", "naive" or "bias_reduced"
covariance is used as default
fit_history : dict
Contains information about the iterations.
fittedvalues : array
        The fitted mean values, obtained by applying the inverse of the
        model's link function to dot(exog, params).
model : class instance
Pointer to GEE model instance that called `fit`.
normalized_cov_params : array
See GEE docstring
params : array
The coefficients of the fitted model. Note that
interpretation of the coefficients often depends on the
distribution family and the data.
scale : float
The estimate of the scale / dispersion for the model fit.
See GEE.fit for more information.
score_norm : float
norm of the score at the end of the iterative estimation.
bse : array
The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
>>> print result.summary()
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject",
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print result.summary()
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject",
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print result.summary()
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print result.summary()
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print result.summary()
"""
_gee_nominal_example = """
Fit a nominal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("nominal")
>>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print result.summary()
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.NominalGEE.from_formula("y ~ x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
>>> print result.summary()
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.nominal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print result.summary()
"""
class GEE(base.Model):
__doc__ = (
" Estimation of marginal regression models using Generalized\n"
" Estimating Equations (GEE).\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example})
cached_means = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if (family is not None) and not isinstance(family.link, tuple(family.safe_links)):
import warnings
warnings.warn("The %s link function does not respect the domain of the %s family." %
(family.link.__class__.__name__, family.__class__.__name__))
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
groups = np.array(groups) # in case groups is pandas
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
**kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the offset and exposure
self._offset_exposure = None
if offset is not None:
self._offset_exposure = self.offset.copy()
self.offset = offset
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError("exposure can only be used with the log link function")
if self._offset_exposure is not None:
self._offset_exposure += np.log(exposure)
else:
self._offset_exposure = np.log(exposure)
self.exposure = exposure
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError("GEE: the left hand side of the "
"constraint must have the same number of columns "
"as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = self.constraint.offset_increment().copy()
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = np.unique(self.groups, return_inverse=True)
se = pd.Series(index=np.arange(len(ix)))
gb = se.groupby(ix).groups
dk = [(lb, np.asarray(gb[k])) for k,lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.weights_li = [x[0] for x in self.weights_li]
self.weights_li = np.asarray(self.weights_li)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:,None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[np.arange(len(y), dtype=np.float64)[:, None]
for y in self.endog_li]
self.time = np.concatenate(self.time_li)
if self._offset_exposure is not None:
self.offset_li = self.cluster_list(self._offset_exposure)
else:
self.offset_li = None
if constraint is not None:
self.constraint.exog_fulltrans_li = \
self.cluster_list(self.constraint.exog_fulltrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# The following are column based, not on rank see #1928
self.df_model = self.exog.shape[1] - 1 # assumes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
# Override to allow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array-like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array-like
The data for the model.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of the data to used when
fitting the model.
time : array-like or string
The time values, used for dependence structures involving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : array-like or string
The offset values, added to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : array-like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is added to the offset (if any). If a string, this is the
name of a variable in `data` that contains the offset
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : GEE model instance
Notes
------
        `data` must define __getitem__ with the keys in the formula
        terms; args and kwargs are passed on to the model
        instantiation, e.g. a numpy structured or rec array, a
        dictionary, or a pandas DataFrame.
This method currently does not correctly handle missing
values, so missing values should be explicitly dropped from
the DataFrame before calling this method.
""" % {'missing_param_doc' : base._missing_param_doc}
if type(groups) == str:
groups = data[groups]
if type(time) == str:
time = data[time]
if type(offset) == str:
offset = data[offset]
if type(exposure) == str:
exposure = data[exposure]
model = super(GEE, cls).from_formula(formula, data, subset,
groups, time=time,
offset=offset,
exposure=exposure,
*args, **kwargs)
return model
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
def estimate_scale(self):
"""
Returns an estimate of the scale parameter at the current
parameter value.
"""
if isinstance(self.family, (families.Binomial, families.Poisson,
_Multinomial)):
return 1.
endog = self.endog_li
exog = self.exog_li
cached_means = self.cached_means
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
fsum = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
f = self.weights_li[i] if self.weights is not None else 1.
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
scale += f * np.sum(resid**2)
fsum += f * len(endog[i])
scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))
return scale
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array-like
            The exogenous data at which the derivative is computed.
lin_pred : array-like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be added to
`lin_pred` prior to calling this function.
"""
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : array-like
Values of the independent variables at which the derivative
is calculated.
params : array-like
Parameter values at which the derivative is calculated.
offset_exposure : array-like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = np.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverse_deriv(lin_pred)
dmat = np.outer(idl, params)
return dmat
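    # Editor's note (sketch): for the logit link, link.inverse_deriv(lin_pred)
    # equals mu * (1 - mu) with mu = expit(lin_pred), so mean_deriv reduces to
    # exog * (mu * (1 - mu))[:, None] and mean_deriv_exog to
    # np.outer(mu * (1 - mu), params).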
def _update_mean_params(self):
"""
Returns
-------
update : array-like
The update vector such that params + update is the next
iterate when solving the score equations.
score : array-like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
cached_means = self.cached_means
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
score += f * np.dot(dmat.T, vinv_resid)
update = np.linalg.solve(bmat, score)
self._fit_history["cov_adjust"].append(
self.cov_struct.cov_adjust)
return update, score
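    # Editor's note (sketch): the loop above accumulates the GEE estimating
    # equation U(beta) = sum_i D_i' V_i^{-1} (y_i - mu_i) and the matrix
    # B = sum_i D_i' V_i^{-1} D_i (ignoring the optional weights), so `update`
    # is the Gauss-Seidel step B^{-1} U at the current parameter value.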
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = np.dot(exog[i], mean_params)
if offset is not None:
lpr += offset[i]
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : array-like
The robust, or sandwich estimate of the covariance, which
is meaningful even if the working covariance structure is
incorrectly specified.
cov_naive : array-like
The model-based estimate of the covariance, which is
meaningful if the covariance structure is correctly
specified.
cov_robust_bc : array-like
The "bias corrected" robust covariance of Mancl and
DeRouen.
cmat : array-like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None, None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
dvinv_resid = f * np.dot(dmat.T, vinv_resid)
cmat += np.outer(dvinv_resid, dvinv_resid)
scale = self.estimate_scale()
bmati = np.linalg.inv(bmat)
cov_naive = bmati * scale
cov_robust = np.dot(bmati, np.dot(cmat, bmati))
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen (requires cov_naive so cannot be calculated
# in the previous loop).
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval,
i, sdev, (dmat,))
if rslt is None:
return None, None, None, None
vinv_d = rslt[0]
vinv_d /= scale
hmat = np.dot(vinv_d, cov_naive)
hmat = np.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (aresid,))
if rslt is None:
return None, None, None, None
srt = rslt[0]
srt = f * np.dot(dmat.T, srt) / scale
bcm += np.outer(srt, srt)
cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
cov_robust_bc *= self.scaling_factor
return (cov_robust, cov_naive, cov_robust_bc, cmat)
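    # Editor's note (sketch, ignoring the optional weights): with
    # B = sum_i D_i' V_i^{-1} D_i and
    # M = sum_i D_i' V_i^{-1} (y_i - mu_i)(y_i - mu_i)' V_i^{-1} D_i (cmat),
    # cov_naive is scale * B^{-1} and cov_robust is the sandwich B^{-1} M B^{-1};
    # cov_robust_bc inflates each group's residuals by (I - H_i)^{-1}
    # (Mancl and DeRouen) before forming the middle term.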
def predict(self, params, exog=None, offset=None,
exposure=None, linear=False):
"""
Return predicted values for a marginal regression model fit
using GEE.
Parameters
----------
params : array-like
Parameters / coefficients of a marginal regression model.
exog : array-like, optional
Design / exogenous data. If exog is None, model exog is
used.
offset : array-like, optional
Offset for exog if provided. If offset is None, model
offset is used.
exposure : array-like, optional
Exposure for exog, if exposure is None, model exposure is
used. Only allowed if link function is the logarithm.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link
function at the linear predicted values.
Returns
-------
An array of fitted values
Notes
-----
Using log(V) as the offset is equivalent to using V as the
exposure. If exposure U and offset V are both provided, then
log(U) + V is added to the linear predictor.
"""
# TODO: many paths through this, not well covered in tests
if exposure is not None and not isinstance(self.family.link, families.links.Log):
raise ValueError("exposure can only be used with the log link function")
# This is the combined offset and exposure
_offset = 0.
# Using model exog
if exog is None:
exog = self.exog
if not isinstance(self.family.link, families.links.Log):
# Don't need to worry about exposure
if offset is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure.copy()
else:
_offset = offset
else:
if offset is None and exposure is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure
elif offset is None and exposure is not None:
_offset = np.log(exposure)
if hasattr(self, "offset"):
_offset = _offset + self.offset
elif offset is not None and exposure is None:
_offset = offset
if hasattr(self, "exposure"):
_offset = offset + np.log(self.exposure)
else:
_offset = offset + np.log(exposure)
# exog is provided: this is simpler than above because we
# never use model exog or exposure if exog is provided.
else:
if offset is not None:
_offset = _offset + offset
if exposure is not None:
_offset += np.log(exposure)
lin_pred = _offset + np.dot(exog, params)
if not linear:
return self.family.link.inverse(lin_pred)
return lin_pred
def _starting_params(self):
# TODO: use GLM to get Poisson starting values
return np.zeros(self.exog.shape[1])
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.):
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError("ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = {'params': [],
'score': [],
'dep_params': [],
'cov_adjust': []}
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
mean_params = self._starting_params()
else:
start_params = np.asarray(start_params)
mean_params = start_params.copy()
self.update_cached_means(mean_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(maxiter):
update, score = self._update_mean_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
mean_params += update
self.update_cached_means(mean_params)
# L2 norm of the change in mean structure parameters at
# this iteration.
del_params = np.sqrt(np.sum(score**2))
self._fit_history['params'].append(mean_params.copy())
self._fit_history['score'].append(score)
self._fit_history['dep_params'].append(
self.cov_struct.dep_params)
# Don't exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep == False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(mean_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if mean_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, bc_cov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
if self.constraint is not None:
mean_params, bcov = self._handle_constraint(mean_params, bcov)
if mean_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
#kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type = cov_type,
cov_robust = bcov,
cov_naive = ncov,
cov_robust_bc = bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we don't want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, mean_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
delattr(self, "_fit_history")
results.score_norm = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.maxiter = maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_norm", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"maxiter"]
return GEEResultsWrapper(results)
fit.__doc__ = _gee_fit_doc
def _handle_constraint(self, mean_params, bcov):
"""
Expand the parameter estimate `mean_params` and covariance matrix
`bcov` to the coordinate system of the unconstrained model.
Parameters
----------
mean_params : array-like
A parameter vector estimate for the reduced model.
bcov : array-like
The covariance matrix of mean_params.
Returns
-------
mean_params : array-like
The input parameter vector mean_params, expanded to the
coordinate system of the full model
bcov : array-like
The input covariance matrix bcov, expanded to the
coordinate system of the full model
"""
# The number of variables in the full model
red_p = len(mean_params)
full_p = self.constraint.lhs.shape[1]
mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]
# Get the score vector under the full model.
save_exog_li = self.exog_li
self.exog_li = self.constraint.exog_fulltrans_li
import copy
save_cached_means = copy.deepcopy(self.cached_means)
self.update_cached_means(mean_params0)
_, score = self._update_mean_params()
if score is None:
warnings.warn("Singular matrix encountered in GEE score test",
ConvergenceWarning)
return None, None
_, ncov1, _, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale**2
score2 = score[red_p:] / scale
amat = np.linalg.inv(ncov1)
bmat_11 = cmat[0:red_p, 0:red_p]
bmat_22 = cmat[red_p:, red_p:]
bmat_12 = cmat[0:red_p, red_p:]
amat_11 = amat[0:red_p, 0:red_p]
amat_12 = amat[0:red_p, red_p:]
score_cov = bmat_22 - \
np.dot(amat_12.T, np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
self.score_test_results = {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
mean_params = self.constraint.unpack_param(mean_params)
bcov = self.constraint.unpack_cov(bcov)
self.exog_li = save_exog_li
self.cached_means = save_cached_means
self.exog = self.constraint.restore_exog()
return mean_params, bcov
def _update_assoc(self, params):
"""
Update the association parameters
"""
self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects, returns dF(XB) / dX where F(.)
is the fitted mean.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# This form should be appropriate for group 1 probit, logit,
# logistic, cloglog, heckprob, xtprobit.
offset_exposure = None
if exog is None:
exog = self.exog
offset_exposure = self._offset_exposure
margeff = self.mean_deriv_exog(exog, params, offset_exposure)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
class GEEResults(base.LikelihoodModelResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model using GEE.\n"
+ _gee_results_doc)
def __init__(self, model, params, cov_params, scale,
cov_type='robust', use_t=False, **kwds):
super(GEEResults, self).__init__(model, params,
normalized_cov_params=cov_params, scale=scale)
# not added by super
self.df_resid = model.df_resid
self.df_model = model.df_model
self.family = model.family
attr_kwds = kwds.pop('attr_kwds', {})
self.__dict__.update(attr_kwds)
# we don't do this if the cov_type has already been set
# subclasses can set it through attr_kwds
if not (hasattr(self, 'cov_type') and
hasattr(self, 'cov_params_default')):
self.cov_type = cov_type # keep alias
covariance_type = self.cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = "GEE: `cov_type` must be one of " +\
", ".join(allowed_covariances)
raise ValueError(msg)
if cov_type == "robust":
cov = self.cov_robust
elif cov_type == "naive":
cov = self.cov_naive
elif cov_type == "bias_reduced":
cov = self.cov_robust_bc
self.cov_params_default = cov
else:
if self.cov_type != cov_type:
raise ValueError('cov_type in argument is different from '
'already attached cov_type')
def standard_errors(self, cov_type="robust"):
"""
This is a convenience function that returns the standard
errors for any covariance type. The value of `bse` is the
standard errors for whichever covariance type is specified as
an argument to `fit` (defaults to "robust").
Parameters
----------
cov_type : string
One of "robust", "naive", or "bias_reduced". Determines
the covariance used to compute standard errors. Defaults
to "robust".
"""
# Check covariance_type
covariance_type = cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = "GEE: `covariance_type` must be one of " +\
", ".join(allowed_covariances)
raise ValueError(msg)
if covariance_type == "robust":
return np.sqrt(np.diag(self.cov_robust))
elif covariance_type == "naive":
return np.sqrt(np.diag(self.cov_naive))
elif covariance_type == "bias_reduced":
return np.sqrt(np.diag(self.cov_robust_bc))
# Need to override to allow for different covariance types.
@cache_readonly
def bse(self):
return self.standard_errors(self.cov_type)
@cache_readonly
def resid(self):
"""
Returns the residuals, the endogenous data minus the fitted
values from the model.
"""
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_split(self):
"""
Returns the residuals, the endogenous data minus the fitted
values from the model. The residuals are returned as a list
of arrays containing the residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.resid[ii])
return sresid
@cache_readonly
def resid_centered(self):
"""
Returns the residuals centered within each group.
"""
cresid = self.resid.copy()
for v in self.model.group_labels:
ii = self.model.group_indices[v]
cresid[ii] -= cresid[ii].mean()
return cresid
@cache_readonly
def resid_centered_split(self):
"""
Returns the residuals centered within each group. The
residuals are returned as a list of arrays containing the
centered residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.centered_resid[ii])
return sresid
# FIXME: alias to be removed, temporary backwards compatibility
split_resid = resid_split
centered_resid = resid_centered
split_centered_resid = resid_centered_split
@cache_readonly
def resid_response(self):
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_pearson(self):
val = self.model.endog - self.fittedvalues
val = val / np.sqrt(self.family.variance(self.fittedvalues))
return val
@cache_readonly
def resid_working(self):
val = self.resid_response
val = val / self.family.link.deriv(self.fittedvalues)
return val
@cache_readonly
def resid_anscombe(self):
return self.family.resid_anscombe(self.model.endog, self.fittedvalues)
@cache_readonly
def resid_deviance(self):
return self.family.resid_dev(self.model.endog, self.fittedvalues)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values from the model.
"""
return self.model.family.link.inverse(np.dot(self.model.exog,
self.params))
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
plot_added_variable.__doc__ = _plot_added_variable_doc % {
'extra_params_doc' : ''}
def plot_partial_residuals(self, focus_exog, ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
'extra_params_doc' : ''}
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
'extra_params_doc' : ''}
def conf_int(self, alpha=.05, cols=None, cov_type=None):
"""
Returns confidence intervals for the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. i.e., The
default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
cov_type : string
The covariance type used for computing standard errors;
must be one of 'robust', 'naive', or 'bias_reduced'.
See `GEE` for details.
Notes
-----
The confidence interval is based on the Gaussian distribution.
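Examples
--------
A hypothetical sketch, added for illustration (``res`` is assumed to
be a fitted `GEEResults` instance):
>>> ci_95 = res.conf_int()
>>> ci_90 = res.conf_int(alpha=0.10, cov_type="naive")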
"""
# super() does not allow specifying cov_type, and the method is not
# implemented there,
# FIXME: remove this method here
if cov_type is None:
bse = self.bse
else:
bse = self.standard_errors(cov_type=cov_type)
params = self.params
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = params[cols] - q * bse[cols]
upper = params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the GEE regression results
Parameters
----------
yname : string, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` for ## in 1, ..., p, where p is the number of regressors
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
cov_type : string
The covariance type used to compute the standard errors;
one of 'robust' (the usual robust sandwich-type covariance
estimate), 'naive' (ignores dependence), and 'bias
reduced' (the Mancl/DeRouen estimate).
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Generalized']),
('', ['Estimating Equations']),
('Family:', [self.model.family.__class__.__name__]),
('Dependence structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Covariance type: ', [self.cov_type,])
]
NY = [len(y) for y in self.model.endog_li]
top_right = [('No. Observations:', [sum(NY)]),
('No. clusters:', [len(self.model.endog_li)]),
('Min. cluster size:', [min(NY)]),
('Max. cluster size:', [max(NY)]),
('Mean cluster size:', ["%.1f" % np.mean(NY)]),
('Num. iterations:', ['%d' %
len(self.fit_history['params'])]),
('Scale:', ["%.3f" % self.scale]),
('Time:', None),
]
# The skew of the residuals
skew1 = stats.skew(self.resid)
kurt1 = stats.kurtosis(self.resid)
skew2 = stats.skew(self.centered_resid)
kurt2 = stats.kurtosis(self.centered_resid)
diagn_left = [('Skew:', ["%12.4f" % skew1]),
('Centered skew:', ["%12.4f" % skew2])]
diagn_right = [('Kurtosis:', ["%12.4f" % kurt1]),
('Centered kurtosis:', ["%12.4f" % kurt2])
]
if title is None:
title = self.model.__class__.__name__ + ' ' +\
"Regression Results"
# Override the dataframe names if xname is provided as an
# argument.
if xname is not None:
xna = xname
else:
xna = self.model.exog_names
# Create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=self.model.endog_names, xname=xna,
title=title)
smry.add_table_params(self, yname=yname, xname=xna,
alpha=alpha, use_t=False)
smry.add_table_2cols(self, gleft=diagn_left,
gright=diagn_right, yname=yname,
xname=xna, title="")
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is 'all'
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
- 'eydx' - estimate semielasticity -- d(lny)/dx
Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array-like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the
zero-indexed column number as the key and the value at which that
variable is held fixed as the dictionary value.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
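Examples
--------
A hypothetical sketch, added for illustration (``res`` is assumed to
be a fitted `GEEResults` instance):
>>> marg = res.get_margeff(at='overall', method='dydx')
>>> print(marg.summary())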
"""
if self.model.constraint is not None:
warnings.warn("marginal effects ignore constraints")
return GEEMargins(self, (at, method, atexog, dummy, count))
def plot_isotropic_dependence(self, ax=None, xpoints=10,
min_n=50):
"""
Create a plot of the pairwise products of within-group
residuals against the corresponding time differences. This
plot can be used to assess the possible form of an isotropic
covariance structure.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
xpoints : scalar or array-like
If a scalar, the number of equally spaced points on
the time difference axis used to define bins for
calculating local means. If an array, the specific points
that define the bins.
min_n : integer
The minimum sample size in a bin for the mean residual
product to be included on the plot.
"""
from statsmodels.graphics import utils as gutils
resid = self.model.cluster_list(self.resid)
time = self.model.cluster_list(self.model.time)
# All within-group pairwise time distances (xdt) and the
# corresponding products of scaled residuals (xre).
xre, xdt = [], []
for re, ti in zip(resid, time):
ix = np.tril_indices(re.shape[0], 0)
re = re[ix[0]] * re[ix[1]] / self.scale**2
xre.append(re)
dists = np.sqrt(((ti[ix[0],:] - ti[ix[1],:])**2).sum(1))
xdt.append(dists)
xre = np.concatenate(xre)
xdt = np.concatenate(xdt)
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# Convert to a correlation
ii = np.flatnonzero(xdt == 0)
v0 = np.mean(xre[ii])
xre /= v0
# Use the simple average to smooth, since fancier smoothers
# that trim and downweight outliers give biased results (we
# need the actual mean of a skewed distribution).
if np.isscalar(xpoints):
xpoints = np.linspace(0, max(xdt), xpoints)
dg = np.digitize(xdt, xpoints)
dgu = np.unique(dg)
hist = np.asarray([np.sum(dg==k) for k in dgu])
ii = np.flatnonzero(hist >= min_n)
dgu = dgu[ii]
dgy = np.asarray([np.mean(xre[dg==k]) for k in dgu])
dgx = np.asarray([np.mean(xdt[dg==k]) for k in dgu])
ax.plot(dgx, dgy, '-', color='orange', lw=5)
ax.set_xlabel("Time difference")
ax.set_ylabel("Product of scaled residuals")
return fig
def sensitivity_params(self, dep_params_first,
dep_params_last, num_steps):
"""
Refits the GEE model using a sequence of values for the
dependence parameters.
Parameters
----------
dep_params_first : array-like
The first dep_params in the sequence
dep_params_last : array-like
The last dep_params in the sequence
num_steps : int
The number of dep_params in the sequence
Returns
-------
results : array-like
The GEEResults objects resulting from the fits.
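Examples
--------
A hypothetical sketch, added for illustration (``res`` is assumed to be
a fitted `GEEResults` instance whose dependence structure has a scalar
``dep_params``):
>>> fits = res.sensitivity_params(0.0, 0.5, 5)
>>> [fit.params for fit in fits]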
"""
model = self.model
import copy
cov_struct = copy.deepcopy(self.model.cov_struct)
# We are fixing the dependence structure in each run.
update_dep = model.update_dep
model.update_dep = False
dep_params = []
results = []
for x in np.linspace(0, 1, num_steps):
dp = x * dep_params_last + (1 - x) * dep_params_first
dep_params.append(dp)
model.cov_struct = copy.deepcopy(cov_struct)
model.cov_struct.dep_params = dp
rslt = model.fit(start_params=self.params,
ctol=self.ctol,
params_niter=self.params_niter,
first_dep_update=self.first_dep_update,
cov_type=self.cov_type)
results.append(rslt)
model.update_dep = update_dep
return results
# FIXME: alias to be removed, temporary backwards compatibility
params_sensitivity = sensitivity_params
class GEEResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'centered_resid' : 'rows',
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GEEResultsWrapper, GEEResults)
class OrdinalGEE(GEE):
__doc__ = (
" Estimation of ordinal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_ordinal_family_doc,
'example': _gee_ordinal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
if family is None:
family = families.Binomial()
else:
if not isinstance(family, families.Binomial):
raise ValueError("ordinal GEE must use a Binomial family")
if cov_struct is None:
cov_struct = cov_structs.OrdinalIndependence()
endog, exog, groups, time, offset = self.setup_ordinal(endog,
exog, groups, time, offset)
super(OrdinalGEE, self).__init__(endog, exog, groups, time,
family, cov_struct, missing,
offset, dep_data, constraint)
def setup_ordinal(self, endog, exog, groups, time, offset):
"""
Restructure ordinal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
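For example (added for illustration), an ordinal outcome taking the
values 1 < 2 < 3 is expanded into two binary indicators per observation,
I(y > 1) and I(y > 2), each with its own intercept column; the largest
level never appears as a threshold.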
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog),1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
nrows = ncut * len(endog)
exog_out = np.zeros((nrows, exog.shape[1]),
dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
intercepts = np.zeros((nrows, ncut), dtype=np.float64)
groups_out = np.zeros(nrows, dtype=groups.dtype)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
exog_out[jrow, :] = exog_row
endog_out[jrow] = (int(endog_value > thresh))
intercepts[jrow, thresh_ix] = 1
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
exog_out = np.concatenate((intercepts, exog_out), axis=1)
# exog column names, including intercepts
xnames = ["I(y>%.1f)" % v for v in endog_cuts]
if type(self.exog_orig) == pd.DataFrame:
xnames.extend(self.exog_orig.columns)
else:
xnames.extend(["x%d" % k for k in range(1, exog.shape[1]+1)])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve the endog name if there is one
if type(self.endog_orig) == pd.Series:
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(OrdinalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to an OrdinalGEEResults
ord_rslt = OrdinalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
#for k in rslt._props:
# setattr(ord_rslt, k, getattr(rslt, k))
return OrdinalGEEResultsWrapper(ord_rslt)
fit.__doc__ = _gee_fit_doc
class OrdinalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for an ordinal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in an ordinal model,
for specified values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array-like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
Examples
--------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ev = [{"sex": 1}, {"sex": 0}]
>>> rslt.plot_distribution(exog_values=ev)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{},]
exog_means = self.model.exog.mean(0)
ix_icept = [i for i,x in enumerate(self.model.exog_names) if
x.startswith("I(")]
for ev in exog_values:
for k in ev.keys():
if k not in self.model.exog_names:
raise ValueError("%s is not a variable in the model"
% k)
# Get the fitted probability for each level, at the given
# covariate values.
pr = []
for j in ix_icept:
xp = np.zeros_like(self.params)
xp[j] = 1.
for i,vn in enumerate(self.model.exog_names):
if i in ix_icept:
continue
# User-specified value
if vn in ev:
xp[i] = ev[vn]
# Mean value
else:
xp[i] = exog_means[i]
p = 1 / (1 + np.exp(-np.dot(xp, self.params)))
pr.append(p)
pr.insert(0, 1)
pr.append(0)
pr = np.asarray(pr)
prd = -np.diff(pr)
ax.plot(self.model.endog_values, prd, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_ylim(0, 1)
return fig
class OrdinalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(OrdinalGEEResultsWrapper, OrdinalGEEResults)
class NominalGEE(GEE):
__doc__ = (
" Estimation of nominal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_nominal_family_doc,
'example': _gee_nominal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
endog, exog, groups, time, offset = self.setup_nominal(endog,
exog, groups, time, offset)
if family is None:
family = _Multinomial(self.ncut+1)
if cov_struct is None:
cov_struct = cov_structs.NominalIndependence()
super(NominalGEE, self).__init__(endog, exog, groups,
time, family, cov_struct, missing, offset, dep_data,
constraint)
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def setup_nominal(self, endog, exog, groups, time, offset):
"""
Restructure nominal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog),1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
self.ncut = ncut
nrows = len(endog_cuts) * exog.shape[0]
ncols = len(endog_cuts) * exog.shape[1]
exog_out = np.zeros((nrows, ncols), dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
groups_out = np.zeros(nrows, dtype=np.float64)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
u = np.zeros(len(endog_cuts), dtype=np.float64)
u[thresh_ix] = 1
exog_out[jrow, :] = np.kron(u, exog_row)
endog_out[jrow] = (int(endog_value == thresh))
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
# exog names
if type(self.exog_orig) == pd.DataFrame:
xnames_in = self.exog_orig.columns
else:
xnames_in = ["x%d" % k for k in range(1, exog.shape[1]+1)]
xnames = []
for tr in endog_cuts:
xnames.extend(["%s[%.1f]" % (v, tr) for v in xnames_in])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve endog name if there is one
if type(self.endog_orig) == pd.Series:
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array-like
The exogenous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
lin_pred : array-like
The values of the linear predictor, length must be multiple
of `ncut`.
Returns
-------
The derivative of the expected endog with respect to the
parameters.
"""
expval = np.exp(lin_pred)
# Reshape so that each row contains all the indicators
# corresponding to one multinomial observation.
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
# The normalizing constant for the multinomial probabilities.
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
# The multinomial probabilities
mprob = expval / denom
# First term of the derivative: denom * expval' / denom^2 =
# expval' / denom.
dmat = mprob[:, None] * exog
# Second term of the derivative: -expval * denom' / denom^2
ddenom = expval[:, None] * exog
dmat -= mprob[:, None] * ddenom / denom[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog for the
multinomial model, used in analyzing marginal effects.
Parameters
----------
exog : array-like
The exogenous data at which the derivative is computed;
the number of rows must be a multiple of `ncut`.
params : array-like
The model parameters; the length must be a multiple of `ncut`.
Returns
-------
The value of the derivative of the expected endog with respect
to exog.
Notes
-----
offset_exposure must be set to None for the multinomial family.
"""
if offset_exposure is not None:
warnings.warn("Offset/exposure ignored for the multinomial family")
lpr = np.dot(exog, params)
expval = np.exp(lpr)
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
bmat0 = np.outer(np.ones(exog.shape[0]), params)
# Masking matrix
qmat = []
for j in range(self.ncut):
ee = np.zeros(self.ncut, dtype=np.float64)
ee[j] = 1
qmat.append(np.kron(ee, np.ones(len(params) // self.ncut)))
qmat = np.array(qmat)
qmat = np.kron(np.ones((exog.shape[0] // self.ncut, 1)), qmat)
bmat = bmat0 * qmat
dmat = expval[:, None] * bmat / denom[:, None]
expval_mb = np.kron(expval_m, np.ones((self.ncut, 1)))
expval_mb = np.kron(expval_mb, np.ones((1, self.ncut)))
dmat -= expval[:, None] * (bmat * expval_mb) / denom[:, None]**2
return dmat
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(NominalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
if rslt is None:
warnings.warn("GEE updates did not converge",
ConvergenceWarning)
return None
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to a NominalGEEResults
nom_rslt = NominalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
#for k in rslt._props:
# setattr(nom_rslt, k, getattr(rslt, k))
return NominalGEEResultsWrapper(nom_rslt)
fit.__doc__ = _gee_fit_doc
class NominalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for a nominal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in a nominal model,
for specified values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array-like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
Examples
--------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ex = [{"sex": 1}, {"sex": 0}]
>>> rslt.plot_distribution(exog_values=ex)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{},]
link = self.model.family.link.inverse
ncut = self.model.family.ncut
k = self.model.exog.shape[1] // ncut
exog_means = self.model.exog.mean(0)[0:k]
exog_names = self.model.exog_names[0:k]
exog_names = [x.split("[")[0] for x in exog_names]
params = np.reshape(self.params,
(ncut, len(self.params) // ncut))
for ev in exog_values:
exog = exog_means.copy()
for k in ev.keys():
if k not in exog_names:
raise ValueError("%s is not a variable in the model"
% k)
ii = exog_names.index(k)
exog[ii] = ev[k]
lpr = np.dot(params, exog)
pr = link(lpr)
pr = np.r_[pr, 1-pr.sum()]
ax.plot(self.model.endog_values, pr, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_xticks(self.model.endog_values)
ax.set_xticklabels(self.model.endog_values)
ax.set_ylim(0, 1)
return fig
class NominalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(NominalGEEResultsWrapper, NominalGEEResults)
class _MultinomialLogit(Link):
"""
The multinomial logit transform, only for use with GEE.
Notes
-----
The data are assumed coded as binary indicators, where each
observed multinomial value y is coded as I(y == S[0]), ..., I(y ==
S[-1]), where S is the set of possible response labels, excluding
the largest one. Therefore, functions in this class should only
be called using vector arguments whose length is a multiple of |S|
= ncut, which is an argument to be provided when initializing the
class.
call and derivative use a private method _clean to trim p by 1e-10
so that p is in (0, 1)
"""
def __init__(self, ncut):
self.ncut = ncut
def inverse(self, lpr):
"""
Inverse of the multinomial logit transform, which gives the
expected values of the data as a function of the linear
predictors.
Parameters
----------
lpr : array-like (length must be divisible by `ncut`)
The linear predictors
Returns
-------
prob : array
Probabilities, or expected values
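Examples
--------
A small sketch, added for illustration: with ``ncut=2`` and both linear
predictors equal to zero, each of the two non-reference categories gets
probability 1/3 (the reference category absorbs the remainder).
>>> link = _MultinomialLogit(2)
>>> link.inverse(np.array([0.0, 0.0]))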
"""
expval = np.exp(lpr)
denom = 1 + np.reshape(expval, (len(expval) // self.ncut,
self.ncut)).sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
prob = expval / denom
return prob
class _Multinomial(families.Family):
"""
Pseudo-link function for fitting nominal multinomial models with
GEE. Not for use outside the GEE class.
"""
links = [_MultinomialLogit,]
variance = varfuncs.binary
safe_links = [_MultinomialLogit,]
def __init__(self, nlevels):
"""
Parameters
----------
nlevels : integer
The number of distinct categories for the multinomial
distribution.
"""
self.initialize(nlevels)
def initialize(self, nlevels):
self.ncut = nlevels - 1
self.link = _MultinomialLogit(self.ncut)
from statsmodels.discrete.discrete_margins import (_get_margeff_exog,
_get_const_index, _check_margeff_args, _effects_at,
margeff_cov_with_se, _check_at_is_all, _transform_names,
_check_discrete_args, _get_dummy_index, _get_count_index)
class GEEMargins(object):
"""
Estimated marginal effects for a regression model fit with GEE.
Parameters
----------
results : GEEResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
"""
def __init__(self, results, args, kwargs={}):
self._cache = resettable_cache()
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = resettable_cache()
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
frame : DataFrames
A DataFrame summarizing the marginal effects.
"""
_check_at_is_all(self.margeff_options)
from pandas import DataFrame
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0) != 0 # True if not a constant
exog_names = self.results.model.exog_names
var_names = [name for i, name in enumerate(exog_names) if ind[i]]
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
return DataFrame(table, columns=names, index=var_names)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return stats.norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = stats.norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
def summary(self, alpha=.05):
"""
Returns a summary table for marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
Summary : SummaryTable
A SummaryTable instance
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = results.model
title = model.__class__.__name__ + " Marginal Effects"
method = self.margeff_options['method']
top_left = [('Dep. Variable:', [model.endog_names]),
('Method:', [method]),
('At:', [self.margeff_options['at']]),]
from statsmodels.iolib.summary import (Summary, summary_params,
table_extend)
exog_names = model.exog_names[:] # copy
smry = Summary()
# sigh, we really need to hold on to this in _data...
_, const_idx = _get_const_index(model.exog)
if const_idx is not None:
exog_names.pop(const_idx)
J = int(getattr(model, "J", 1))
if J > 1:
yname, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
else:
yname = model.endog_names
yname_list = [yname]
smry.add_table_2cols(self, gleft=top_left, gright=[],
yname=yname, xname=exog_names, title=title)
#NOTE: add_table_params is not general enough yet for margeff
# could use a refactor with getattr instead of hard-coded params
# tvalues etc.
table = []
conf_int = self.conf_int(alpha)
margeff = self.margeff
margeff_se = self.margeff_se
tvalues = self.tvalues
pvalues = self.pvalues
if J > 1:
for eq in range(J):
restup = (results, margeff[:, eq], margeff_se[:, eq],
tvalues[:, eq], pvalues[:, eq], conf_int[:, :, eq])
tble = summary_params(restup, yname=yname_list[eq],
xname=exog_names, alpha=alpha, use_t=False,
skip_header=True)
tble.title = yname_list[eq]
# overwrite coef with method name
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
tble.insert_header_row(0, header)
#from IPython.core.debugger import Pdb; Pdb().set_trace()
table.append(tble)
table = table_extend(table, keep_headers=True)
else:
restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
table = summary_params(restup, yname=yname, xname=exog_names,
alpha=alpha, use_t=False, skip_header=True)
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
table.insert_header_row(0, header)
smry.tables.append(table)
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
self._reset() # always reset the cache when this is called
#TODO: if at is not all or overall, we can also put atexog values
# in summary table head
method = method.lower()
at = at.lower()
_check_margeff_args(at, method)
self.margeff_options = dict(method=method, at=at)
results = self.results
model = results.model
params = results.params
exog = model.exog.copy() # copy because values are changed
effects_idx, const_idx = _get_const_index(exog)
if dummy:
_check_discrete_args(at, method)
dummy_idx, dummy = _get_dummy_index(exog, const_idx)
else:
dummy_idx = None
if count:
_check_discrete_args(at, method)
count_idx, count = _get_count_index(exog, const_idx)
else:
count_idx = None
# get the exogenous variables
exog = _get_margeff_exog(exog, at, atexog, effects_idx)
# get base marginal effects, handled by sub-classes
effects = model._derivative_exog(params, exog, method,
dummy_idx, count_idx)
effects = _effects_at(effects, at)
if at == 'all':
self.margeff = effects[:, effects_idx]
else:
# Set standard error of the marginal effects by Delta method.
margeff_cov, margeff_se = margeff_cov_with_se(model, params, exog,
results.cov_params(), at,
model._derivative_exog,
dummy_idx, count_idx,
method, 1)
# don't care about at constant
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
self.margeff_se = margeff_se[effects_idx]
self.margeff = effects[effects_idx]
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/learning_curve.py | 35 | 15441 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0,
error_score='raise'):
"""Learning curve.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.learning_curve` instead.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
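Examples
--------
A minimal sketch, added for illustration (assumes the scikit-learn
digits dataset and a linear SVM; not part of the original docstring):
>>> from sklearn.datasets import load_digits
>>> from sklearn.svm import SVC
>>> X, y = load_digits(return_X_y=True)
>>> sizes, train_scores, test_scores = learning_curve(
...     SVC(kernel="linear"), X, y,
...     train_sizes=[0.3, 0.6, 1.0], cv=5)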
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True,
error_score=error_score)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.validation_curve` instead.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
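Examples
--------
A minimal sketch, added for illustration (varies the regularization
strength of a ridge regression on random data):
>>> import numpy as np
>>> from sklearn.linear_model import Ridge
>>> X, y = np.random.rand(50, 3), np.random.rand(50)
>>> train_scores, test_scores = validation_curve(
...     Ridge(), X, y, param_name="alpha",
...     param_range=np.logspace(-3, 3, 5), cv=3)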
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
| bsd-3-clause |
jskDr/jamespy_py3 | tflearn/linear_regression.py | 7 | 2600 | '''
A linear regression learning algorithm example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Create Model
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
activation = tf.add(tf.mul(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
"W=", sess.run(W), "b=", sess.run(b)
print "Optimization Finished!"
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
# Testing example, as requested (Issue #2)
test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
print "Testing... (L2 loss Comparison)"
testing_cost = sess.run(tf.reduce_sum(tf.pow(activation-Y, 2))/(2*test_X.shape[0]),
feed_dict={X: test_X, Y: test_Y}) #same function as cost above
print "Testing cost=", testing_cost
print "Absolute l2 loss difference:", abs(training_cost - testing_cost)
#Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(test_X, test_Y, 'bo', label='Testing data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show() | mit |
dwhswenson/openpathsampling | openpathsampling/tests/test_tis_analysis.py | 3 | 48571 | import itertools
import random
import pytest
from nose.tools import assert_equal, assert_almost_equal, raises
from .test_helpers import (make_1d_traj, MoverWithSignature, RandomMDEngine,
assert_frame_equal, assert_items_equal)
from openpathsampling.analysis.tis import *
from openpathsampling.analysis.tis.core import steps_to_weighted_trajectories
from openpathsampling.analysis.tis.flux import default_flux_sort
import openpathsampling as paths
import pandas as pd
import pandas.testing as pdt
import logging
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.ensemble').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
def make_tis_traj_fixed_steps(n_steps, step_size=0.1, reverse=False):
if reverse:
sign = -1
delta = 1.0
else:
sign = 1
delta = 0.0
rising = [delta + sign * (-0.5 + i) * step_size
for i in range(n_steps + 2)]
falling = list(reversed(rising))[1:]
return make_1d_traj(rising + falling)
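# For illustration (not used by the tests): make_tis_traj_fixed_steps(1)
# produces x-values [-0.05, 0.05, 0.15, 0.05, -0.05], i.e. a path that
# starts in state A, crosses the 0.0 and 0.1 interfaces, and returns to A.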
class TestMultiEnsembleSamplingAnalyzer(object):
# this only has to check the error generation; everything else gets
# covered by tests of subclasses
@raises(RuntimeError)
def test_no_ensembles(self):
histogrammer = MultiEnsembleSamplingAnalyzer()
histogrammer.calculate([])
class TISAnalysisTester(object):
# abstract class to give the same setup to all the test functions
def _make_fake_steps(self, sample_sets, mover):
steps = []
for (mccycle, sample_set) in enumerate(sample_sets):
change = paths.AcceptedSampleMoveChange(
samples=sample_set.samples,
mover=mover,
details=None,
input_samples=None
)
step = paths.MCStep(mccycle=mccycle,
active=sample_set,
change=change)
steps.append(step)
return steps
def _make_fake_sampling_sets(self, network):
ensembles_AB = self.sampling_ensembles_for_transition(
network, self.state_A, self.state_B
)
ensembles_BA = self.sampling_ensembles_for_transition(
network, self.state_B, self.state_A
)
all_ensembles = ensembles_AB + ensembles_BA
# This encodes what each SampleSet contains at each time step. Each entry is the
# trajectory number (from trajs_AB/trajs_BA) for each ensemble
# (index order of sampling_AB.ensembles + sampling_BA.ensembles)
descriptions = [
[2, 3, 3, 2, 3, 3],
[1, 2, 3, 1, 2, 3],
[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]
]
# here's the fancy fake data
sample_sets = []
for descr in descriptions:
set_trajectories = ([self.trajs_AB[d] for d in descr[:3]]
+ [self.trajs_BA[d] for d in descr[3:]])
sample_set = paths.SampleSet([
paths.Sample(trajectory=traj,
ensemble=ens,
replica=rep)
for (traj, ens, rep) in zip(set_trajectories, all_ensembles,
range(len(all_ensembles)))
])
sample_sets.append(sample_set)
return sample_sets
def sampling_ensembles_for_transition(self, network, state_A, state_B):
analysis_AB = network.transitions[(state_A, state_B)]
sampling_AB = network.analysis_to_sampling[analysis_AB][0]
return sampling_AB.ensembles
def setup(self):
# set up the trajectories, ensembles, etc. for this test
self.HAS_TQDM = paths.progress.HAS_TQDM
paths.progress.HAS_TQDM = False  # turn off progress bars
paths.InterfaceSet._reset()
cv_A = paths.FunctionCV('Id', lambda s: s.xyz[0][0])
cv_B = paths.FunctionCV('1-Id', lambda s: 1.0-s.xyz[0][0])
self.cv_x = cv_A
self.state_A = paths.CVDefinedVolume(cv_A,
float("-inf"), 0.0).named("A")
self.state_B = paths.CVDefinedVolume(cv_B,
float("-inf"), 0.0).named("B")
interfaces_AB = paths.VolumeInterfaceSet(cv_A, float("-inf"),
[0.0, 0.1, 0.2])
interfaces_BA = paths.VolumeInterfaceSet(cv_B, float("-inf"),
[0.0, 0.1, 0.2])
# trajectory that crosses each interface, one state-to-state
self.trajs_AB = [make_tis_traj_fixed_steps(i) for i in [0, 1, 2]]
self.trajs_AB += [make_1d_traj([(-0.5 + i) * 0.1
for i in range(12)])]
self.trajs_BA = [make_tis_traj_fixed_steps(i, reverse=True)
for i in [0, 1, 2]]
self.trajs_BA += [make_1d_traj([1.0 - (-0.5 + i) * 0.1
for i in range(12)])]
# set up mistis
self.mistis = paths.MISTISNetwork([
(self.state_A, interfaces_AB, self.state_B),
(self.state_B, interfaces_BA, self.state_A)
])
mover_stub_mistis = MoverWithSignature(self.mistis.all_ensembles,
self.mistis.all_ensembles)
mistis_ssets = self._make_fake_sampling_sets(self.mistis)
self.mistis_steps = self._make_fake_steps(mistis_ssets,
mover_stub_mistis)
self.mistis_weighted_trajectories = steps_to_weighted_trajectories(
self.mistis_steps,
self.mistis.sampling_ensembles
)
# TODO: set up mstis
self.mstis = paths.MSTISNetwork([
(self.state_A, interfaces_AB),
(self.state_B, interfaces_BA)
])
mover_stub_mstis = MoverWithSignature(self.mstis.all_ensembles,
self.mstis.all_ensembles)
mstis_ssets = self._make_fake_sampling_sets(self.mstis)
self.mstis_steps = self._make_fake_steps(mstis_ssets,
mover_stub_mstis)
self.mstis_weighted_trajectories = steps_to_weighted_trajectories(
self.mstis_steps,
self.mstis.sampling_ensembles
)
def teardown(self):
paths.progress.HAS_TQDM = self.HAS_TQDM
class TestWeightedTrajectories(TISAnalysisTester):
def _check_network_results(self, network, weighted_trajs):
# works for both MISTIS and MSTIS, since they use equivalent data
ensembles_AB = self.sampling_ensembles_for_transition(
network, self.state_A, self.state_B
)
ensembles_BA = self.sampling_ensembles_for_transition(
network, self.state_B, self.state_A
)
# (ensemble_number, trajectory_number): count
results = {(0, 0): 2, (0, 1): 1, (0, 2): 1, (0, 3): 0,
(1, 0): 0, (1, 1): 2, (1, 2): 1, (1, 3): 1,
(2, 0): 0, (2, 1): 0, (2, 2): 2, (2, 3): 2}
for ((ens, traj), result) in results.items():
assert_equal(
weighted_trajs[ensembles_AB[ens]][self.trajs_AB[traj]],
result
)
assert_equal(
weighted_trajs[ensembles_BA[ens]][self.trajs_BA[traj]],
result
)
def test_steps_to_weighted_trajectories(self):
assert_equal(len(self.mistis_weighted_trajectories),
len(self.mistis.sampling_ensembles))
self._check_network_results(self.mistis,
self.mistis_weighted_trajectories)
assert_equal(len(self.mstis_weighted_trajectories),
len(self.mstis.sampling_ensembles))
self._check_network_results(self.mstis,
self.mstis_weighted_trajectories)
class TestFluxToPandas(TISAnalysisTester):
# includes tests for default_flux_sort and flux_matrix_pd
# as a class to simplify setup of flux objects
def setup(self):
super(TestFluxToPandas, self).setup()
interfaces_A = self.mstis.from_state[self.state_A].interfaces
interfaces_B = self.mstis.from_state[self.state_B].interfaces
pairs_A = list(itertools.product([self.state_A], interfaces_A))
pairs_B = list(itertools.product([self.state_B], interfaces_B))
# note that this gives the canonical order we desire
self.all_pairs = pairs_A + pairs_B
self.default_ordered_results = [
((self.state_A, interfaces_A[0]), 10.0),
((self.state_A, interfaces_A[1]), 5.0),
((self.state_A, interfaces_A[2]), 2.0),
((self.state_B, interfaces_B[0]), 1.0),
((self.state_B, interfaces_B[1]), 0.5),
((self.state_B, interfaces_B[2]), 0.2)
]
shuffled_fluxes = self.default_ordered_results[:]
random.shuffle(shuffled_fluxes)
self.fluxes = {key: value for (key, value) in shuffled_fluxes}
self.indices = [
("A", "-inf<Id<0.0"), ("A", "-inf<Id<0.1"), ("A", "-inf<Id<0.2"),
("B", "-inf<1-Id<0.0"), ("B", "-inf<1-Id<0.1"),
("B", "-inf<1-Id<0.2")
]
values = [value for (key, value) in self.default_ordered_results]
index = pd.MultiIndex.from_tuples(self.indices,
names=["State", "Interface"])
self.expected_series = pd.Series(values, name="Flux", index=index)
def test_default_flux_sort(self):
shuffled = self.all_pairs[:]
random.shuffle(shuffled)
sorted_result = default_flux_sort(shuffled)
assert_items_equal(sorted_result, self.all_pairs)
def test_flux_matrix_pd_default(self):
series = flux_matrix_pd(self.fluxes)
pdt.assert_series_equal(series, self.expected_series)
def test_flux_matrix_pd_None(self):
series = flux_matrix_pd(self.fluxes, sort_method=None)
for idx in self.indices:
assert_almost_equal(series[idx], self.expected_series[idx])
@raises(KeyError)
def test_flux_matrix_pd_unknown_str(self):
flux_matrix_pd(self.fluxes, sort_method="foo")
class TestDictFlux(TISAnalysisTester):
def setup(self):
super(TestDictFlux, self).setup()
self.innermost_interface_A = \
self.sampling_ensembles_for_transition(self.mistis,
self.state_A,
self.state_B)[0]
self.innermost_interface_B = \
self.sampling_ensembles_for_transition(self.mistis,
self.state_B,
self.state_A)[0]
self.flux_dict = {(self.state_A, self.innermost_interface_A): 1.0,
(self.state_B, self.innermost_interface_B): 1.0}
self.flux_method = DictFlux(self.flux_dict)
def test_calculate(self):
assert_equal(self.flux_method.calculate(self.mistis_steps),
self.flux_dict)
def test_from_weighted_trajectories(self):
assert_equal(
self.flux_method.from_weighted_trajectories(self.mistis_steps),
self.flux_dict
)
def test_intermediates(self):
assert_equal(self.flux_method.intermediates(self.mistis_steps), [])
def test_calculate_from_intermediates(self):
intermediates = self.flux_method.intermediates(self.mistis_steps)
assert_equal(
self.flux_method.calculate_from_intermediates(*intermediates),
self.flux_dict
)
def test_combine_results(self):
my_result = self.flux_method.calculate(self.mistis_steps)
same_result = {(self.state_A, self.innermost_interface_A): 1.0,
(self.state_B, self.innermost_interface_B): 1.0}
assert_equal(
self.flux_method.combine_results(my_result, same_result),
my_result
)
@raises(RuntimeError)
def test_bad_combine_results(self):
my_result = self.flux_method.calculate(self.mistis_steps)
bad_result = {(self.state_A, self.innermost_interface_A): 2.0,
(self.state_B, self.innermost_interface_B): 2.0}
self.flux_method.combine_results(my_result, bad_result)
class TestMinusMoveFlux(TISAnalysisTester):
def setup(self):
super(TestMinusMoveFlux, self).setup()
a = 0.1 # just a number to simplify the trajectory-making
minus_move_descriptions = [
[-a, a, a, -a, -a, -a, -a, -a, a, a, a, a, a, -a],
[-a, a, a, a, -a, -a, -a, a, a, a, -a]
]
engine = RandomMDEngine() # to get snapshot_timestep
self.mistis_scheme = paths.DefaultScheme(self.mistis, engine)
self.mistis_scheme.build_move_decision_tree()
self.mistis_minus_steps = self._make_fake_minus_steps(
scheme=self.mistis_scheme,
descriptions=minus_move_descriptions
)
self.mistis_minus_flux = MinusMoveFlux(self.mistis_scheme)
self.mstis_scheme = paths.DefaultScheme(self.mstis, engine)
self.mstis_scheme.build_move_decision_tree()
self.mstis_minus_steps = self._make_fake_minus_steps(
scheme=self.mstis_scheme,
descriptions=minus_move_descriptions
)
self.mstis_minus_flux = MinusMoveFlux(self.mstis_scheme)
def _make_fake_minus_steps(self, scheme, descriptions):
network = scheme.network
state_adjustment = {
self.state_A: lambda x: x,
self.state_B: lambda x: 1.0 - x
}
minus_ensemble_to_mover = {m.minus_ensemble: m
for m in scheme.movers['minus']}
assert_equal(set(minus_ensemble_to_mover.keys()),
set(network.minus_ensembles))
steps = []
mccycle = 0
for minus_traj in descriptions:
for i, minus_ensemble in enumerate(network.minus_ensembles):
replica = -1 - i
adjustment = state_adjustment[minus_ensemble.state_vol]
traj = make_1d_traj([adjustment(s) for s in minus_traj])
assert_equal(minus_ensemble(traj), True)
samp = paths.Sample(trajectory=traj,
ensemble=minus_ensemble,
replica=replica)
sample_set = paths.SampleSet([samp])
change = paths.AcceptedSampleMoveChange(
samples=[samp],
mover=minus_ensemble_to_mover[samp.ensemble],
details=paths.Details()
)
                # NOTE: this makes it so that only one ensemble is
                # represented in the sample set at any time, which isn't
                # quite how it actually works. However, this doesn't matter
                # for the current implementation
steps.append(paths.MCStep(mccycle=mccycle,
active=sample_set,
change=change))
mccycle += 1
assert_equal(len(steps), 4)
return steps
def test_get_minus_steps(self):
all_mistis_steps = self.mistis_steps + self.mistis_minus_steps
mistis_minus_steps = \
self.mistis_minus_flux._get_minus_steps(all_mistis_steps)
assert_equal(len(mistis_minus_steps), len(self.mistis_minus_steps))
assert_items_equal(mistis_minus_steps, self.mistis_minus_steps)
# this could be repeated for MSTIS, but why?
def test_calculate(self):
avg_t_in = (5.0 + 3.0) / 2
avg_t_out = (2.0 + 5.0 + 3.0 + 3.0) / 4
expected_flux = 1.0 / (avg_t_in + avg_t_out)
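        # where these numbers come from: in the two minus_move_descriptions
        # defined in setup (a = 0.1; negative entries are frames inside the
        # state after the per-state adjustment), the interior in-state
        # segments are 5 and 3 frames long and the out-of-state segments are
        # 2, 5 and 3, 3 frames long, so the flux estimate is
        # 1 / (4.0 + 3.25) ~ 0.138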
mistis_flux = \
self.mistis_minus_flux.calculate(self.mistis_minus_steps)
for flux in mistis_flux.values(): # all values are the same
assert_almost_equal(flux, expected_flux)
mstis_flux = \
self.mstis_minus_flux.calculate(self.mstis_minus_steps)
for flux in mstis_flux.values(): # all values are the same
assert_almost_equal(flux, expected_flux)
@raises(ValueError)
def test_bad_network(self):
# raises error if more than one transition shares a minus ensemble
# (flux cannot be calculated with multiple interface set minus move)
state_C = paths.CVDefinedVolume(self.cv_x, 0.5, 0.7)
trans_AB = self.mistis.transitions[(self.state_A, self.state_B)]
trans_BA = self.mistis.transitions[(self.state_B, self.state_A)]
interfaces_AB = trans_AB.interfaces
interfaces_BA = trans_BA.interfaces
interfaces_AC = trans_AB.interfaces
bad_mistis = paths.MISTISNetwork([
(self.state_A, interfaces_AB, self.state_B),
(self.state_B, interfaces_BA, self.state_A),
(self.state_A, interfaces_AC, state_C)
])
scheme = paths.DefaultScheme(bad_mistis)
scheme.build_move_decision_tree()
MinusMoveFlux(scheme)
class TestPathLengthHistogrammer(TISAnalysisTester):
def _check_network_results(self, network, hists):
results = {0: {(3.0,): 2, (5.0,): 1, (7.0,): 1},
1: {(5.0,): 2, (7.0,): 1, (12.0,): 1},
2: {(7.0,): 2, (12.0,): 2}}
ensembles_AB = self.sampling_ensembles_for_transition(network,
self.state_A,
self.state_B)
ensembles_BA = self.sampling_ensembles_for_transition(network,
self.state_B,
self.state_A)
for (key, dct) in results.items():
hist_dct_AB = hists[ensembles_AB[key]]._histogram
assert_equal(dict(hist_dct_AB), dct)
hist_dct_BA = hists[ensembles_BA[key]]._histogram
assert_equal(dict(hist_dct_BA), dct)
def test_calculate(self):
default_histogrammer = \
PathLengthHistogrammer(self.mistis.sampling_ensembles)
assert_equal(default_histogrammer.hist_parameters,
{'bin_width': 5, 'bin_range': (0, 1000)})
mistis_histogrammer = PathLengthHistogrammer(
ensembles=self.mistis.sampling_ensembles,
hist_parameters={'bin_width': 1, 'bin_range': (0, 10)}
)
mistis_hists = mistis_histogrammer.calculate(self.mistis_steps)
self._check_network_results(self.mistis, mistis_hists)
mstis_histogrammer = PathLengthHistogrammer(
ensembles=self.mstis.sampling_ensembles,
hist_parameters={'bin_width': 1, 'bin_range': (0, 10)}
)
mstis_hists = mstis_histogrammer.calculate(self.mstis_steps)
self._check_network_results(self.mstis, mstis_hists)
class TestFullHistogramMaxLambda(TISAnalysisTester):
def _check_transition_results(self, transition, hists):
raw_lambda_results = {
0: {-0.05: 0, 0.05: 2, 0.15: 1, 0.25: 1},
1: {-0.05: 0, 0.05: 0, 0.15: 2, 0.25: 1, 0.35: 0, 1.05: 1},
2: {-0.05: 0, 0.05: 0, 0.15: 0, 0.25: 2, 0.35: 0, 1.05: 2}
}
for (ens, result_dct) in raw_lambda_results.items():
hist = hists[transition.ensembles[ens]]()
for key in result_dct.keys():
assert_almost_equal(result_dct[key], hist(key))
def test_calculate(self):
mistis_AB = self.mistis.transitions[(self.state_A, self.state_B)]
mistis_AB_histogrammer = FullHistogramMaxLambdas(
transition=mistis_AB,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
mistis_AB_hists = mistis_AB_histogrammer.calculate(self.mistis_steps)
self._check_transition_results(mistis_AB, mistis_AB_hists)
mistis_BA = self.mistis.transitions[(self.state_B, self.state_A)]
mistis_BA_histogrammer = FullHistogramMaxLambdas(
transition=mistis_BA,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
mistis_BA_hists = mistis_BA_histogrammer.calculate(self.mistis_steps)
self._check_transition_results(mistis_BA, mistis_BA_hists)
mstis_AB = self.mstis.transitions[(self.state_A, self.state_B)]
mstis_AB_histogrammer = FullHistogramMaxLambdas(
transition=mstis_AB,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
mstis_AB_hists = mstis_AB_histogrammer.calculate(self.mstis_steps)
self._check_transition_results(mstis_AB, mstis_AB_hists)
mstis_BA = self.mstis.transitions[(self.state_B, self.state_A)]
mstis_BA_histogrammer = FullHistogramMaxLambdas(
transition=mstis_BA,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
mstis_BA_hists = mstis_BA_histogrammer.calculate(self.mstis_steps)
self._check_transition_results(mstis_BA, mstis_BA_hists)
@raises(RuntimeError)
def test_calculate_no_max_lambda(self):
mistis_AB = self.mistis.transitions[(self.state_A, self.state_B)]
modified_transition = paths.TISTransition(
stateA=mistis_AB.stateA,
stateB=mistis_AB.stateB,
interfaces=mistis_AB.interfaces.volumes,
orderparameter=mistis_AB.orderparameter
)
FullHistogramMaxLambdas(
transition=modified_transition,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
class TestConditionalTransitionProbability(TISAnalysisTester):
def _check_network_results(self, network, ctp_results):
results = {0: 0.0, 1: 0.25, 2: 0.5}
ensembles_AB = self.sampling_ensembles_for_transition(network,
self.state_A,
self.state_B)
ensembles_BA = self.sampling_ensembles_for_transition(network,
self.state_B,
self.state_A)
for ens_num in range(len(ensembles_AB)):
dct_AB = ctp_results[ensembles_AB[ens_num]]
result = results[ens_num]
if result != 0.0:
assert_equal(dct_AB[self.state_B], result)
if result != 1.0:
assert_equal(dct_AB[self.state_A], 1.0-result)
for ens_num in range(len(ensembles_BA)):
dct_BA = ctp_results[ensembles_BA[ens_num]]
result = results[ens_num]
if result != 0.0:
assert_equal(dct_BA[self.state_A], result)
if result != 1.0:
assert_equal(dct_BA[self.state_B], 1.0-result)
def test_calculate(self):
mistis_ctp_calc = ConditionalTransitionProbability(
ensembles=self.mistis.sampling_ensembles,
states=[self.state_A, self.state_B]
)
mistis_ctp = mistis_ctp_calc.calculate(self.mistis_steps)
self._check_network_results(self.mistis, mistis_ctp)
mstis_ctp_calc = ConditionalTransitionProbability(
ensembles=self.mstis.sampling_ensembles,
states=[self.state_A, self.state_B]
)
mstis_ctp = mstis_ctp_calc.calculate(self.mstis_steps)
self._check_network_results(self.mstis, mstis_ctp)
class TestTotalCrossingProbability(TISAnalysisTester):
def test_calculate(self):
# a bit of integration test, until we make a MaxLambdaStub
results = {0.0: 1.0, 0.1: 0.5, 0.2: 0.25, 0.3: 0.125,
0.5: 0.125, 1.0: 0.125}
mistis_AB = self.mistis.transitions[(self.state_A, self.state_B)]
mistis_AB_max_lambda = FullHistogramMaxLambdas(
transition=mistis_AB,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
mistis_AB_tcp = TotalCrossingProbability(mistis_AB_max_lambda)
tcp_AB = mistis_AB_tcp.calculate(self.mistis_steps)
for (x, result) in results.items():
assert_almost_equal(tcp_AB(x), result)
mistis_BA = self.mistis.transitions[(self.state_B, self.state_A)]
mistis_BA_max_lambda = FullHistogramMaxLambdas(
transition=mistis_BA,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
mistis_BA_tcp = TotalCrossingProbability(mistis_BA_max_lambda)
tcp_BA = mistis_BA_tcp.calculate(self.mistis_steps)
for (x, result) in results.items():
            assert_almost_equal(tcp_BA(x), result)
mstis_AB = self.mstis.transitions[(self.state_A, self.state_B)]
mstis_AB_max_lambda = FullHistogramMaxLambdas(
transition=mstis_AB,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
mstis_AB_tcp = TotalCrossingProbability(mstis_AB_max_lambda)
tcp_AB = mstis_AB_tcp.calculate(self.mstis_steps)
for (x, result) in results.items():
assert_almost_equal(tcp_AB(x), result)
mstis_BA = self.mstis.transitions[(self.state_B, self.state_A)]
mstis_BA_max_lambda = FullHistogramMaxLambdas(
transition=mstis_BA,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
mstis_BA_tcp = TotalCrossingProbability(mstis_BA_max_lambda)
tcp_BA = mstis_BA_tcp.calculate(self.mstis_steps)
for (x, result) in results.items():
assert_almost_equal(tcp_BA(x), result)
class TestStandardTransitionProbability(TISAnalysisTester):
def _check_network_results(self, network, steps):
for transition in network.transitions.values():
max_lambda_calc = FullHistogramMaxLambdas(
transition=transition,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
std_tp = StandardTransitionProbability(
transition=transition,
tcp_method=TotalCrossingProbability(max_lambda_calc),
ctp_method=ConditionalTransitionProbability(
ensembles=[transition.ensembles[-1]],
states=[self.state_A, self.state_B]
)
)
results = std_tp.calculate(steps)
assert_almost_equal(results, 0.125)
def test_calculate(self):
self._check_network_results(self.mistis, self.mistis_steps)
self._check_network_results(self.mstis, self.mstis_steps)
def test_missing_ctp(self):
ensembles_AB = self.sampling_ensembles_for_transition(
self.mistis, self.state_A, self.state_B
)
ensembles_BA = self.sampling_ensembles_for_transition(
self.mistis, self.state_B, self.state_A
)
all_ensembles = ensembles_AB + ensembles_BA
replicas = range(len(all_ensembles))
set_trajectories = [self.trajs_AB[2]]*3 + [self.trajs_BA[2]]*3
zipped = list(zip(set_trajectories, all_ensembles, replicas))
mover_stub_mistis = MoverWithSignature(self.mistis.all_ensembles,
self.mistis.all_ensembles)
sample_sets = []
n_steps = 3
for step in range(n_steps):
sample_set = paths.SampleSet([
paths.Sample(trajectory=traj,
ensemble=ens,
replica=rep)
for (traj, ens, rep) in zipped
])
sample_set.sanity_check()
sample_sets.append(sample_set)
steps = self._make_fake_steps(sample_sets, mover_stub_mistis)
for transition in self.mistis.transitions.values():
max_lambda_calc = FullHistogramMaxLambdas(
transition=transition,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
std_tp = StandardTransitionProbability(
transition=transition,
tcp_method=TotalCrossingProbability(max_lambda_calc),
ctp_method=ConditionalTransitionProbability(
ensembles=[transition.ensembles[-1]],
states=[self.state_A, self.state_B]
)
)
results = std_tp.calculate(steps)
assert_almost_equal(results, 0.0)
class TestTransitionDictResults(TISAnalysisTester):
def setup(self):
super(TestTransitionDictResults, self).setup()
results_dict = {(self.state_A, self.state_B): 1,
(self.state_B, self.state_A): 2}
self.mistis_transition_dict = TransitionDictResults(
results_dict=results_dict,
network=self.mistis,
allow_sampling=False
)
self.mstis_transition_dict = TransitionDictResults(
results_dict=results_dict,
network=self.mstis,
allow_sampling=True
)
def test_iter(self):
assert_equal(set(pair for pair in self.mistis_transition_dict),
set(pair for pair in self.mstis_transition_dict))
def test_get_by_pair(self):
assert_equal(
self.mstis_transition_dict[(self.state_A, self.state_B)], 1
)
assert_equal(
self.mstis_transition_dict[(self.state_A, self.state_B)],
self.mistis_transition_dict[(self.state_A, self.state_B)]
)
assert_equal(
self.mstis_transition_dict[(self.state_B, self.state_A)], 2
)
assert_equal(
self.mstis_transition_dict[(self.state_B, self.state_A)],
self.mistis_transition_dict[(self.state_B, self.state_A)]
)
@raises(KeyError)
def test_get_bad_pair(self):
self.mistis_transition_dict[(self.state_A, self.state_A)]
def test_get_by_transition(self):
mistis_AB = self.mistis.transitions[(self.state_A, self.state_B)]
mstis_AB = self.mstis.transitions[(self.state_A, self.state_B)]
assert_equal(self.mistis_transition_dict[mistis_AB], 1)
assert_equal(self.mistis_transition_dict[mistis_AB],
self.mstis_transition_dict[mstis_AB])
def test_get_by_sampling_transition(self):
from_A = self.mstis.from_state[self.state_A]
from_B = self.mstis.from_state[self.state_B]
assert_equal(self.mstis_transition_dict[from_A], 1)
assert_equal(self.mstis_transition_dict[from_B], 2)
@raises(KeyError)
def test_bad_get_sampling_transition(self):
sampling_trans = self.mistis.sampling_transitions[0]
self.mistis_transition_dict[sampling_trans]
def test_to_pandas(self):
result = [[float("nan"), 1], [2, float("nan")]]
order = [self.state_A, self.state_B]
ordered_names = ["A", "B"]
pd_result = pd.DataFrame(data=result, index=ordered_names,
columns=ordered_names)
pd_mistis = self.mistis_transition_dict.to_pandas(order=order)
pd_mstis = self.mstis_transition_dict.to_pandas()
assert_frame_equal(pd_mistis, pd_mstis)
assert_frame_equal(pd_mistis, pd_result)
class TestTISAnalysis(TISAnalysisTester):
def _make_tis_analysis(self, network):
# NOTE: this might be useful as a description of the overall nested
# structure of these TISAnalysis objects. However, this isn't a
# practical way to do the calculation, because it repeats effort
# (hence the StandardTISAnalysis object)
tis_analysis = TISAnalysis(
network=network,
flux_method=DictFlux({
(t[0].stateA, t[0].interfaces[0]): 0.1
for t in network.special_ensembles['minus'].values()
}),
transition_probability_methods={
transition: StandardTransitionProbability(
transition=transition,
tcp_method=TotalCrossingProbability(
max_lambda_calc=FullHistogramMaxLambdas(
transition=transition,
hist_parameters={'bin_width': 0.1,
'bin_range': (-0.1, 1.1)}
)
),
ctp_method=ConditionalTransitionProbability(
ensembles=[transition.ensembles[-1]],
states=[self.state_A, self.state_B]
)
)
for transition in network.transitions.values()
}
)
return tis_analysis
def setup(self):
super(TestTISAnalysis, self).setup()
self.mistis_analysis = self._make_tis_analysis(self.mistis)
self.mistis_analysis.calculate(self.mistis_steps)
self.mstis_analysis = self._make_tis_analysis(self.mstis)
self.mstis_analysis.calculate(self.mstis_steps)
def test_bad_access_cached_results(self):
no_results = self._make_tis_analysis(self.mistis)
_ = self.mistis_analysis._access_cached_result('rate')
# use a try/except here instead of @raises so that we also test that
# the calculated version (previous line) works as expected
try:
no_results._access_cached_result('rate')
except AttributeError:
pass # this is the expected test result
def test_flux_matrix(self):
assert_equal(self.mistis_analysis.flux_matrix,
{(t.stateA, t.interfaces[0]): 0.1
for t in self.mistis.sampling_transitions})
assert_equal(self.mstis_analysis.flux_matrix,
{(t.stateA, t.interfaces[0]): 0.1
for t in self.mstis.sampling_transitions})
def test_flux(self):
for transition in self.mistis.sampling_transitions:
state = transition.stateA
innermost = transition.interfaces[0]
assert_equal(self.mistis_analysis.flux(state, innermost), 0.1)
for transition in self.mstis.sampling_transitions:
state = transition.stateA
innermost = transition.interfaces[0]
assert_equal(self.mstis_analysis.flux(state, innermost), 0.1)
def test_flux_through_state(self):
flux_dict = {(t.stateA, t.interfaces[0]): 0.1
for t in self.mistis.sampling_transitions}
flux_dict.update({(self.state_A, self.state_A): 0.5})
tis = TISAnalysis(
network=self.mistis,
flux_method=DictFlux(flux_dict),
transition_probability_methods={
transition: StandardTransitionProbability(
transition=transition,
tcp_method=TotalCrossingProbability(
max_lambda_calc=FullHistogramMaxLambdas(
transition=transition,
hist_parameters={'bin_width': 0.1,
'bin_range': (-0.1, 1.1)}
)
),
ctp_method=ConditionalTransitionProbability(
ensembles=[transition.ensembles[-1]],
states=[self.state_A, self.state_B]
)
)
for transition in self.mistis.transitions.values()
}
)
tis.calculate(self.mistis_steps)
trans_AB = self.mistis.transitions[(self.state_A, self.state_B)]
assert_equal(tis.flux(self.state_A, trans_AB.interfaces[0]), 0.1)
assert_equal(tis.flux(self.state_A), 0.5)
def test_state_fluxes(self):
for transition in self.mistis.sampling_transitions:
state = transition.stateA
innermost = transition.interfaces[0]
assert_equal(self.mistis_analysis.state_fluxes(state),
{(state, innermost): 0.1})
for transition in self.mstis.sampling_transitions:
state = transition.stateA
innermost = transition.interfaces[0]
assert_equal(self.mstis_analysis.state_fluxes(state),
{(state, innermost): 0.1})
def test_transition_probability_matrix(self):
pairs = [(self.state_A, self.state_B), (self.state_B, self.state_A)]
mistis_tp = self.mistis_analysis.transition_probability_matrix
mstis_tp = self.mstis_analysis.transition_probability_matrix
for trans_pair in pairs:
assert_almost_equal(mistis_tp[trans_pair], 0.125)
assert_almost_equal(mstis_tp[trans_pair], 0.125)
def test_transition_probability(self):
pairs = [(self.state_A, self.state_B), (self.state_B, self.state_A)]
for (vol_1, vol_2) in pairs:
assert_almost_equal(
self.mistis_analysis.transition_probability(vol_1, vol_2),
0.125
)
assert_almost_equal(
self.mstis_analysis.transition_probability(vol_1, vol_2),
0.125
)
def test_rate_matrix(self):
pairs = [(self.state_A, self.state_B), (self.state_B, self.state_A)]
mistis_rate = self.mistis_analysis.rate_matrix()
mstis_rate = self.mstis_analysis.rate_matrix()
for (vol_1, vol_2) in pairs:
assert_almost_equal(mistis_rate[(vol_1, vol_2)], 0.0125)
assert_almost_equal(mstis_rate[(vol_1, vol_2)], 0.0125)
def test_rate_matrix_calculation(self):
mistis_analysis = self._make_tis_analysis(self.mistis)
mistis_rate = mistis_analysis.rate_matrix(steps=self.mistis_steps)
pairs = [(self.state_A, self.state_B), (self.state_B, self.state_A)]
for (vol_1, vol_2) in pairs:
assert_almost_equal(mistis_rate[(vol_1, vol_2)], 0.0125)
def test_rate(self):
pairs = [(self.state_A, self.state_B), (self.state_B, self.state_A)]
for (vol_1, vol_2) in pairs:
assert_almost_equal(self.mistis_analysis.rate(vol_1, vol_2),
0.0125)
assert_almost_equal(self.mstis_analysis.rate(vol_1, vol_2),
0.0125)
class TestStandardTISAnalysis(TestTISAnalysis):
# inherit from TestTISAnalysis to retest all the same results
def _make_tis_analysis(self, network, steps=None):
tis_analysis = StandardTISAnalysis(
network=network,
flux_method=DictFlux({(t.stateA, t.interfaces[0]): 0.1
for t in network.sampling_transitions}),
max_lambda_calcs={t: {'bin_width': 0.1,
'bin_range': (-0.1, 1.1)}
for t in network.sampling_transitions},
steps=steps
)
return tis_analysis
def test_init_with_steps(self):
mistis_analysis = self._make_tis_analysis(self.mistis,
self.mistis_steps)
pairs = [(self.state_A, self.state_B), (self.state_B, self.state_A)]
for (vol_1, vol_2) in pairs:
assert_almost_equal(
mistis_analysis.rate_matrix()[(vol_1, vol_2)],
0.0125
)
def test_crossing_probability(self):
results = {
0: {0.0: 1.0, 0.1: 0.5, 0.2: 0.25},
1: {0.0: 1.0, 0.1: 1.0, 0.2: 0.5, 0.3: 0.25, 1.0: 0.25},
2: {0.0: 1.0, 0.1: 1.0, 0.2: 1.0, 0.3: 0.5, 1.0: 0.5}
}
def check_cp(transition, analysis, results):
            # a little nested function that does the actual check
for (i, ens) in enumerate(transition.ensembles):
cp_ens = analysis.crossing_probability(ens)
for x in results[i]:
assert_almost_equal(results[i][x], cp_ens(x))
for (network, analysis) in [(self.mistis, self.mistis_analysis),
(self.mstis, self.mstis_analysis)]:
for transition in network.transitions.values():
check_cp(transition, analysis, results)
def test_conditional_transition_probability(self):
expected_data = [[0.5, 0.5], [0.5, 0.5]]
states = ['A', 'B']
mistis_interfaces = ['A->B 2', 'B->A 2']
mstis_interfaces = ['Out A 2', 'Out B 2']
expected_mistis = pd.DataFrame(data=expected_data,
index=mistis_interfaces,
columns=states)
mistis_ctp = self.mistis_analysis.conditional_transition_probability
assert_equal(set(states), set(mistis_ctp.columns))
assert_equal(set(mistis_interfaces), set(mistis_ctp.index))
for iface in mistis_interfaces:
for state in states:
assert_equal(expected_mistis.loc[(iface, state)],
mistis_ctp.loc[(iface, state)])
expected_mstis = pd.DataFrame(data=expected_data,
index=mstis_interfaces,
columns=states)
mstis_ctp = self.mstis_analysis.conditional_transition_probability
assert_equal(set(states), set(mstis_ctp.columns))
assert_equal(set(mstis_interfaces), set(mstis_ctp.index))
for iface in mstis_interfaces:
for state in states:
assert_equal(expected_mstis.loc[(iface, state)],
mstis_ctp.loc[(iface, state)])
def test_total_crossing_probability(self):
results = {0.0: 1.0, 0.1: 0.5, 0.2: 0.25, 0.3: 0.125,
0.5: 0.125, 1.0: 0.125}
mistis_tcp = self.mistis_analysis.total_crossing_probability
for transition in self.mistis.transitions.values():
tcp = mistis_tcp[transition]
for x in results:
assert_almost_equal(results[x], tcp(x))
mstis_tcp = self.mstis_analysis.total_crossing_probability
for transition in self.mstis.transitions.values():
tcp = mstis_tcp[transition]
for x in results:
assert_almost_equal(results[x], tcp(x))
@raises(TypeError)
def test_bad_no_flux(self):
network = self.mistis
StandardTISAnalysis(
network=network,
max_lambda_calcs={t: {'bin_width': 0.1,
'bin_range': (-0.1, 1.1)}
for t in network.sampling_transitions}
)
@raises(RuntimeError)
def test_bad_max_lambda_calcs(self):
network = self.mistis
StandardTISAnalysis(
network=network,
flux_method=DictFlux({(t.stateA, t.interfaces[0]): 0.1
for t in network.sampling_transitions})
)
def test_init_ensemble_histogrammer_max_lambda(self):
network = self.mistis
max_lambda_calcs = {
t: FullHistogramMaxLambdas(
transition=t,
hist_parameters={'bin_width': 0.1, 'bin_range': (-0.1, 1.1)}
)
for t in network.sampling_transitions
}
tis_analysis = StandardTISAnalysis(
network=network,
flux_method=DictFlux({(t.stateA, t.interfaces[0]): 0.1
for t in network.sampling_transitions}),
max_lambda_calcs=max_lambda_calcs,
steps=self.mistis_steps
)
rate = tis_analysis.rate_matrix()
pairs = [(self.state_A, self.state_B), (self.state_B, self.state_A)]
for (vol_1, vol_2) in pairs:
assert_almost_equal(rate[(vol_1, vol_2)], 0.0125)
def test_with_minus_move_flux(self):
network = self.mstis
scheme = paths.DefaultScheme(network, engine=RandomMDEngine())
scheme.build_move_decision_tree()
# create the minus move steps
# `center` is the edge of the state/innermost interface
center = {self.state_A: 0.0, self.state_B: 1.0}
replica = {self.state_A: -1, self.state_B: -2}
minus_ensemble_to_mover = {m.minus_ensemble: m
for m in scheme.movers['minus']}
state_to_minus_ensemble = {ens.state_vol: ens
for ens in network.minus_ensembles}
minus_changes = []
# `delta` is the change on either side for in vs. out
for (state, delta) in [(self.state_A, 0.1), (self.state_B, -0.1)]:
minus_ens = state_to_minus_ensemble[state]
minus_mover = minus_ensemble_to_mover[minus_ens]
a_in = center[state] - delta
a_out = center[state] + delta
# note that these trajs are equivalent to minus move
# descriptions in TestMinusMoveFlux
seq_1 = [a_in] + [a_out]*2 + [a_in]*5 + [a_out]*5 + [a_in]
seq_2 = [a_in] + [a_out]*3 + [a_in]*3 + [a_out]*3 + [a_in]
for seq in [seq_1, seq_2]:
traj = make_1d_traj(seq)
assert_equal(minus_ens(traj), True)
samp = paths.Sample(trajectory=traj,
ensemble=minus_ens,
replica=replica[state])
_ = paths.SampleSet([samp])
change = paths.AcceptedSampleMoveChange(
samples=[samp],
mover=minus_mover,
details=paths.Details()
)
minus_changes.append(change)
active = self.mstis_steps[0].active
steps = []
cycle = -1
for m_change in minus_changes:
cycle += 1
active = active.apply_samples(m_change.samples)
step = paths.MCStep(mccycle=cycle,
active=active,
change=m_change)
steps.append(step)
for old_step in self.mstis_steps[1:]:
cycle += 1
active = active.apply_samples(old_step.change.samples)
step = paths.MCStep(mccycle=cycle,
active=active,
change=old_step.change)
steps.append(step)
analysis = StandardTISAnalysis(
network=self.mstis,
scheme=scheme,
max_lambda_calcs={t: {'bin_width': 0.1,
'bin_range': (-0.1, 1.1)}
for t in network.sampling_transitions},
steps=steps
)
# now we actually verify correctness
avg_t_in = (5.0 + 3.0) / 2
avg_t_out = (2.0 + 5.0 + 3.0 + 3.0) / 4
expected_flux = 1.0 / (avg_t_in + avg_t_out)
# NOTE: Apparently this approach screws up the TCP calculation. I
# think this is a problem in the fake data, not the simulation.
for flux in analysis.flux_matrix.values():
assert_almost_equal(flux, expected_flux)
@pytest.mark.parametrize('progress', ['all', 'default', 'none',
'tqdm', 'silent'])
def test_progress_setter(self, progress):
analysis = self.mstis_analysis
analysis.progress = progress
expected_flux, expected_ctp, expected_max_lambda = {
'all': (True, True, True),
'default': (True, True, False),
'none': (False, False, False),
'tqdm': (True, True, False),
'silent': (True, True, False),
}[progress]
flux_method = analysis.flux_method
assert flux_method.progress.keywords['leave'] is expected_flux
ctp_method = analysis.ctp_method
assert ctp_method.progress.keywords['leave'] is expected_ctp
max_lambda_methods = [tcp.max_lambda_calc
for tcp in analysis.tcp_methods.values()]
for max_lambda in max_lambda_methods:
prog = max_lambda.progress
assert prog.keywords['leave'] is expected_max_lambda
| mit |
jaidevd/scikit-learn | sklearn/metrics/tests/test_regression.py | 49 | 8058 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises, assert_raises_regex
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_squared_log_error(y_true, y_pred),
mean_squared_error(np.log(1 + y_true),
np.log(1 + y_pred)))
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = mean_squared_log_error(y_true, y_pred)
assert_almost_equal(error, 0.200, decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
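    # (with 0/1 targets every elementwise error is 0 or 1, so |e| == e**2 and
    # both metrics give 5/12 here)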
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_squared_log_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
"used when targets contain negative values.",
mean_squared_log_error, [-1.], [-1.])
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test__check_reg_targets_exception():
invalid_multioutput = 'this_value_is_not_valid'
expected_message = ("Allowed 'multioutput' string values are.+"
"You provided multioutput={!r}".format(
invalid_multioutput))
assert_raises_regex(ValueError, expected_message,
_check_reg_targets,
[1, 2, 3],
[[1], [2], [3]],
invalid_multioutput)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    # Checking for the condition in which both numerator and denominator
    # are zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput='raw_values')
assert_array_almost_equal(msle, msle2, decimal=2)
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput=[0.3, 0.7])
assert_almost_equal(msle, msle2, decimal=2)
| bsd-3-clause |
wkerzendorf/chiantipy | chiantipy/chianti/core/Continuum.py | 1 | 47815 | import os
import types
import numpy as np
from scipy import interpolate
try:
from matplotlib.delaunay.triangulate import Triangulation
except:
from scikits.delaunay.triangulate import Triangulation
import chianti.data as chdata
import chianti.util as util
import chianti.constants as const
ip = chdata.Ip
MasterList = chdata.MasterList
#import chianti
class continuum:
'''The top level class for continuum calculations.
    Includes methods for the calculation of the free-free and free-bound continua.'''
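    # A minimal usage sketch (illustrative values; the ion string follows the
    # usual CHIANTI element_stage convention handled by util.convertName):
    #   c = continuum('fe_14', temperature=2.e+6, density=1.e+9)
    #   c.freeFree(wvl)   -> c.FreeFree['rate']
    #   c.freeBound(wvl)  -> c.FreeBound['rate']
    # where wvl is a wavelength array in Angstroms.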
def __init__(self, ionStr, temperature=None, density=None):
nameDict = util.convertName(ionStr)
self.Z = nameDict['Z']
self.Ion = nameDict['Ion']
self.IonStr = ionStr
self.Dielectronic = 0
self.Defaults = chdata.Defaults
self.AbundanceName = self.Defaults['abundfile']
self.IoneqName = self.Defaults['ioneqfile']
#
# ip in eV, reading Ip of next lower level, needed for freeBound
if self.Ion > 1:
self.Ip=ip[self.Z-1, self.Ion-2]
else:
print ' in continuum, trying to use the neutral ion'
return
#
if type(temperature) != types.NoneType:
self.Temperature = np.array(temperature,'float64')
#
if type(density) != types.NoneType:
self.Density = np.asarray(density,'float64')
#
#----------------------------------------------------------------------------------------
#
def freeBoundEmiss(self, wvl, verner=1):
        '''Calculates the free-bound (radiative recombination) continuum emissivity of an ion.
        Uses the Gaunt factors of Karzas, W.J., Latter, R., 1961, ApJS, 6, 167;
        for recombination to the ground level, the photoionization cross sections of
        Verner and Yakovlev, 1995, A&AS, 109, 125 are used to develop the
        free-bound cross section.
        Provides emissivity in ergs cm^-2 s^-1 sr^-1 Angstrom^-1 for an individual ion;
        does not include the elemental abundance or ionization fraction.
        The specified ion is the target ion.'''
#
wvl = np.asarray(wvl, 'float64')
if hasattr(self, 'Temperature'):
temperature = self.Temperature
else:
print ' temperature undefined'
return
#
if hasattr(self, 'Fblvl'):
fblvl = self.Fblvl
else:
fblvlname = util.zion2filename(self.Z,self.Ion-1)+'.fblvl'
self.Fblvl = util.fblvlRead(fblvlname)
fblvl = self.Fblvl
# need some data for the recombining ion
if hasattr(self, 'rFblvl'):
rFblvl = self.rFblvl
else:
if self.Ion == self.Z+1:
                # then we are looking for the bare ion
rFblvl = {'mult':[1., 1.]}
else:
rfblvlname = util.zion2filename(self.Z,self.Ion)+'.fblvl' # previously self.Ion)
self.rFblvl = util.fblvlRead(rfblvlname)
rFblvl = self.rFblvl
# 6/9/2010 the recombining ion is the present ion
#
# for the ionization potential, Ip, must use that of the recombined ion
ipcm = self.Ip/const.invCm2Ev
#
nlvls = len(fblvl['lvl'])
        # pqn = principal quantum no. n
pqn = fblvl['pqn']
# l is angular moment quantum no. L
l = fblvl['l']
# energy level in inverse cm
ecm = fblvl['ecm']
for i in range(nlvls):
print ' lvl ecm wvl = ',i, ecm[i], 1.e+8/(ipcm-ecm[i])
        # statistical weights/multiplicities
mult = fblvl['mult']
multr = rFblvl['mult']
#
#
ipcm = self.Ip/const.invCm2Ev
wecm = ipcm-ecm
#
# get Karzas-Latter Gaunt factors
try:
klgfb = self.Klgfb
except:
self.Klgfb = util.klgfbRead()
klgfb = self.Klgfb
#
nWvl = wvl.size
nTemp = temperature.size
#
if verner:
lvl1 = 1
else:
lvl1 = 0
#
if (nTemp > 1) and (nWvl > 1):
mask = np.zeros((nlvls,nTemp,nWvl),'Bool')
fbrate = np.zeros((nlvls,nTemp,nWvl),'float64')
expf = np.zeros((nlvls,nTemp,nWvl),'float64')
ratg = np.zeros((nlvls),'float64')
fbRate = np.zeros((nTemp,nWvl),'float64')
if verner:
self.vernerCross(wvl)
vCross = self.VernerCross
#
ratg[0] = float(mult[0])/float(multr[0])
ipLvlEv = self.Ip - const.invCm2Ev*ecm[0]
ipLvlErg = const.ev2Erg*ipLvlEv
for itemp in range(nTemp):
mask[0,itemp] = 1.e+8/wvl < (ipcm - ecm[0])
expf[0,itemp] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature[itemp]))
fbrate[0,itemp] = (const.planck*const.light/(1.e-8*wvl))**5*const.verner*ratg[0]*expf[0,itemp]*vCross/temperature[itemp]**1.5
for ilvl in range(lvl1,nlvls):
ipLvlEv = self.Ip - const.invCm2Ev*ecm[ilvl]
ipLvlErg = const.ev2Erg*ipLvlEv
scaledE = np.log(const.ev2Ang/(ipLvlEv*wvl))
thisGf = klgfb['klgfb'][pqn[ilvl]-1, l[ilvl]]
spl = interpolate.splrep(klgfb['pe'], thisGf)
gf = np.exp(interpolate.splev(scaledE, spl))
ratg[ilvl] = float(mult[ilvl])/float(multr[0]) # ratio of statistical weights
#
for itemp in range(nTemp):
expf[ilvl] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature[itemp]))
ipLvlErg = const.ev2Erg*ipLvlEv
expf[ilvl,itemp] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature[itemp]))
mask[ilvl,itemp] = 1.e+8/wvl < (ipcm - ecm[ilvl])
fbrate[ilvl,itemp] = const.freeBound*ratg[ilvl]*(ipLvlErg**2/float(pqn[ilvl]))*gf*expf[ilvl,itemp]/(temperature[itemp]**1.5*(wvl)**2)
fbrma = np.ma.array(fbrate)
fbrma.mask = mask
fbrma.fill_value = 0.
fbRate = (fbrma).sum(axis=0)
fbRate.fill_value = 0.
self.FreeBoundEmiss = {'emiss':fbRate.data, 'temperature':temperature,'wvl':wvl}
#
elif (nTemp == 1) and (nWvl > 1):
mask = np.zeros((nlvls,nWvl),'Bool')
fbrate = np.zeros((nlvls,nWvl),'float64')
expf = np.zeros((nlvls,nWvl),'float64')
ratg = np.zeros((nlvls),'float64')
if verner:
self.vernerCross(wvl)
vCross = self.VernerCross
#
mask[0] = 1.e+8/wvl < (ipcm - ecm[0])
ratg[0] = float(mult[0])/float(multr[0])
ipLvlEv = self.Ip - const.invCm2Ev*ecm[0]
print ' verner ipLvlEv =', ipLvlEv
ipLvlErg = const.ev2Erg*ipLvlEv
expf[0] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature))
fbrate[0] = (const.planck*const.light/(1.e-8*wvl))**5*const.verner*ratg[0]*vCross/temperature**1.5
# print ' expf verner = ', expf[0]
# print ' fbrate verner = ', fbrate[0]
for ilvl in range(lvl1,nlvls):
# scaled energy is relative to the ionization potential of each individual level
ipLvlEv = self.Ip - const.invCm2Ev*ecm[ilvl]
# print ' ilvl, ipLvlEv = ', ilvl, ipLvlEv
scaledE = np.log(const.ev2Ang/(ipLvlEv*wvl))
thisGf = klgfb['klgfb'][pqn[ilvl]-1, l[ilvl]]
spl = interpolate.splrep(klgfb['pe'], thisGf)
gf = np.exp(interpolate.splev(scaledE, spl))
mask[ilvl] = 1.e+8/wvl < (ipcm - ecm[ilvl])
ratg[ilvl] = float(mult[ilvl])/float(multr[0]) # ratio of statistical weights
ipLvlErg = const.ev2Erg*ipLvlEv
expf[ilvl] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature))
fbrate[ilvl] = const.freeBound*ratg[ilvl]*(ipLvlErg**2/float(pqn[ilvl]))*gf/(temperature**1.5*(wvl)**2)
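                # i.e. const.freeBound * ratg * (ipLvlErg**2 / pqn) * gf
                #      / (T**1.5 * wvl**2); the Boltzmann factor expf is
                #      multiplied in when the levels are summed below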
# print ' ilvl, scaledE = ', ilvl, scaledE
# print ' ilvl, thisGf = ', ilvl, thisGf
# print ' ilvl, gf = ', ilvl, gf
# print ' ilvl ratg = ', ilvl, ratg[ilvl]
# print ' ilvl mask = ', ilvl, mask[ilvl]
# print ' ilvl expf = ', ilvl, expf[ilvl]
# print ' fbrate = ', ilvl, fbrate[ilvl]
fbrma = np.ma.array(fbrate)
fbrma.mask = mask
fbrma.fill_value = 0.
# factor of 1.e-8 converts to Angstrom^-1, otherwise it would be cm^-1
fbRate = (expf*fbrma).sum(axis=0)
fbRate.fill_value = 0.
self.FreeBoundEmiss = {'emiss':fbRate.data, 'temperature':temperature,'wvl':wvl}
else:
mask = np.zeros((nlvls,nTemp),'Bool')
fbrate = np.zeros((nlvls,nTemp),'float64')
expf = np.zeros((nlvls,nTemp),'float64')
ratg = np.zeros((nlvls),'float64')
if verner:
self.vernerCross(wvl)
vCross = self.VernerCross
#
mask[0] = 1.e+8/wvl < (ipcm - ecm[0])
ratg[0] = float(mult[0])/float(multr[0])
ipLvlEv = self.Ip - const.invCm2Ev*ecm[0]
ipLvlErg = const.ev2Erg*ipLvlEv
expf[0] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature))
fbrate[0] = (const.planck*const.light/(1.e-8*wvl))**5*const.verner*ratg[0]*expf[0]*vCross/temperature**1.5
for ilvl in range(lvl1,nlvls):
# scaled energy is relative to the ionization potential of each individual level
ipLvlEv = self.Ip - const.invCm2Ev*ecm[ilvl]
scaledE = np.log(const.ev2Ang/(ipLvlEv*wvl))
thisGf = klgfb['klgfb'][pqn[ilvl]-1, l[ilvl]]
spl = interpolate.splrep(klgfb['pe'], thisGf)
gf = np.exp(interpolate.splev(scaledE, spl))
mask[ilvl] = 1.e+8/wvl < (ipcm - ecm[ilvl])
ratg[ilvl] = float(mult[ilvl])/float(multr[0]) # ratio of statistical weights
ipLvlErg = const.ev2Erg*ipLvlEv
expf[ilvl] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature))
fbrate[ilvl] = const.freeBound*ratg[ilvl]*(ipLvlErg**2/float(pqn[ilvl]))*expf[ilvl]*gf/(temperature**1.5*(wvl)**2)
fbrma = np.ma.array(fbrate)
fbrma.mask = mask
fbrma.fill_value = 0.
# factor of 1.e-8 converts to Angstrom^-1, otherwise it would be cm^-1
fbRate = (fbrma).sum(axis=0)
fbRate.fill_value = 0.
self.FreeBoundEmiss = {'emiss':fbRate.data, 'temperature':temperature,'wvl':wvl}
#
# ----------------------------------------------------------------------------
#
def freeBound(self, wvl, verner=1):
        '''Calculates the free-bound (radiative recombination) continuum rate coefficient of an ion,
        where the ion is taken to be the recombined ion,
        including the elemental abundance and the ionization equilibrium population.
        Uses the Gaunt factors of Karzas, W.J., Latter, R., 1961, ApJS, 6, 167;
        for recombination to the ground level, the photoionization cross sections of
        Verner and Yakovlev, 1995, A&AS, 109, 125 are used to develop the free-bound cross section.
        Provides emissivity in ergs cm^-2 s^-1 sr^-1 Angstrom^-1.'''
wvl = np.asarray(wvl, 'float64')
#
# ip in eV, for freebound
if self.Ion > 1 :
self.Ip=ip[self.Z-1, self.Ion-2]
else:
print ' in freeBound, trying to use the neutral ion as the recombining ion'
self.FreeBound={}
return
try:
temperature = self.Temperature
except:
print ' temperature undefined'
return
# the recombined ion contains that data for fblvl
try:
fblvl = self.Fblvl
except:
fblvlname = util.zion2filename(self.Z,self.Ion-1)+'.fblvl'
self.Fblvl = util.fblvlRead(fblvlname)
fblvl = self.Fblvl
# in case there is no fblvl file
if 'errorMessage' in fblvl.keys():
self.FreeBound = fblvl
return
# need data for the current/recombining ion
try:
rFblvl = self.rFblvl
except:
            if self.Ion == self.Z+1:
                # this is a bare ion
                rFblvl = {'mult':[1., 1.]}
            else:
                rfblvlname = util.zion2filename(self.Z,self.Ion)+'.fblvl'
                self.rFblvl = util.fblvlRead(rfblvlname)
rFblvl = self.rFblvl
if 'errorMessage' in rFblvl.keys():
            self.FreeBound = rFblvl
return
try:
gIoneq = self.IoneqOne
except:
self.ioneqOne()
gIoneq = self.IoneqOne
#
if not np.any(gIoneq) > 0:
self.FreeBound = {'errorMessage':' no non-zero values of ioneq'}
return
try:
abund = self.Abundance
except:
self.AbundanceAll = util.abundanceRead(abundancename = self.AbundanceName)
self.Abundance = self.AbundanceAll['abundance'][self.Z-1]
abund = self.Abundance
#
ipcm = self.Ip/const.invCm2Ev
iperg = self.Ip*const.ev2Erg
#
nlvls = len(fblvl['lvl'])
        # pqn = principal quantum no. n
pqn = np.asarray(fblvl['pqn'], 'float64')
# l is angular moment quantum no. L
l = fblvl['l']
# energy level in inverse cm
ecm = fblvl['ecm']
eerg = np.asarray(ecm, 'float64')*const.invCm2Erg
# get log of photon energy relative to the ionization potential
mult = fblvl['mult']
multr = rFblvl['mult']
# ipcm = self.Ip/const.invCm2Ev
#
#
wecm=1.e+8/(ipcm-ecm)
#
        # get Karzas-Latter Gaunt factors
try:
klgfb = self.Klgfb
except:
self.Klgfb = util.klgfbRead()
klgfb = self.Klgfb
#
nWvl = wvl.size
nTemp = temperature.size
        # statistical weights/multiplicities
mult = fblvl['mult']
multr = rFblvl['mult']
#
if verner:
lvl1 = 1
else:
lvl1 = 0
#
nWvl = wvl.size
nTemp = temperature.size
#
if (nTemp > 1) and (nWvl > 1):
mask = np.zeros((nlvls,nTemp,nWvl),'Bool')
fbrate = np.zeros((nlvls,nTemp,nWvl),'float64')
expf = np.zeros((nlvls,nTemp,nWvl),'float64')
ratg = np.zeros((nlvls),'float64')
fbRate = np.zeros((nTemp,nWvl),'float64')
if verner:
self.vernerCross(wvl)
vCross = self.VernerCross
ratg[0] = float(mult[0])/float(multr[0])
ipLvlEv = self.Ip - const.invCm2Ev*ecm[0]
ipLvlErg = const.ev2Erg*ipLvlEv
ipLvlErg = iperg - eerg[0]
for itemp in range(nTemp):
mask[0,itemp] = 1.e+8/wvl < (ipcm - ecm[0])
expf[0,itemp] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature[itemp]))
fbrate[0,itemp] = (const.planck*const.light/(1.e-8*wvl))**5*const.verner*gIoneq[itemp]*ratg[0]*expf[0,itemp]*vCross/temperature[itemp]**1.5
for ilvl in range(lvl1,nlvls):
ipLvlEv = self.Ip - const.invCm2Ev*ecm[ilvl]
ipLvlErg = const.ev2Erg*ipLvlEv
ipLvlErg = iperg - eerg[0]
scaledE = np.log(const.ev2Ang/(ipLvlEv*wvl))
thisGf = klgfb['klgfb'][pqn[ilvl]-1, l[ilvl]]
spl = interpolate.splrep(klgfb['pe'], thisGf)
gf = np.exp(interpolate.splev(scaledE, spl))
ratg[ilvl] = float(mult[ilvl])/float(multr[0]) # ratio of statistical weights
#
for itemp in range(nTemp):
expf[ilvl] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature[itemp]))
ipLvlErg = const.ev2Erg*ipLvlEv
expf[ilvl,itemp] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature[itemp]))
mask[ilvl,itemp] = 1.e+8/wvl < (ipcm - ecm[ilvl])
fbrate[ilvl,itemp] = const.freeBound*gIoneq[itemp]*ratg[ilvl]*(ipLvlErg**2/float(pqn[ilvl]))*gf*expf[ilvl,itemp]/(temperature[itemp]**1.5*(wvl)**2)
fbrma = np.ma.array(fbrate)
fbrma.mask = mask
fbrma.fill_value = 0.
fbRate = (fbrma).sum(axis=0)
fbRate.fill_value = 0.
self.FreeBound = {'rate':abund*fbRate.data, 'temperature':temperature,'wvl':wvl}
#
elif (nTemp == 1) and (nWvl > 1):
mask = np.zeros((nlvls,nWvl),'Bool')
fbrate = np.zeros((nlvls,nWvl),'float64')
expf = np.zeros((nlvls,nWvl),'float64')
ratg = np.zeros((nlvls),'float64')
if verner:
self.vernerCross(wvl)
vCross = self.VernerCross
# mask is true for bad values
mask[0] = 1.e+8/wvl < (ipcm - ecm[0])
ratg[0] = float(mult[0])/float(multr[0])
ipLvlEv = self.Ip - const.invCm2Ev*ecm[0]
ipLvlErg = const.ev2Erg*ipLvlEv
# ipLvlErg = iperg - eerg[0]
# print ' verner ipLvlErg = ', ipLvlErg, ipLvlErg/(const.boltzmann*temperature)
expf[0] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature))
fbrate[0] = (const.planck*const.light/(1.e-8*wvl))**5*const.verner*ratg[0]*expf[0]*vCross/temperature**1.5
#
for ilvl in range(lvl1,nlvls):
# scaled energy is relative to the ionization potential of each individual level
ipLvlEv = self.Ip - const.invCm2Ev*ecm[ilvl]
scaledE = np.log(const.ev2Ang/(ipLvlEv*wvl))
thisGf = klgfb['klgfb'][pqn[ilvl]-1, l[ilvl]]
spl = interpolate.splrep(klgfb['pe'], thisGf)
gf = np.exp(interpolate.splev(scaledE, spl))
mask[ilvl] = 1.e+8/wvl < (ipcm - ecm[ilvl])
ratg[ilvl] = float(mult[ilvl])/float(multr[0]) # ratio of statistical weights
ipLvlErg = const.ev2Erg*ipLvlEv
# ipLvlErg = iperg - eerg[ilvl]
# print ' ilvl epLvlErg = ', ipLvlErg, ipLvlErg/(const.boltzmann*temperature)
expf[ilvl] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature))
fbrate[ilvl] = const.freeBound*ratg[ilvl]*(ipLvlErg**2/float(pqn[ilvl]))*expf[ilvl]*gf/(temperature**1.5*(wvl)**2)
# fbrate[ilvl] = const.freeBound*ratg[ilvl]*(eerg[ilvl]**2/float(pqn[ilvl]))*expf[ilvl]*gf/(temperature**1.5*(wvl)**2)
fbrma = np.ma.array(fbrate)
fbrma.mask = mask
fbrma.fill_value = 0.
# factor of 1.e-8 converts to Angstrom^-1, otherwise it would be cm^-1
fbRate = abund*gIoneq*fbrma.sum(axis=0)
fbRate.fill_value = 0.
self.FreeBound = {'rate':fbRate.data, 'temperature':temperature,'wvl':wvl}
#elif (nTemp > 1) and (nWvl == 1):
else:
mask = np.zeros((nlvls,nTemp),'Bool')
fbrate = np.zeros((nlvls,nTemp),'float64')
expf = np.zeros((nlvls,nTemp),'float64')
ratg = np.zeros((nlvls),'float64')
if verner:
self.vernerCross(wvl)
vCross = self.VernerCross
mask[0] = 1.e+8/wvl < (ipcm - ecm[0])
ratg[0] = float(mult[0])/float(multr[0])
ipLvlEv = self.Ip - const.invCm2Ev*ecm[0]
ipLvlErg = const.ev2Erg*ipLvlEv
expf[0] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature))
fbrate[0] = (const.planck*const.light/(1.e-8*wvl))**5*const.verner*ratg[0]*expf[0]*vCross/temperature**1.5
for ilvl in range(lvl1,nlvls):
# scaled energy is relative to the ionization potential of each individual level
ipLvlEv = self.Ip - const.invCm2Ev*ecm[ilvl]
scaledE = np.log(const.ev2Ang/(ipLvlEv*wvl))
thisGf = klgfb['klgfb'][pqn[ilvl]-1, l[ilvl]]
spl = interpolate.splrep(klgfb['pe'], thisGf)
gf = np.exp(interpolate.splev(scaledE, spl))
mask[ilvl] = 1.e+8/wvl < (ipcm - ecm[ilvl])
ratg[ilvl] = float(mult[ilvl])/float(multr[0]) # ratio of statistical weights
ipLvlErg = const.ev2Erg*ipLvlEv
expf[ilvl] = np.exp((ipLvlErg - 1.e+8*const.planck*const.light/wvl)/(const.boltzmann*temperature))
fbrate[ilvl] = const.freeBound*ratg[ilvl]*(ipLvlErg**2/float(pqn[ilvl]))*expf[ilvl]*gf/(temperature**1.5*(wvl)**2)
fbrma = np.ma.array(fbrate)
fbrma.mask = mask
fbrma.fill_value = 0.
# factor of 1.e-8 converts to Angstrom^-1, otherwise it would be cm^-1
fbRate = abund*gIoneq*(fbrma).sum(axis=0)
fbRate.fill_value = 0.
self.FreeBound = {'rate':fbRate.data, 'temperature':temperature,'wvl':wvl}
#
# ----------------------------------------------------------------------------------------
#
#
def freeBoundLoss(self):
        '''Calculates the free-bound (radiative recombination) energy loss rate coefficient of an ion,
        where the ion is taken to be the recombined ion,
        including the elemental abundance and the ionization equilibrium population.
        Uses the Gaunt factors of Karzas, W.J., Latter, R., 1961, ApJS, 6, 167.
        Provides rate in ergs cm^-2 s^-1.
        '''
if hasattr(self, 'Temperature'):
temperature = self.Temperature
else:
print ' temperature undefined'
return
#
if self.Ion > 1:
self.Ip=ip[self.Z-1, self.Ion-2]
else:
print ' in freeBound, trying to use the neutral ion as the recombining ion'
self.FreeBound={}
return
#
if hasattr(self, 'Fblvl'):
fblvl = self.Fblvl
else:
fblvlname = util.zion2filename(self.Z,self.Ion-1)+'.fblvl'
if os.path.isfile(fblvlname):
self.Fblvl = util.fblvlRead(fblvlname)
fblvl = self.Fblvl
else:
# print ' cannot find file - ', fblvlname
# can't compute this ion
fblvl = {}
# need some data for the recombining/target ion
if hasattr(self, 'rFblvl'):
rFblvl = self.rFblvl
else:
if self.Ion == self.Z+1:
                # then we are looking for the bare ion
rFblvl = {'mult':[1., 1.]}
else:
rfblvlname = util.zion2filename(self.Z,self.Ion)+'.fblvl'
if os.path.isfile(rfblvlname):
self.rFblvl = util.fblvlRead(rfblvlname)
rFblvl = self.rFblvl
else:
# print ' cannot find file - ', rfblvlname
# can't do this ion
rFblvl = {}
if hasattr(self, 'IoneqOne'):
gIoneq = self.IoneqOne
else:
self.ioneqOne()
gIoneq = self.IoneqOne
#
if hasattr(self, 'Abundance'):
abund = self.Abundance
else:
self.AbundanceAll = util.abundanceRead(abundancename = self.AbundanceName)
self.Abundance = self.AbundanceAll['abundance'][self.Z-1]
abund = self.Abundance
#
#ipcm = self.Ip/const.invCm2Ev
# get log of photon energy relative to the ionization potential
#
#
#wecm=1.e+8/(ipcm-ecm)
#
        # get Karzas-Latter Gaunt factors
if hasattr(self, 'Klgfb'):
klgfb = self.Klgfb
else:
self.Klgfb = util.klgfbRead()
klgfb = self.Klgfb
#
nTemp = temperature.size
        # statistical weights/multiplicities
#
#
#wecm=1.e+8/(ipcm-ecm)
#
# sometime the rFblvl file does not exist
if fblvl.has_key('mult') and rFblvl.has_key('mult'):
#
nlvls = len(fblvl['lvl'])
            # pqn = principal quantum number n
pqn = fblvl['pqn']
            # l is the orbital angular momentum quantum number l
l = fblvl['l']
# energy level in inverse cm
ecm = fblvl['ecm']
mult = fblvl['mult']
multr = rFblvl['mult']
fbrate = np.zeros((nlvls,nTemp),'float64')
ratg = np.zeros((nlvls),'float64')
for ilvl in range(nlvls):
# scaled energy is relative to the ionization potential of each individual level
# will add the average energy of a free electron to this to get typical photon energy to
# evaluate the gaunt factor
hnuEv = 1.5*const.boltzmann*temperature/const.ev2Erg
ipLvlEv = self.Ip - const.invCm2Ev*ecm[ilvl]
scaledE = np.log(hnuEv/ipLvlEv)
thisGf = klgfb['klgfb'][pqn[ilvl]-1, l[ilvl]]
spl = interpolate.splrep(klgfb['pe'], thisGf)
gf = np.exp(interpolate.splev(scaledE, spl))
ratg[ilvl] = float(mult[ilvl])/float(multr[0]) # ratio of statistical weights
ipLvlErg = const.ev2Erg*ipLvlEv
fbrate[ilvl] = ratg[ilvl]*(ipLvlErg**2/float(pqn[ilvl]))*gf/np.sqrt(temperature)
fbRate = abund*gIoneq*const.freeBoundLoss*(fbrate.sum(axis=0))
else:
fbRate = np.zeros((nTemp),'float64')
self.FreeBoundLoss = {'rate':fbRate, 'temperature':temperature}
#
# ----------------------------------------------------------------------------------------
#
def vernerCross(self,wvl):
'''Calculates the photoionization cross section.
        The data are from Verner and Yakovlev;
        the cross section refers to the next lower ionization stage.'''
try:
vernerDat = self.VernerDat
except:
self.VernerDat = util.vernerRead()
vernerDat = self.VernerDat
z = self.Z
stage = self.Ion
ip = self.Ip
ipcm = self.Ip/const.invCm2Ev
ecm = self.Fblvl['ecm']
#
en = const.ev2Ang/wvl
y = en/vernerDat['e0'][z,stage-1]
fy= vernerDat['sig0'][z,stage-1]*((y - 1.)**2 + vernerDat['yw'][z,stage-1]**2) * y**(-5.5 - vernerDat['l'][z,stage-1] + 0.5*vernerDat['p'][z,stage-1]) * (1. + np.sqrt(y/vernerDat['ya'][z,stage-1]))**(-vernerDat['p'][z,stage-1])
# mask = en < vernerDat['eth'][z,stage]
# will use Chianti values for energy of ground level
mask = (1.e+8/wvl) < (ipcm - ecm[0])
vCross = np.ma.array(fy)
vCross.mask = mask
vCross.fill_value = 0.
# cross-section will be output in cm^2
self.VernerCross = vCross*1.e-18
#
# ----------------------------------------------------------------------------------------
#
def freeFree(self, wvl):
'''Calculates the free-free emission for a single ion.
Includes elemental abundance and ionization equilibrium population.
Uses Itoh where valid and Sutherland elsewhere'''
if self.Ion == 1:
self.FreeFree = {'errorMessage':' freefree is not produced by neutrals'}
else:
wvl = np.asarray(wvl, 'float64')
ffs = self.sutherland(wvl)
#
ffi = self.itoh(wvl)
ff = ffs['suthFf']
if not 'errorMessage' in ffi.keys():
iff = ffi['itohFf']
itohMask = np.logical_not(iff.mask)
ff[itohMask] = iff[itohMask]
#
try:
gIoneq = self.IoneqOne
except:
self.ioneqOne()
gIoneq = self.IoneqOne
if type(gIoneq) == types.FloatType:
# only one temperature specified
if gIoneq == 0.:
ffRate = np.zeros(wvl.size)
self.FreeFree = {'rate':ffRate, 'temperature':self.Temperature,'wvl':wvl}
return
else:
if gIoneq.sum() == 0.:
ffRate = np.zeros((self.Temperature.size, wvl.size), 'float64')
self.FreeFree = {'rate':ffRate, 'temperature':self.Temperature,'wvl':wvl}
return
# print ' gIoneq = ', gIoneq
if wvl.size > 1:
gIoneq = gIoneq.repeat(wvl.size).reshape(self.Temperature.size,wvl.size)
#
try:
abund = self.Abundance
except:
self.AbundanceAll = util.abundanceRead(abundancename = self.AbundanceName)
self.Abundance = self.AbundanceAll['abundance'][self.Z-1]
abund = self.Abundance
#
ffRate = (const.freeFree*(self.Z)**2*abund*gIoneq*ff).squeeze()
self.FreeFree = {'rate':ffRate, 'temperature':self.Temperature,'wvl':wvl}
#
# ----------------------------------------------------------------------------------------
#
def freeFreeEmiss(self, wvl):
'''Calculates the free-free emissivity for a single ion.
does not include element abundance or ionization fraction
Uses Itoh where valid and Sutherland elsewhere'''
if self.Ion == 1:
self.FreeFreeEmiss = {'errorMessage':' freefree is not produced by neutrals'}
else:
wvl = np.asarray(wvl, 'float64')
ffs = self.sutherland(wvl)
ff = ffs['suthFf']
ffi = self.itoh(wvl)
if 'errorMessage' not in ffi.keys():
iff = ffi['itohFf']
itohMask = np.logical_not(iff.mask)
ff[itohMask] = iff[itohMask]
#
ffRate = (const.freeFree*(self.Z)**2*ff).squeeze()
            self.FreeFreeEmiss = {'rate':ffRate, 'temperature':self.Temperature,'wvl':wvl}
#
# ----------------------------------------------------------------------------------------
#
def freeFreeLoss(self):
        '''Calculates the total free-free (bremsstrahlung) radiative loss rate for a single ion.
        Includes elemental abundance and ionization equilibrium population.
        Uses the temperature-integrated free-free gaunt factors of Sutherland (1998)'''
#
if self.Ion == 1:
            self.FreeFreeLoss = {'errorMessage':' freefree is not produced by neutrals'}
else:
if hasattr(self, 'Temperature'):
temperature = self.Temperature
else:
print ' temperature undefined'
return
if hasattr(self, 'Gffint'):
gffint = self.Gffint['gffint']
g2 = self.Gffint['g2']
else:
self.Gffint = util.gffintRead()
gffint = self.Gffint['gffint']
g2 = self.Gffint['g2']
#
gamma2 = self.Ip*const.ev2Erg/(const.boltzmann*temperature)
#
spl = interpolate.splrep(g2, gffint)
gff = interpolate.splev(np.log(gamma2), spl)
#
if hasattr(self, 'IoneqOne'):
gIoneq = self.IoneqOne
else:
self.ioneqOne()
gIoneq = self.IoneqOne
#
if hasattr(self, 'Abundance'):
abund = self.Abundance
else:
self.AbundanceAll = util.abundanceRead(abundancename = self.AbundanceName)
self.Abundance = self.AbundanceAll['abundance'][self.Z-1]
abund = self.Abundance
#
ffRate = const.freeFreeLoss*(self.Z)**2*abund*gIoneq*gff*np.sqrt(temperature)
self.FreeFreeLoss = {'rate':ffRate, 'temperature':temperature}
#
# ----------------------------------------------------------------------------------------
#
def klgfbInterp(self, wvl, n, l):
'''A Python version of the CHIANTI IDL procedure karzas_xs.
        Interpolates the free-bound gaunt factor of Karzas and Latter (1961, Astrophysical Journal
        Supplement Series, 6, 167) as a function of wavelength (wvl).'''
try:
klgfb = self.Klgfb
except:
self.Klgfb = util.klgfbRead()
klgfb = self.Klgfb
# get log of photon energy relative to the ionization potential
        sclE = np.log(const.ev2Ang/(wvl*self.Ip))  # photon energy (eV) relative to the ionization potential, as in freeBound()
thisGf = klgfb['klgfb'][n-1, l]
spl = interpolate.splrep(klgfb['pe'], thisGf)
gf = interpolate.splev(sclE, spl)
return gf
#
# ----------------------------------------------------------------------------------------
#
def itoh(self, wvl):
'''Calculates free-free emission with the free-free gaunt factors of Itoh et al. (ApJS 128, 125, 2000).
the relativistic values are valid for 6. < log10(T) < 8.5 and -4. < log10(u) < 1.'''
wvl = np.array(wvl, 'float64')
try:
itohCoef = self.ItohCoef
except:
self.ItohCoef = util.itohRead()['itohCoef'][self.Z-1].reshape(11, 11)
itohCoef = self.ItohCoef
try:
t = (np.log10(self.Temperature) -7.25)/1.25
except:
errorMessage = ' temperature undefined in continuum.itoh'
print(errorMessage)
return {'errorMessage':errorMessage}
if type(self.Temperature) == types.FloatType:
nTemp = 1
else:
nTemp = self.Temperature.size
#
nWvl = wvl.size
#
if (nTemp > 1) and (nWvl > 1):
u = (const.planck*const.light*1.e+8/const.boltzmann)*np.outer(1./self.Temperature, 1./wvl )
lU = (np.log10(u) + 1.5)/2.5
lT = (np.log10(self.Temperature) -7.25)/1.25
g = np.zeros((nTemp, nWvl), 'float64')
rad = np.ma.zeros((nTemp, nWvl), 'float64')
for itemp in range(nTemp):
for j in range(11):
for i in range(11):
g[itemp]+= itohCoef[i,j]*(lT[itemp]**i)*(lU[itemp]**j)
rad[itemp] = (1./wvl)**2*g[itemp]*np.exp(-u[itemp])/np.sqrt(self.Temperature[itemp])
tArray = np.zeros((1, len(self.Temperature)), 'float64')
tArray[0] = self.Temperature
t2Array = np.repeat(tArray.transpose(), len(wvl), axis=1)
nonValidT1 = np.log10(t2Array) < 6.
nonValidT2 = np.log10(t2Array) > 8.5
nonValidT = np.logical_or(nonValidT1, nonValidT2)
nonValidU1 = np.log10(u) < -4.
nonValidU2 = np.log10(u) > 1.
nonValidU = np.logical_or(nonValidU1, nonValidU2)
nonValid = np.logical_or(nonValidT, nonValidU)
rad.mask = nonValid
rad.set_fill_value(0.)
g=np.ma.array(g, mask=nonValid, fill_value=0.)
return {'itohGff':g, 'itohFf':rad}
elif (nTemp > 1) and (nWvl == 1):
u = (const.planck*const.light*1.e+8/const.boltzmann)/(self.Temperature*wvl )
lU = (np.log10(u) + 1.5)/2.5
lT = (np.log10(self.Temperature) -7.25)/1.25
g = np.zeros((nTemp), 'float64')
rad = np.ma.zeros((nTemp), 'float64')
for itemp in range(nTemp):
for j in range(11):
for i in range(11):
g[itemp]+= itohCoef[i,j]*(lT[itemp]**i)*(lU[itemp]**j)
rad[itemp] = (1./wvl)**2*g[itemp]*np.exp(-u[itemp])/np.sqrt(self.Temperature[itemp])
nonValidT1 = np.log10(self.Temperature) < 6.
nonValidT2 = np.log10(self.Temperature) > 8.5
nonValidT = np.logical_or(nonValidT1, nonValidT2)
nonValidU1 = np.log10(u) < -4.
nonValidU2 = np.log10(u) > 1.
nonValidU = np.logical_or(nonValidU1, nonValidU2)
nonValid = np.logical_or(nonValidT, nonValidU)
rad.mask = nonValid
rad.set_fill_value(0.)
g=np.ma.array(g, mask=nonValid, fill_value=0.)
return {'itohGff':g, 'itohFf':rad}
elif (nTemp == 1) and (nWvl > 1):
            if (np.log10(self.Temperature) < 6.) or (np.log10(self.Temperature) > 8.5):
errorMessage ='invalid temperature in continuum.itoh()'
return {'errorMessage':errorMessage}
else:
u = (const.planck*const.light*1.e+8/const.boltzmann)/(self.Temperature*wvl )
lU = (np.log10(u) + 1.5)/2.5
lT = (np.log10(self.Temperature) -7.25)/1.25
g = np.zeros(nWvl, 'float64')
rad = np.ma.zeros((nWvl), 'float64')
for j in range(11):
for i in range(11):
g+= itohCoef[i,j]*(lT**i)*(lU**j)
rad = np.ma.array((1./wvl)**2*g*np.exp(-u)/np.sqrt(self.Temperature), 'Float64')
nonValidU1 = np.log10(u) < -4.
nonValidU2 = np.log10(u) > 1.
nonValidU = np.logical_or(nonValidU1, nonValidU2)
nonValid = nonValidU
rad.mask = nonValid
rad.set_fill_value(0.)
g=np.ma.array(g, mask=nonValid, fill_value=0.)
return {'itohGff':g, 'itohFf':rad}
elif (nTemp == 1) and (nWvl == 1):
u = (const.planck*const.light*1.e+8/const.boltzmann)/(self.Temperature*wvl )
            if (np.log10(self.Temperature) < 6.) or (np.log10(self.Temperature) > 8.5):
errorMessage ='invalid temperature in continuum.itoh()'
return {'errorMessage':errorMessage}
            elif (np.log10(u) < -4.) or (np.log10(u) > 1.):
errorMessage ='invalid temperature/wavelength in continuum.itoh()'
return {'errorMessage':errorMessage}
else:
u = (const.planck*const.light*1.e+8/const.boltzmann)/(self.Temperature*wvl )
lU = (np.log10(u) + 1.5)/2.5
lT = (np.log10(self.Temperature) -7.25)/1.25
g = np.zeros(nWvl, 'float64')
rad = np.ma.zeros((nWvl), 'float64')
for j in range(11):
for i in range(11):
g+= itohCoef[i,j]*(lT**i)*(lU**j)
rad = np.ma.array((1./wvl)**2*g*np.exp(-u)/np.sqrt(self.Temperature), 'Float64')
nonValidU1 = np.log10(u) < -4.
nonValidU2 = np.log10(u) > 1.
nonValidU = np.logical_or(nonValidU1, nonValidU2)
nonValid = nonValidU
rad.mask = nonValid
rad.set_fill_value(0.)
return {'itohGff':g, 'itohFf':rad}
#
# - - - - - - - - - - - - - - - - - - - - - - -
#
def sutherland(self, wvl):
'''Calculates the free-free continuum using the free-free gaunt factor calculations of Sutherland, 1998, MNRAS, 300, 321.
        The wavelengths (wvl) will be sorted first.'''
#
wvl = np.array(wvl, 'float64')
nWvl = wvl.size
# factor = 5.44436e-39
try:
temperature = self.Temperature
except:
errorMessage = ' temperature undefined in continuum.sutherland'
print(errorMessage)
return {'errorMessage':errorMessage}
# read in the gaunt factors, if necessary and get interpolator
try:
gffInterpolator = self.GffInterpolator
except:
self.Gff = util.gffRead()
gff = self.Gff
iu=(np.log10(gff['u1d']) + 4.)*10.
ig=(np.log10(gff['g21d']) + 4.)*5.
gaunt = gff['gff']
#tr=Triangulation(iu.flatten(),ig.flatten())
tr=Triangulation(iu,ig)
self.GffInterpolator = tr.nn_interpolator(gaunt.flatten())
gffInterpolator = self.GffInterpolator
#
gga = np.array((float(self.Z)**2*const.ryd2erg/const.boltzmann)*(1./temperature),'float64')
nonValidGg1 = np.log10(gga) < -4.
nonValidGg2 = np.log10(gga) > 4.
nonValidGg = np.logical_or(nonValidGg1, nonValidGg2)
ggOut = np.ma.array(gga, mask = nonValidGg, fill_value=True)
iGg = np.ma.array((np.log10(gga) + 4.)*5., mask=nonValidGg, fill_value=0.)
#
if nonValidGg.sum():
errorMessage = 'no valid temperatures in continuum.sutherland'
print(errorMessage)
return {'errorMessage':errorMessage}
else:
#iUu = np.ma.array((np.log10(uu) + 4.)*10., mask=nonValidUu, fill_value=0.)
#iGg = (np.log10(gg) + 4.)*5.
#print ' iGg.shape = ',iGg, iGg.shape
#
nWvl = wvl.size
nTemp = temperature.size
#
if (nTemp > 1) and (nWvl > 1):
ff = np.ma.zeros((nWvl, nTemp), 'float64')
gffOut1 = np.ma.zeros((nWvl, nTemp), 'float64')
gffOutMask = np.zeros((nWvl, nTemp), 'Bool')
uuOut = np.zeros((nWvl, nTemp), 'float64')
for iwvl in range(nWvl):
uu = ((const.planck*const.light*1.e+8/const.boltzmann)/(wvl[iwvl]*temperature)) #.flatten()
nonValidUu1 = np.log10(uu) < -4.
nonValidUu2 = np.log10(uu) > 4.
nonValidUu = np.logical_or(nonValidUu1, nonValidUu2)
gffOutMask[iwvl] = nonValidUu
uuOut[iwvl] = np.ma.array(uu, mask=nonValidUu, fill_value=True)
iUu = np.ma.array((np.log10(uu) + 4.)*10., mask=nonValidUu, fill_value=0.)
gffOut1[iwvl] = gffInterpolator(iUu, iGg)
wvlt = 1./(wvl[iwvl]**2*np.sqrt(temperature)) # was sortedTemperature
ff[iwvl] = (np.exp(-uuOut[iwvl])*gffOut1[iwvl]*wvlt)
gffOut1.mask = gffOutMask
gffOut1.set_fill_value(0.)
gffOut = gffOut1.transpose()
ff.mask = gffOutMask
ff.set_fill_value(0.)
return {'suthFf':ff.transpose(), 'suthGff':gffOut}
#
if (nTemp == 1) and (nWvl > 1):
uu = ((const.planck*const.light*1.e+8/const.boltzmann)/(wvl*temperature)) # .flatten()
nonValidUu1 = np.log10(uu) < -4.
nonValidUu2 = np.log10(uu) > 4.
nonValidUu = np.logical_or(nonValidUu1, nonValidUu2)
gffOutMask = nonValidUu
iUu = (np.log10(uu) + 4.)*10.
gffOut1 = gffInterpolator(iUu, iGg.repeat(nWvl))
wvlt = 1./(wvl**2*np.sqrt(temperature))
ff = np.ma.array(np.exp(-uu)*gffOut1*wvlt)
ff.mask=gffOutMask
ff.set_fill_value(0.)
gffOut = np.ma.array(gffOut1, mask=gffOutMask, fill_value=0.)
return {'suthFf':ff, 'suthGff':gffOut, 'iUu':iUu, 'gffOut1':gffOut1, 'wvlt':wvlt, 'iGg':iGg.repeat(nWvl), 'gffInterpolator':gffInterpolator}
#elif (nTemp > 1) and (nWvl == 1):
else:
#print ' igg.shape = ',iGg.shape
#gffOut1 = np.ma.zeros((nTemp), 'float64')
#gffOutMask = np.zeros((nTemp), 'Bool')
#uuOut = np.zeros((nTemp), 'float64')
#
uu = (const.planck*const.light*1.e+8/const.boltzmann) /(wvl*temperature).flatten()
nonValidUu1 = np.log10(uu) < -4.
nonValidUu2 = np.log10(uu) > 4.
nonValidUu = np.logical_or(nonValidUu1, nonValidUu2)
gffOutMask = nonValidUu
uuOut = np.ma.array(uu, mask=nonValidUu, fill_value=True)
#iUu = np.ma.array((np.log10(uu) + 4.)*10., mask=nonValidUu, fill_value=0.)
iUu = (np.log10(uu) + 4.)*10.
#print ' iUu.shape = ',iUu.shape
gffOut1 = gffInterpolator(iUu, iGg.flatten())
#
wvlt = 1./(wvl**2*np.sqrt(temperature))
ff1 = np.exp(-uuOut)*gffOut1*wvlt
ff = np.ma.array(ff1, mask=gffOutMask, fill_value=0.)
gffOut = np.ma.array(gffOut1, mask=gffOutMask, fill_value=0.)
return {'suthFf':ff, 'suthGff':gffOut}
#elif (nTemp == 1) and (nWvl == 1):
##iGg = (np.log10(gg) + 4.)*5.
#uu = (const.planck*const.light*1.e+8/const.boltzmann)/(wvl*temperature)
#nonValidUu1 = np.log10(uu) < -4.
#nonValidUu2 = np.log10(uu) > 4.
#nonValidUu = np.logical_or(nonValidUu1, nonValidUu2)
#if not nonValidUu:
#iUu = (np.log10(uu) + 4.)*10.
#gffOut = gffInterpolator(iUu, iGg)
#wvlt = 1./(wvl**2*np.sqrt(temperature))
#ff = np.exp(-uu)*gffOut*wvlt
#return {'suthFf':ff, 'suthGff':gffOut}
#else:
#errorMessage = 'invalid Temperature/Wavlength in continuum.sutherland'
#print(errorMessage)
#return {'errorMessage':errorMessage}
#
# ----------------------------------------------------------------------------------------
#
def ioneqOne(self):
'''Provide the ionization equilibrium for the selected ion as a function of temperature.
        The result is returned in self.IoneqOne.
        This is a duplicate of the method ion.ioneqOne.'''
#
try:
temperature = self.Temperature
except:
return
#
try:
ioneqAll = self.IoneqAll
except:
self.IoneqAll = util.ioneqRead(ioneqname = self.Defaults['ioneqfile'])
ioneqAll=self.IoneqAll
#
ioneqTemperature = ioneqAll['ioneqTemperature']
Z=self.Z
Ion=self.Ion
Dielectronic=self.Dielectronic
ioneqOne = np.zeros_like(temperature)
#
thisIoneq=ioneqAll['ioneqAll'][Z-1,Ion-1-Dielectronic].squeeze()
# thisIoneq = self.Ioneq
gioneq=thisIoneq > 0.
goodt1=self.Temperature >= ioneqTemperature[gioneq].min()
goodt2=self.Temperature <= ioneqTemperature[gioneq].max()
goodt=np.logical_and(goodt1,goodt2)
y2=interpolate.splrep(np.log(ioneqTemperature[gioneq]),np.log(thisIoneq[gioneq]),s=0)
#
if goodt.sum() > 0:
if self.Temperature.size > 1:
gIoneq=interpolate.splev(np.log(self.Temperature[goodt]),y2) #,der=0)
ioneqOne[goodt] = np.exp(gIoneq)
else:
gIoneq=interpolate.splev(np.log(self.Temperature),y2)
ioneqOne = np.exp(gIoneq)
else:
ioneqOne = 0.
#
self.IoneqOne = ioneqOne
#
# -------------------------------------------------------------------------------------
#
| gpl-3.0 |
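Editorial aside (not part of the ChiantiPy source above): both itoh() and sutherland() evaluate the same spectral shape, exp(-u) * gff / (wvl**2 * sqrt(T)) with u = h*c/(lambda*k_B*T). A minimal self-contained sketch of that shape follows, with the gaunt factor fixed at 1 and physical constants taken from scipy instead of the module's own const; it is an illustration, not the library's API.

import numpy as np
from scipy import constants

def freefree_shape(wvl_angstrom, temperature_K, gff=1.0):
    """Relative free-free spectral shape versus wavelength (arbitrary normalization)."""
    wvl = np.asarray(wvl_angstrom, dtype=float)
    # u = h*c / (lambda * k_B * T); wavelength converted from Angstrom to metres
    u = constants.h * constants.c / (wvl * 1.0e-10 * constants.k * temperature_K)
    return gff * np.exp(-u) / (wvl ** 2 * np.sqrt(temperature_K))

if __name__ == '__main__':
    print(freefree_shape(np.linspace(1.0, 100.0, 5), 1.0e7))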
ryanjmccall/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_template.py | 70 | 8806 | """
This is a fully functional do nothing backend to provide a template to
backend writers. It is fully functional in that you can select it as
a backend with
import matplotlib
matplotlib.use('Template')
and your matplotlib scripts will (should!) run without error, though
no output is produced. This provides a nice starting point for
backend writers because you can selectively implement methods
(draw_rectangle, draw_lines, etc...) and slowly see your figure come
to life w/o having to have a full blown implementation before getting
any results.
Copy this to backend_xxx.py and replace all instances of 'template'
with 'xxx'. Then implement the class methods and functions below, and
add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
'xxx' to the backends list in the validate_backend method in
matplotlib/__init__.py and you're off. You can use your backend with::
import matplotlib
matplotlib.use('xxx')
from pylab import *
plot([1,2,3])
show()
matplotlib also supports external backends, so you can
use any module in your PYTHONPATH with the syntax::
import matplotlib
matplotlib.use('module://my_backend')
where my_backend.py is your module name. This syntax is also
recognized in the rc file and in the -d argument in pylab, eg::
python simple_plot.py -dmodule://my_backend
The files that are most relevant to backend writers are
matplotlib/backends/backend_your_backend.py
matplotlib/backend_bases.py
matplotlib/backends/__init__.py
matplotlib/__init__.py
matplotlib/_pylab_helpers.py
Naming Conventions
* classes Upper or MixedUpperCase
* variables lower or lowerUpper
* functions lower or underscore_separated
"""
from __future__ import division
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
class RendererTemplate(RendererBase):
"""
The renderer handles drawing/rendering operations.
This is a minimal do-nothing class that can be used to get started when
writing a new backend. Refer to backend_bases.RendererBase for
    documentation of the class's methods.
"""
def __init__(self, dpi):
self.dpi = dpi
def draw_path(self, gc, path, transform, rgbFace=None):
pass
# draw_markers is optional, and we get more correct relative
# timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
# pass
# draw_path_collection is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_path_collection(self, master_transform, cliprect, clippath,
# clippath_trans, paths, all_transforms, offsets,
# offsetTrans, facecolors, edgecolors, linewidths,
# linestyles, antialiaseds):
# pass
# draw_quad_mesh is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_quad_mesh(self, master_transform, cliprect, clippath,
# clippath_trans, meshWidth, meshHeight, coordinates,
# offsets, offsetTrans, facecolors, antialiased,
# showedges):
# pass
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
pass
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
pass
def flipy(self):
return True
def get_canvas_width_height(self):
return 100, 100
def get_text_width_height_descent(self, s, prop, ismath):
return 1, 1, 1
def new_gc(self):
return GraphicsContextTemplate()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
#return points/72.0 * self.dpi.get()
class GraphicsContextTemplate(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
For GUI backends - this should be overriden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasTemplate(thisFig)
manager = FigureManagerTemplate(canvas, num)
return manager
class FigureCanvasTemplate(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Note GUI templates will want to connect events for button presses,
mouse movements and key presses to functions that call the base
class methods button_press_event, button_release_event,
motion_notify_event, key_press_event, and key_release_event. See,
eg backend_gtk.py, backend_wx.py and backend_tkagg.py
"""
def draw(self):
"""
Draw the figure using the renderer
"""
renderer = RendererTemplate(self.figure.dpi)
self.figure.draw(renderer)
# You should provide a print_xxx function for every file format
# you can write.
# If the file type is not in the base set of filetypes,
# you should add it to the class-scope filetypes dictionary as follows:
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['foo'] = 'My magic Foo format'
def print_foo(self, filename, *args, **kwargs):
"""
Write out format foo. The dpi, facecolor and edgecolor are restored
to their original values after this call, so you don't need to
save and restore them.
"""
pass
def get_default_filetype(self):
return 'foo'
class FigureManagerTemplate(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerTemplate
| gpl-3.0 |
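Editorial aside: a hypothetical driver for the do-nothing Template backend documented above. According to its module docstring it should run without error and produce no output; the backend name strings are taken from that docstring rather than verified here.

import matplotlib
matplotlib.use('Template')   # or 'module://my_backend' for an external backend
import matplotlib.pyplot as plt

plt.plot([1, 2, 3])
plt.show()                   # no window and no file: every draw call is a no-op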
hugobowne/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
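Editorial aside: the benchmark above wraps each fit in gc.collect() and time(); a stripped-down version of that timing harness (dataset sizes chosen arbitrarily for illustration) looks like this.

import gc
from time import time

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso

X, y = make_regression(n_samples=1000, n_features=100, n_informative=10,
                       noise=0.1, random_state=0)
X /= np.sqrt(np.sum(X ** 2, axis=0))   # normalize columns, as compute_bench does
gc.collect()
tstart = time()
Lasso(alpha=0.01, fit_intercept=False).fit(X, y)
print('Lasso fit took %.4f s' % (time() - tstart))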
Sentient07/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
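Editorial aside: besides grid_scores_, a fitted RFECV exposes which features were kept. A small standalone sketch (synthetic data, sizes reduced for speed) printing the support mask and the ranking:

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, n_features=10, n_informative=3,
                           random_state=0)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=StratifiedKFold(2),
              scoring="accuracy")
rfecv.fit(X, y)
print(rfecv.support_)   # boolean mask of the selected features
print(rfecv.ranking_)   # rank 1 = selected; larger ranks were eliminated earlier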
phzfi/RIC | scripts/resize-performance.py | 1 | 6566 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Single image resize perf-test.
"""
import argparse
import http.client
import io
import logging
import statistics
import sys
import time
from PIL import Image
PLT = None
NP = None
try:
import matplotlib.pyplot
import numpy
PLT = matplotlib.pyplot
NP = numpy
except ImportError:
print('Matplotlib or Numpy not found, skipping plotting functionality')
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO
)
LOGGER = logging.getLogger(__name__)
def get_opts(args):
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', help='Print debug information',
default=False, type=bool)
parser.add_argument('--host', help='The target hostname to query',
default='localhost')
parser.add_argument('-p', '--port', help='The target host port to query',
default=8005, type=int)
parser.add_argument('-i', '--image', help='The image id to query for',
default='01.jpg')
parser.add_argument('-s', '--step', help='Resize in this large steps',
default=8, type=int)
parser.add_argument('--xkcd', help='plot in XKCD style',
default=False, type=bool)
return parser.parse_args(args)
class Query(object):
def __init__(self, options):
self.client = http.client.HTTPConnection(options.host, options.port,
timeout=30)
if options.debug:
self.client.set_debuglevel(1)
self.id = '/' + options.image
def get_response(self, url):
response = None
begin = None
duration = None
try:
self.client.request('GET', url)
begin = time.time()
response = self.client.getresponse()
duration = time.time() - begin
except http.client.HTTPException as excp:
LOGGER.exception('HTTPException', exc_info=excp)
return None
if response.status != 200:
LOGGER.warn('STATUS: ' + str(response.status))
return None
return {
'elapsed': duration,
'content': response.read()
}
def get_original_size(self):
response = self.get_response(self.id)
if response is None:
return None
vfile = io.BytesIO(response['content'])
vimage = Image.open(vfile)
vfile.close()
return vimage.size
def get_resized_time(self, width, height):
url = '{0}?width={1}&height={2}'.format(self.id, width, height)
response = self.get_response(url)
if response is None:
return None
return response['elapsed']
def paint(raw, plotdata, xkcd=False):
# pixels -> time
xsrc = sorted(plotdata.keys())
N = len(xsrc)
xdata = NP.zeros((N, 1))
ydata = NP.zeros((N, 1))
for i, x in enumerate(xsrc):
xdata[i, 0] = x
ydata[i, 0] = statistics.mean(plotdata[x])
# Ordered lists are nice ;)
min_pixels = xsrc[0]
max_pixels = xsrc[-1]
min_time = raw[0]
max_time = raw[-1]
    rect = [min_pixels, min_time, max_pixels - min_pixels, max_time - min_time]
# Clear
PLT.figure('raw data')
PLT.cla()
PLT.clf()
if xkcd:
PLT.xkcd()
#PLT.axes(rect, axisbg='w', frameon=True)
PLT.xlabel('pixels')
PLT.ylabel('seconds')
PLT.grid(True, which='major', axis='both', linestyle='--')
# Errors
yerr = NP.zeros((2, N))
for i in range(N):
x, y = xdata[i, 0], ydata[i, 0]
ys = plotdata[x]
devi = abs(statistics.stdev(ys) - y) if len(ys) > 1 else 0.0
yerr[0, i] = devi
yerr[1, i] = devi
PLT.errorbar(xdata, ydata, yerr)
PLT.plot(xdata, ydata, 'r-')
PLT.axis('auto')
# Second plot
PLT.figure('grouped data')
PLT.title('Average time taken to resize vs Pixel count')
if xkcd:
PLT.xkcd()
M = N // 4
gxdata = NP.zeros((M, 1))
gydata = NP.zeros((M, 1))
for i in range(M):
imax = min(N, (i+1) * 4)
xkeys = xsrc[i*4:imax]
gxdata[i, 0] = statistics.mean(xkeys)
ykeys = [statistics.mean(plotdata[x]) for x in xkeys]
gydata[i, 0] = statistics.mean(ykeys)
# Apply Laplacian smoothing
LWIDTH = 2
LCOUNT = 2
for _ in range(LCOUNT):
nydata = NP.zeros((M, 1))
for i in range(1, M - 1):
imin = max(0, i - LWIDTH)
imax = min(M - 1, i + LWIDTH)
nydata[i, 0] = statistics.mean(gydata[imin:imax, 0])
gydata[1:M-1,0] = nydata[1:M-1,0]
PLT.xlabel('pixels')
PLT.ylabel('seconds')
PLT.grid(True, which='major', axis='both', linestyle='--')
PLT.plot(gxdata, gydata, 'g-')
PLT.show()
def main(options):
LOGGER.info('hello LOGGER')
logging.info('hello logging')
query = Query(options)
size = query.get_original_size()
if size is None:
return 1
LOGGER.info('Original: {0}x{1}'.format(*size))
width, height = size
timings = []
tplot = {}
for h in range(1, height, options.step):
LOGGER.info('Query range with height={0}'.format(h))
for w in range(1, width, options.step):
elapsed = query.get_resized_time(w, h)
if elapsed is None:
continue
timings.append(elapsed)
pixels = w * h
if pixels not in tplot:
tplot[pixels] = []
tplot[pixels].append(elapsed)
count = len(timings)
ok_set = sorted(list(filter(lambda x: x is not None, timings)))
count_ok = len(ok_set)
print('Query count: {0}'.format(count))
print('Successful transfers: {0}'.format(count_ok))
if count_ok < 1:
LOGGER.error('Can not produce statistics because of too many failures')
return 1
mintime = min(ok_set)
maxtime = max(ok_set)
mean = statistics.mean(ok_set)
median = statistics.median(ok_set)
total = sum(ok_set)
print('min: {0} s'.format(mintime))
print('max: {0} s'.format(maxtime))
print('mean: {0} s'.format(mean))
print('median: {0} s'.format(median))
print('sum: {0} s'.format(total))
if count_ok < 2:
return 1
deviation = statistics.stdev(ok_set)
print('standard-deviation: {0} s'.format(deviation))
if PLT is not None:
paint(ok_set, tplot)
return 0
if __name__ == '__main__':
options = get_opts(sys.argv[1:])
sys.exit(main(options))
| mit |
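Editorial aside: paint() above smooths the grouped timings with repeated passes of a windowed mean (called Laplacian smoothing in the comments). A standalone sketch of that smoothing step, using the same statistics module:

import statistics

def smooth(values, width=2, passes=2):
    """Replace each interior point by the mean of its +/- width neighbourhood."""
    vals = list(values)
    n = len(vals)
    for _ in range(passes):
        out = vals[:]
        for i in range(1, n - 1):
            lo = max(0, i - width)
            hi = min(n - 1, i + width)
            out[i] = statistics.mean(vals[lo:hi])
        vals = out
    return vals

print(smooth([1, 5, 2, 8, 3, 9, 4, 10]))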
bramjochems/BJFinanceLib | BJFinanceLib/math/kalmanregression.py | 1 | 5711 | # -*- coding: utf-8 -*-
from numbers import Number
import numpy as np
from BJFinanceLib.utils.kalman import KalmanFilter
class KalmanRegression():
"""
Class that performs online regression where parameters are updated based on
a Kalman Filter.
"""
def __init__(self,X,y,
add_intercept=False,
burn_in_period=None,
initial_state_mean=None,
observation_covariance=None,
initial_state_covariance=None,
transition_covariance=None ):
"""
Initializes the class.
Arguments
X : observations with each row a distinct observation. Can be either a
np array or a pandas dataframe.
y : target values for the regression. Must be (effectively) one-
dimensional.
add_intercept: Optional boolean, default=False. If true, an intercept
is added to the model as a last regression coefficient.
burn_in_period: Optional int that specifies the number of observations
left out for initial training of the algorithm. If not
specified, no initial calibration on training data is
done (which makes error statistics a bit worse). The
burn_in_period just uses a regular regression for burn-
in, not some online method.
initial_state_mean: Initial mean for the states. Optional, starting
from zero if not specified. If a burn_in_period is
                            specified, this parameter gets ignored.
observation_covariance: Noise inherent to the y-values. Scalar.
Optional, if not specified, an estimated value
is used that might be completely off.
initial_state_covariance: Optional input for state covariances to start
from. The covariance estimates get updated
over time. Ignored if a burn-in period is set.
                                  The initial state covariance determines how much the
                                  initial parameter estimates can move away from their starting values.
transition_covariance: How much our parameters can vary over time. Optional.
Square matrix of size equal to the number of
regressors (remember to add one for the
intercept if an intercept is used) or a scalar.
.
"""
# Process arrays into uniform input format
X_internal = np.reshape(X.copy(), (len(X),-1)) # Always 2d
y_internal = np.reshape(y.copy(), (len(y),1)) # Always 2D array but only single column
nobs,nvars = np.shape(X_internal)
if add_intercept:
X_internal = np.hstack((X_internal,np.ones((len(X_internal),1))))
nvars = nvars+1
X_internal = np.expand_dims(X_internal,axis=1)
# Some input validation
if (len(X_internal) != len(y_internal)):
            raise Exception("X and y have different numbers of observations")
if burn_in_period:
raise Exception("Burn-in period not implemented yet")
            #To implement, make sure that initial_state_mean, initial_state_cov,
#observation_covariance and transition_covariance are filled.
else:
if not initial_state_mean:
initial_state_mean = [0 for i in range(nvars)]
if not observation_covariance:
                observation_covariance = np.std(y_internal) / 5 # assume noise inherent to y is ~20% of its standard deviation
if initial_state_covariance:
if isinstance(initial_state_covariance,Number):
initial_state_cov = initial_state_covariance*np.ones((nvars,nvars))
else:
initial_state_cov = initial_state_covariance
else:
initial_state_cov = np.ones((nvars,nvars))
if not transition_covariance:
transition_covariance = 0.01 * np.eye(nvars)
elif isinstance(transition_covariance,Number):
transition_covariance *= np.eye(nvars)
self.__kalman = KalmanFilter(n_dim_obs=1,
n_dim_state=nvars,
initial_state_mean=initial_state_mean,
initial_state_covariance=initial_state_cov,
transition_matrices=np.eye(nvars),
observation_matrices=X_internal,
observation_covariance=observation_covariance,
transition_covariance=transition_covariance)
""" The internal kalman filter class that's used """
state_means,state_covs = self.__kalman.filter(y_internal) #do the actual filtering
self.rolling_coefficients = state_means
"""
        Regression coefficients as an array in which each row corresponds to an
        observation and the columns contain the regression weights at that point.
"""
self.rolling_coefficient_covariances = state_covs
"""
Covariance matrix for the regression paramters as estimated by the
Kalman filter.
"""
| gpl-3.0 |
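Editorial aside: KalmanRegression delegates the filtering to a pykalman-style KalmanFilter. As a self-contained illustration of the underlying recursion (weights modelled as a random walk and updated one observation at a time), here is a bare-bones numpy version; the helper name and variance settings are invented for this sketch and are not the library's API.

import numpy as np

def online_kalman_regression(X, y, obs_var=0.01, trans_var=1e-6):
    """Track regression weights with a random-walk state and scalar observations."""
    n, k = X.shape
    beta = np.zeros(k)               # current weight estimate (state mean)
    P = np.eye(k)                    # state covariance
    Q = trans_var * np.eye(k)        # random-walk (transition) covariance
    history = np.zeros((n, k))
    for t in range(n):
        x = X[t]
        P = P + Q                                       # predict (identity transition)
        S = x.dot(P).dot(x) + obs_var                   # innovation variance
        K = P.dot(x) / S                                # Kalman gain
        beta = beta + K * (y[t] - x.dot(beta))          # update state mean
        P = P - np.outer(K, x).dot(P)                   # update state covariance
        history[t] = beta
    return history

rng = np.random.RandomState(0)
X = np.column_stack([rng.randn(300), np.ones(300)])     # one regressor plus an intercept
y = 2.0 * X[:, 0] + 0.5 + 0.1 * rng.randn(300)
print(online_kalman_regression(X, y)[-1])               # approximately [2.0, 0.5]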
Moriadry/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 62 | 3960 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with ops.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(
dataframe, batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with ops.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [
j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
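Editorial aside: both tests above expect batches to wrap around the input, i.e. batch i of size B over N rows uses indices (i*B + j) % N. A framework-free sketch of that indexing (names invented here):

import numpy as np

def wraparound_batch(data, batch_size, batch_index):
    """Return the indices and rows for one wrap-around minibatch."""
    n = len(data)
    idx = [(batch_index * batch_size + j) % n for j in range(batch_size)]
    return idx, data[idx]

array = np.arange(32).reshape(16, 2)
for i in range(6):
    idx, batch = wraparound_batch(array, batch_size=3, batch_index=i)
    print(idx, batch.tolist())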
IshankGulati/scikit-learn | sklearn/ensemble/tests/test_forest.py | 19 | 41737 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.94, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
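# Editorial sketch, not part of the original test module: how an out-of-bag set
# arises from a single bootstrap resample, which is the mechanism check_oob_score
# above relies on. The helper name is invented here and nothing calls it.
def _oob_indices_demo(n_samples=16, random_state=0):
    rng = check_random_state(random_state)
    drawn = rng.randint(0, n_samples, n_samples)       # one bootstrap resample
    oob = np.setdiff1d(np.arange(n_samples), drawn)    # rows never drawn form the OOB set
    return drawn, oob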
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
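    # Why: there are Catalan(3) = 5 full binary trees over the 4 ordered
    # values. The first random threshold falls in each of the three gaps with
    # probability 1/3; a middle-gap split forces the balanced tree (prob 1/3),
    # while an end-gap split leaves two equally likely completions, giving
    # each of the 4 unbalanced trees probability 1/3 * 1/2 = 1/6.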
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Using a Python 2.x list as the sample_weight parameter used to raise
# an exception. This test makes sure such code will now run correctly.
clf = ForestClassifier()
sample_weight = [1.] * len(iris.data)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output problems.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='balanced', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
| bsd-3-clause |
ruymanengithub/vison | vison/pump/TP01aux.py | 1 | 4661 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Auxiliary Functions and resources to TP01.
Created on Wed Jan 31 15:11:00 2018
:author: Ruyman Azzollini
"""
# IMPORT STUFF
from pdb import set_trace as stop
import numpy as np
import os
from collections import OrderedDict
from matplotlib import cm
import pandas as pd
from vison.datamodel import cdp
from vison.plot import figclasses
from vison.plot import trends
# END IMPORT
CCDs = ['CCD1', 'CCD2', 'CCD3']
check_offsets_dict = dict(stats=['offset_pre', 'offset_ove'],
trendaxis='time',
figname='TP01_offset_vs_time.png',
caption='TP01: offset vs. time.',
meta=dict(doLegend=True,
doNiceXDate=True,
suptitle='TP01-checks: offsets',
ylim=trends.offset_lims))
check_deltaoff_dict = dict(
stats=[
'deltaoff_pre',
'deltaoff_ove'],
trendaxis='time',
figname='TP01_deltaoff_vs_time.png',
caption='TP01: $\delta$offset vs. time. Offset value in each frame minus the average value.',
meta=dict(
doLegend=True,
doNiceXDate=True,
suptitle='TP01-checks: delta-offset',
ylim=[
-10.,
10.]))
check_std_dict = dict(stats=['std_pre', 'std_ove'],
trendaxis='time',
figname='TP01_std_vs_time.png',
caption='TP01: std vs. time.',
meta=dict(doLegend=True,
doNiceXDate=True,
suptitle='TP01-checks: std',
ylim=trends.RON_lims))
check_injlevel_dict = dict(stats=['chk_mea_inject', 'chk_med_inject'],
trendaxis='time',
figname='TP01_injlevel_vs_time.png',
caption='TP01: Injection Level vs. time.',
meta=dict(doLegend=True,
doNiceXDate=True,
suptitle='TP01-checks: Injection Level'))
check_injstd_dict = dict(stats=['chk_std_inject'],
trendaxis='time',
figname='TP01_injstd_vs_time.png',
caption='TP01: Injection STD vs. time.',
meta=dict(doLegend=False,
doNiceXDate=True,
suptitle='TP01-checks: Injection STD')
)
def gt_meta_heatmaps(mkey):
return dict(
figname='TP01_PcTau_Heatmap_%s.png' % mkey,
caption='TP01: log10(tau[us])-log10(Pc) Heatmap for pumping mode %s.' % mkey,
meta=dict(doLegend=False,
doColorbar=False,
suptitle='TP01: %s' % mkey,
xlabel='log10(tau [us])',
ylabel='log10(Pc)',
corekwargs=dict(cmap=cm.inferno_r,
aspect='auto',
norm=None,
origin='lower left',
extent=[]))
)
def get_TP01figs():
TP01figs = dict()
TP01figs['TP01checks_offsets'] = [
trends.Fig_Basic_Checkstat, check_offsets_dict]
TP01figs['TP01checks_deltaoff'] = [
trends.Fig_Basic_Checkstat, check_deltaoff_dict]
TP01figs['TP01checks_stds'] = [trends.Fig_Basic_Checkstat, check_std_dict]
TP01figs['TP01checks_injlevel'] = [
trends.Fig_Basic_Checkstat, check_injlevel_dict]
TP01figs['TP01checks_injstd'] = [trends.Fig_Basic_Checkstat, check_injstd_dict]
for mkey in ['m123', 'm234', 'm341', 'm412']:
TP01figs['TP01meta_%s' % mkey] = [figclasses.Fig_BeamImgShow,
gt_meta_heatmaps(mkey)]
TP01figs['BlueScreen'] = [figclasses.BlueScreen, dict()]
return TP01figs
def get_CDP_lib():
""" """
CDP_lib = OrderedDict()
for CCD in CCDs:
mastercat = cdp.CDP()
mastercat.rootname = 'TP01_MasterCat_%s' % CCD
CDP_lib['MASTERCAT_%s' % CCD] = mastercat
mergedcat = cdp.CDP()
mergedcat.rootname = 'TP01_MergedCat_%s' % CCD
CDP_lib['MERGEDCAT_%s' % CCD] = mergedcat
chinjcharact_cdp = cdp.CDP()
chinjcharact_cdp.rootname = 'TP01_CHINJCHAR'
CDP_lib['CHINJCHARACT'] = chinjcharact_cdp
chinj_cdp = cdp.Tables_CDP()
chinj_cdp.rootname = 'TP01_CHINJ'
CDP_lib['CHINJ'] = chinj_cdp
return CDP_lib
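# Minimal usage sketch (illustrative only): the TP01 task script is expected to
# consume the two factories above roughly like this.
if __name__ == '__main__':
    TP01figs = get_TP01figs()
    CDP_lib = get_CDP_lib()
    print 'Figure keys:', TP01figs.keys()
    print 'CDP keys:', CDP_lib.keys()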
| gpl-3.0 |
elkingtonmcb/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
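# Small illustrative addition (not part of the original example): report which
# percentile of selected features achieved the best mean cross-validation score.
best_percentile = percentiles[int(np.argmax(score_means))]
print("Best mean CV score %.3f obtained with %d%% of the features"
      % (max(score_means), best_percentile))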
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
thast/EOSC513 | DC/SparseGN/DCT_withW_rad/DCT_withW.py | 1 | 11602 | from SimPEG import Mesh, Regularization, Maps, Utils, EM
from SimPEG.EM.Static import DC
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import copy
import pandas as pd
from scipy.sparse import csr_matrix, spdiags, dia_matrix,diags
from scipy.sparse.linalg import spsolve
from scipy.stats import norm,multivariate_normal
import sys
path ="../pymatsolver/"
path = "../../../Documents/pymatsolver/"
sys.path.append(path)
from pymatsolver import PardisoSolver
from scipy.interpolate import LinearNDInterpolator, interp1d
from sklearn.mixture import GaussianMixture
from SimPEG import DataMisfit, Regularization, Optimization, InvProblem, Directives, Inversion
import SimPEG
import scipy.sparse as sp
#2D model
csx, csy, csz = 0.25,0.25,0.25
# Number of core cells in each direction
ncx, ncz = 123,41
# Number of padding cells to add in each direction
npad = 12
# Vectors of cell lengths in each direction
hx = [(csx,npad, -1.5),(csx,ncx),(csx,npad, 1.5)]
hz= [(csz,npad,-1.5),(csz,ncz)]
# Create mesh
mesh = Mesh.TensorMesh([hx, hz],x0="CN")
# Map mesh coordinates from local to UTM coordinates
#mesh.x0[2] = mesh.x0[2]-mesh.vectorCCz[-npad-1]
mesh.x0[1] = mesh.x0[1]+csz/2.
#mesh.x0[0] = mesh.x0[0]+csx/2.
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#plt.gca().set_xlim([-20,20])
#plt.gca().set_ylim([-15,0])
#mesh.plotGrid()
#plt.gca().set_aspect('equal')
#plt.show()
print "Mesh Size: ", mesh.nC
#Model Creation
lnsig_air = 1e-8;
x0,z0, r0 = -6., -4., 3.
x1,z1, r1 = 6., -4., 3.
ln_sigback = -5.
ln_sigc = -3.
ln_sigr = -7.
noisemean = 0.
noisevar = 0.0
overburden_extent = 0.
ln_over = -4.
#m = (lnsig_background)*np.ones(mesh.nC);
#mu =np.ones(mesh.nC);
mtrue = ln_sigback*np.ones(mesh.nC) + norm(noisemean,noisevar).rvs(mesh.nC)
overb = (mesh.gridCC[:,1] >-overburden_extent) & (mesh.gridCC[:,1]<=0)
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])+ norm(noisemean,noisevar).rvs(np.prod((mtrue[overb]).shape))
csph = (np.sqrt((mesh.gridCC[:,1]-z0)**2.+(mesh.gridCC[:,0]-x0)**2.))< r0
mtrue[csph] = ln_sigc*np.ones_like(mtrue[csph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[csph]).shape))
#Define the sphere limit
rsph = (np.sqrt((mesh.gridCC[:,1]-z1)**2.+(mesh.gridCC[:,0]-x1)**2.))< r1
mtrue[rsph] = ln_sigr*np.ones_like(mtrue[rsph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[rsph]).shape))
mtrue = Utils.mkvc(mtrue);
mesh.plotGrid()
plt.gca().set_xlim([-10,10])
plt.gca().set_ylim([-10,0])
xyzlim = np.r_[[[-10.,10.],[-10.,1.]]]
actind, meshCore = Utils.meshutils.ExtractCoreMesh(xyzlim,mesh)
plt.hist(mtrue[actind],bins =50,normed=True);
fig0 = plt.figure()
ax0 = fig0.add_subplot(111)
mm = meshCore.plotImage(mtrue[actind],ax = ax0)
plt.colorbar(mm[0])
ax0.set_aspect("equal")
#plt.show()
def getCylinderPoints(xc,zc,r):
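    """Return (x, z) points outlining a circle of radius r centred at (xc, zc)."""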
xLocOrig1 = np.arange(-r,r+r/10.,r/10.)
xLocOrig2 = np.arange(r,-r-r/10.,-r/10.)
# Top half of cylinder
zLoc1 = np.sqrt(-xLocOrig1**2.+r**2.)+zc
# Bottom half of cylinder
zLoc2 = -np.sqrt(-xLocOrig2**2.+r**2.)+zc
# Shift from x = 0 to xc
xLoc1 = xLocOrig1 + xc*np.ones_like(xLocOrig1)
xLoc2 = xLocOrig2 + xc*np.ones_like(xLocOrig2)
topHalf = np.vstack([xLoc1,zLoc1]).T
topHalf = topHalf[0:-1,:]
bottomHalf = np.vstack([xLoc2,zLoc2]).T
bottomHalf = bottomHalf[0:-1,:]
cylinderPoints = np.vstack([topHalf,bottomHalf])
cylinderPoints = np.vstack([cylinderPoints,topHalf[0,:]])
return cylinderPoints
cylinderPoints0 = getCylinderPoints(x0,z0,r0)
cylinderPoints1 = getCylinderPoints(x1,z1,r1)
#Gradient array 1 2D
srclist = []
nSrc = 23
lines = 1
ylines = np.r_[0.]
xlines = np.r_[0.]
z = 0.
#xline
for k in range(lines):
for i in range(nSrc):
if i<=11:
locA = np.r_[-14.+1., z]
locB = np.r_[-8.+2.*i-1., z]
#M = np.c_[np.arange(-12.,-12+2*(i+1),2),np.ones(i+1)*z]
#N = np.c_[np.arange(-10.,-10+2*(i+1),2),np.ones(i+1)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
#print -locA,-locB,"\n",[-M,-N],"\n"
else:
locA = np.r_[-14.+2*(i-11)+1., z]
locB = np.r_[14.-1.,z]
#M = np.c_[np.arange(locA[0]+1.,12.,2),np.ones(nSrc-i)*z]
#N = np.c_[np.arange(locA[0]+3.,14.,2),np.ones(nSrc-i)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print "line2",locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
mapping = Maps.ExpMap(mesh)
survey = DC.Survey(srclist)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
problem.Solver = PardisoSolver
survey.dobs = survey.dpred(mtrue)
survey.std = 0.05*np.ones_like(survey.dobs)
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
dmisAll = DataMisfit.l2_DataMisfit(survey)
print '# of data: ', survey.dobs.shape
class SimultaneousSrc(DC.Src.BaseSrc):
"""
    Simultaneous source: a random linear combination of the original dipole sources (right-hand side QW = Q.dot(W))
"""
QW = None
Q = None
W = None
def __init__(self, rxList,Q,W, **kwargs):
SimPEG.Survey.BaseSrc.__init__(self, rxList, **kwargs)
def eval(self, prob):
return self.QW
class SimultaneousRx(DC.Rx.BaseRx):
"""
SimultaneousRx receiver
"""
def __init__(self, locs, rxType='phi', **kwargs):
# We may not need this ...
SimPEG.Survey.BaseRx.__init__(self, locs, rxType)
@property
def nD(self):
"""Number of data in the receiver."""
return self.locs.shape[0]
# Not sure why ...
# return int(self.locs[0].size / 2)
def getP(self, mesh, Gloc):
return self.locs
P = []
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
P = rx.getP(mesh,'CC')
from SimPEG.Maps import IdentityMap
from scipy.fftpack import dct,idct
class DCTMap(IdentityMap):
def __init__(self, mesh=None, nP=None, **kwargs):
super(DCTMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
def _transform(self, m):
return Utils.mkvc(dct(dct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
def deriv(self, m, v=None):
if v is not None:
return dct(dct(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho')
else:
print "not implemented"
def inverse(self, m):
return Utils.mkvc(idct(idct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
class iDCTMap(IdentityMap):
def __init__(self, mesh, nP=None, **kwargs):
super(iDCTMap, self).__init__(mesh=mesh, nP=nP, **kwargs)
def _transform(self, m):
return Utils.mkvc(idct(idct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
def deriv(self, m, v=None):
if v is not None:
return idct(idct(v.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho')
else:
print "not implemented"
def inverse(self, m):
return Utils.mkvc(dct(dct(m.reshape(self.mesh.nCx,self.mesh.nCy,order = 'F'), axis=0,norm = 'ortho'), axis=1,norm = 'ortho'))
idctmap = iDCTMap(mesh)
dctmap = DCTMap(mesh)
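# Optional sanity check (illustrative only): with norm='ortho' the 2D DCT and
# inverse DCT defined above are exact inverses, so mapping a random model
# through DCTMap and back through iDCTMap should reproduce it. This relies on
# SimPEG's `map * vector` syntax calling `_transform`, as used further below.
m_check = np.random.randn(mesh.nC)
assert np.allclose(idctmap * (dctmap * m_check), m_check)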
import spgl1
#Parameter for SPGL1 iterations
nits = 10
mdct = (-5.)*np.ones_like(mtrue)
it = 0
#phi_d_normal = np.load('../phid_normal.npy')
#ratio = np.r_[6.5,phi_d_normal[0:-1]/phi_d_normal[1:]]
ratio = 10.*np.ones(nits)
min_progress = 1.2
xlist = []
#Parameters for W
nsubSrc = 5
InnerIt = 1
dmisfitsub = []
dmisfitall = []
dmisfitall.append(dmisAll.eval(mdct)/survey.nD)
#Initialize Random Source
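# Each column of W holds random +/-1 weights, so Q.dot(W) below collapses the
# nSrc original right-hand sides into nsubSrc encoded "simultaneous" sources.
# Only these encoded sources are forward-modelled within each SPGL1 iteration,
# which is what makes the subsampled misfit cheaper than the full survey.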
W = np.random.randint(0, high=2, size=[survey.nSrc,nsubSrc])*2-1
#problem.unpair()
#problem.pair(survey)
Q = problem.getRHS()
sub = problem.getRHS().dot(W)
rx_r = SimultaneousRx(locs=P)
srcList_r = []
for isrc in range(sub.shape[1]):
src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
srcList_r.append(src_r)
survey_r = DC.Survey(srcList_r)
problem.unpair()
problem.pair(survey_r)
d = survey_r.dpred(mtrue)
survey_r.dobs = d
survey_r.std = np.ones_like(d)*0.05
survey_r.eps = 1e-5*np.linalg.norm(survey_r.dobs)
dmis = DataMisfit.l2_DataMisfit(survey_r)
dmisfitsub.append(dmis.eval(mdct)/survey_r.nD)
problem.unpair()
problem.pair(survey)
print "end iteration: ",it, '; Overall Normalized Misfit: ', dmisAll.eval(mdct)/survey.nD
while (dmisAll.eval(mdct)/survey.nD)>0.5 and it<nits:
problem.unpair()
problem.pair(survey_r)
def JS(x,mode):
if mode == 1:
return problem.Jvec(mdct,idctmap*x)
else:
return dctmap*problem.Jtvec(mdct,x)
b = survey_r.dpred(mdct)-survey_r.dpred(mtrue)
print "# of data: ", b.shape
opts = spgl1.spgSetParms({'iterations':100, 'verbosity':2})
sigtol = np.linalg.norm(b)/np.maximum(ratio[it],min_progress)
#tautol = 20000.
x,resid,grad,info = spgl1.spg_bpdn(JS, b, sigma = sigtol,options=opts)
#x,resid,grad,info = spgl1.spg_lasso(JS,b,tautol,opts)
assert dmis.eval(mdct) > dmis.eval(mdct - idctmap*x)
mdct = mdct - idctmap*x
xlist.append(x)
it +=1
print "end iteration: ",it, '; Subsample Normalized Misfit: ', dmis.eval(mdct)/survey_r.nD
dmisfitsub.append(dmis.eval(mdct)/survey_r.nD)
problem.unpair()
problem.pair(survey)
dmisfitall.append(dmisAll.eval(mdct)/survey.nD)
print "Dmisfit compared to full dataset: ",dmisAll.eval(mdct)/survey.nD
if np.mod(it,InnerIt) ==0:
W = np.random.randint(0, high=2, size=[survey.nSrc,nsubSrc])*2-1
print 'update W'
#problem.unpair()
        #problem.pair(survey)
Q = problem.getRHS()
sub = problem.getRHS().dot(W)
rx_r = SimultaneousRx(locs=P)
srcList_r = []
for isrc in range(sub.shape[1]):
src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
srcList_r.append(src_r)
survey_r = DC.Survey(srcList_r)
problem.unpair()
problem.pair(survey_r)
dmis = DataMisfit.l2_DataMisfit(survey_r)
d = survey_r.dpred(mtrue)
survey_r.dobs = d
survey_r.std = np.ones_like(d)*0.05
survey_r.eps = 1e-5*np.linalg.norm(survey_r.dobs)
print "end Update W; iteration: ",it, '; New Subsample Normalized Misfit: ', dmis.eval(mdct)/survey_r.nD
problem.unpair()
problem.pair(survey)
np.save('./dmisfitsub.npy',dmisfitsub)
np.save('./dmisfitall.npy',dmisfitall)
np.save('./mfinal.npy',mdct)
np.save('./xlist.npy',x)
mm = mesh.plotImage(mdct)
plt.colorbar(mm[0])
plt.gca().set_xlim([-10.,10.])
plt.gca().set_ylim([-10.,0.])
plt.plot(cylinderPoints0[:,0],cylinderPoints0[:,1], linestyle = 'dashed', color='k')
plt.plot(cylinderPoints1[:,0],cylinderPoints1[:,1], linestyle = 'dashed', color='k')
plt.show() | mit |
sanketloke/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 34 | 50761 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
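        # Each fit_binary call relabels y to +/-1 for one class (see
        # _prepare_fit_binary) and trains an independent binary SGD model;
        # only the per-class intercepts need to be collected from the results.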
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', classes, y). "
                             "In place of y you can use a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
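    For probability estimates, fit with ``loss='log'`` (logistic
    regression). The exact probabilities depend on the SGD iterates, so as
    an illustrative sketch only the output shape is shown:
    >>> clf_log = linear_model.SGDClassifier(loss='log').fit(X, Y)
    >>> clf_log.predict_proba([[-0.8, -1]]).shape
    (1, 2)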
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
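# Illustrative sketch only (the helper name below is ours, not part of
# scikit-learn's API): for binary problems with loss="modified_huber",
# ``SGDClassifier._predict_proba`` maps raw decision scores to probabilities
# via (clip(score, -1, 1) + 1) / 2, as documented in ``predict_proba`` above.
def _modified_huber_proba_sketch(scores):
    """Map raw binary decision scores to [0, 1] probabilities (sketch)."""
    scores = np.asarray(scores, dtype=np.float64)
    return (np.clip(scores, -1.0, 1.0) + 1.0) / 2.0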
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
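            # Expose the averaged weights only once at least ``average``
            # samples have been processed (``t_`` counts samples plus an
            # initial offset of 1); until then keep the plain SGD weights.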
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
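# Illustrative sketch only (the helper name below is ours): the learning-rate
# schedules documented for the SGD estimators. The real schedules are applied
# inside the Cython plain_sgd/average_sgd routines; ``t0`` below stands in for
# the heuristic offset chosen internally for the 'optimal' schedule.
def _eta_schedule_sketch(schedule, t, eta0=0.01, alpha=0.0001,
                         power_t=0.25, t0=1.0):
    if schedule == "constant":
        return eta0
    elif schedule == "optimal":
        return 1.0 / (alpha * (t + t0))
    elif schedule == "invscaling":
        return eta0 / pow(t, power_t)
    raise ValueError("unknown learning-rate schedule %r" % schedule)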
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
        optimal: eta = 1.0 / (alpha * (t + t0))
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
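    Predictions contain one value per sample; the fitted values depend on
    the SGD iterates, so as an illustrative sketch only the output shape is
    shown:
    >>> clf.predict(X).shape
    (10,)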
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
Intel-Corporation/tensorflow | tensorflow/contrib/distributions/python/ops/mixture_same_family.py | 18 | 15014 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The same-family Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class MixtureSameFamily(distribution.Distribution):
"""Mixture (same-family) distribution.
The `MixtureSameFamily` distribution implements a (batch of) mixture
distribution where all components are from different parameterizations of the
same distribution type. It is parameterized by a `Categorical` "selecting
distribution" (over `k` components) and a components distribution, i.e., a
`Distribution` with a rightmost batch shape (equal to `[k]`) which indexes
each (batch of) component.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
### Create a mixture of two scalar Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.Normal(
loc=[-1., 1], # One for each component.
scale=[0.1, 0.5])) # And same here.
gm.mean()
# ==> 0.4
gm.variance()
# ==> 1.018
# Plot PDF.
x = np.linspace(-2., 3., int(1e4), dtype=np.float32)
import matplotlib.pyplot as plt
plt.plot(x, gm.prob(x).eval());
### Create a mixture of two Bivariate Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.MultivariateNormalDiag(
loc=[[-1., 1], # component 1
[1, -1]], # component 2
scale_identity_multiplier=[.3, .6]))
gm.mean()
# ==> array([ 0.4, -0.4], dtype=float32)
gm.covariance()
# ==> array([[ 1.119, -0.84],
# [-0.84, 1.119]], dtype=float32)
# Plot PDF contours.
  def meshgrid(x, y=None):
    y = x if y is None else y
[gx, gy] = np.meshgrid(x, y, indexing='ij')
gx, gy = np.float32(gx), np.float32(gy)
grid = np.concatenate([gx.ravel()[None, :], gy.ravel()[None, :]], axis=0)
return grid.T.reshape(x.size, y.size, 2)
grid = meshgrid(np.linspace(-2, 2, 100, dtype=np.float32))
plt.contour(grid[..., 0], grid[..., 1], gm.prob(grid).eval());
```
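  Sampling follows the same batch/event semantics; as an illustrative sketch
  (sample values are random, so only the shape is meaningful), five draws
  from the bivariate mixture above form a tensor of shape `[5, 2]`:
  ```python
  samples = gm.sample(5)   # shape: [5, 2]
  ```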
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
mixture_distribution,
components_distribution,
validate_args=False,
allow_nan_stats=True,
name="MixtureSameFamily"):
"""Construct a `MixtureSameFamily` distribution.
Args:
mixture_distribution: `tfp.distributions.Categorical`-like instance.
Manages the probability of selecting components. The number of
categories must match the rightmost batch dimension of the
`components_distribution`. Must have either scalar `batch_shape` or
`batch_shape` matching `components_distribution.batch_shape[:-1]`.
components_distribution: `tfp.distributions.Distribution`-like instance.
Right-most batch dimension indexes components.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: `if not mixture_distribution.dtype.is_integer`.
ValueError: if mixture_distribution does not have scalar `event_shape`.
ValueError: if `mixture_distribution.batch_shape` and
`components_distribution.batch_shape[:-1]` are both fully defined and
the former is neither scalar nor equal to the latter.
ValueError: if `mixture_distribution` categories does not equal
`components_distribution` rightmost batch shape.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
self._mixture_distribution = mixture_distribution
self._components_distribution = components_distribution
self._runtime_assertions = []
s = components_distribution.event_shape_tensor()
s_dim0 = tensor_shape.dimension_value(s.shape[0])
self._event_ndims = (s_dim0
if s_dim0 is not None
else array_ops.shape(s)[0])
if not mixture_distribution.dtype.is_integer:
raise ValueError(
"`mixture_distribution.dtype` ({}) is not over integers".format(
mixture_distribution.dtype.name))
if (mixture_distribution.event_shape.ndims is not None
and mixture_distribution.event_shape.ndims != 0):
raise ValueError("`mixture_distribution` must have scalar `event_dim`s")
elif validate_args:
self._runtime_assertions += [
control_flow_ops.assert_has_rank(
mixture_distribution.event_shape_tensor(), 0,
message="`mixture_distribution` must have scalar `event_dim`s"),
]
mdbs = mixture_distribution.batch_shape
cdbs = components_distribution.batch_shape.with_rank_at_least(1)[:-1]
if mdbs.is_fully_defined() and cdbs.is_fully_defined():
if mdbs.ndims != 0 and mdbs != cdbs:
raise ValueError(
"`mixture_distribution.batch_shape` (`{}`) is not "
"compatible with `components_distribution.batch_shape` "
"(`{}`)".format(mdbs.as_list(), cdbs.as_list()))
elif validate_args:
mdbs = mixture_distribution.batch_shape_tensor()
cdbs = components_distribution.batch_shape_tensor()[:-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
distribution_util.pick_vector(
mixture_distribution.is_scalar_batch(), cdbs, mdbs),
cdbs,
message=(
"`mixture_distribution.batch_shape` is not "
"compatible with `components_distribution.batch_shape`"))]
km = tensor_shape.dimension_value(
mixture_distribution.logits.shape.with_rank_at_least(1)[-1])
kc = tensor_shape.dimension_value(
components_distribution.batch_shape.with_rank_at_least(1)[-1])
if km is not None and kc is not None and km != kc:
raise ValueError("`mixture_distribution components` ({}) does not "
"equal `components_distribution.batch_shape[-1]` "
"({})".format(km, kc))
elif validate_args:
km = array_ops.shape(mixture_distribution.logits)[-1]
kc = components_distribution.batch_shape_tensor()[-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
km, kc,
message=("`mixture_distribution components` does not equal "
"`components_distribution.batch_shape[-1:]`")),
]
elif km is None:
km = array_ops.shape(mixture_distribution.logits)[-1]
self._num_components = km
super(MixtureSameFamily, self).__init__(
dtype=self._components_distribution.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
self._mixture_distribution._graph_parents # pylint: disable=protected-access
+ self._components_distribution._graph_parents), # pylint: disable=protected-access
name=name)
@property
def mixture_distribution(self):
return self._mixture_distribution
@property
def components_distribution(self):
return self._components_distribution
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.batch_shape_tensor()[:-1]
def _batch_shape(self):
return self.components_distribution.batch_shape.with_rank_at_least(1)[:-1]
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.event_shape_tensor()
def _event_shape(self):
return self.components_distribution.event_shape
def _sample_n(self, n, seed):
with ops.control_dependencies(self._runtime_assertions):
x = self.components_distribution.sample(n) # [n, B, k, E]
# TODO(jvdillon): Consider using tf.gather (by way of index unrolling).
npdt = x.dtype.as_numpy_dtype
mask = array_ops.one_hot(
indices=self.mixture_distribution.sample(n), # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = distribution_utils.pad_mixture_dimensions(
mask, self, self.mixture_distribution,
self._event_shape().ndims) # [n, B, k, [1]*e]
return math_ops.reduce_sum(
x * mask, axis=-1 - self._event_ndims) # [n, B, E]
def _log_prob(self, x):
with ops.control_dependencies(self._runtime_assertions):
x = self._pad_sample_dims(x)
log_prob_x = self.components_distribution.log_prob(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, axis=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_prob_x + log_mix_prob, axis=-1) # [S, B]
def _mean(self):
with ops.control_dependencies(self._runtime_assertions):
probs = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, [1]*e]
return math_ops.reduce_sum(
probs * self.components_distribution.mean(),
axis=-1 - self._event_ndims) # [B, E]
def _log_cdf(self, x):
x = self._pad_sample_dims(x)
log_cdf_x = self.components_distribution.log_cdf(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, axis=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_cdf_x + log_mix_prob, axis=-1) # [S, B]
def _variance(self):
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, [1]*e]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.variance(),
axis=-1 - self._event_ndims) # [B, E]
var_cond_mean = math_ops.reduce_sum(
probs * math_ops.squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-1 - self._event_ndims) # [B, E]
return mean_cond_var + var_cond_mean # [B, E]
def _covariance(self):
static_event_ndims = self.event_shape.ndims
if static_event_ndims != 1:
# Covariance is defined only for vector distributions.
raise NotImplementedError("covariance is not implemented")
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = distribution_utils.pad_mixture_dimensions(
distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims),
self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, 1, 1]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.covariance(),
axis=-3) # [B, e, e]
var_cond_mean = math_ops.reduce_sum(
probs * _outer_squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-3) # [B, e, e]
return mean_cond_var + var_cond_mean # [B, e, e]
def _pad_sample_dims(self, x):
with ops.name_scope("pad_sample_dims", values=[x]):
ndims = x.shape.ndims if x.shape.ndims is not None else array_ops.rank(x)
shape = array_ops.shape(x)
d = ndims - self._event_ndims
x = array_ops.reshape(x, shape=array_ops.concat([
shape[:d], [1], shape[d:]], axis=0))
return x
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _outer_squared_difference(x, y):
"""Convenience function analogous to tf.squared_difference."""
z = x - y
return z[..., array_ops.newaxis, :] * z[..., array_ops.newaxis]
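# Shape intuition for `_outer_squared_difference`, as an illustrative NumPy
# sketch (the helper name below is ours): for length-d vectors it yields the
# d x d matrix (x - y)(x - y)^T, which `_covariance` above sums over the
# mixture components.
def _outer_squared_difference_numpy_sketch(x, y):
  z = np.asarray(x) - np.asarray(y)
  return z[..., np.newaxis, :] * z[..., np.newaxis]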
| apache-2.0 |
k323r/flugkatze | cockpit/python/live_blit.py | 1 | 1899 | #!/usr/bin/env python
import numpy as np
import time
import matplotlib
matplotlib.use('TKagg')
from matplotlib import pyplot as plt
def randomwalk(dims=(256, 256), n=20, sigma=5, alpha=0.95, seed=1):
""" A simple random walk with memory """
r, c = dims
gen = np.random.RandomState(seed)
pos = gen.rand(2, n) * ((r,), (c,))
old_delta = gen.randn(2, n) * sigma
while True:
delta = (1. - alpha) * gen.randn(2, n) * sigma + alpha * old_delta
pos += delta
for ii in xrange(n):
if not (0. <= pos[0, ii] < r):
pos[0, ii] = abs(pos[0, ii] % r)
if not (0. <= pos[1, ii] < c):
pos[1, ii] = abs(pos[1, ii] % c)
old_delta = delta
yield pos
def run(niter=1000, doblit=True):
"""
Display the simulation using matplotlib, optionally using blit for speed
"""
fig, ax = plt.subplots(1, 1)
ax.set_aspect('equal')
ax.set_xlim(0, 255)
ax.set_ylim(0, 255)
ax.hold(True)
rw = randomwalk()
x, y = rw.next()
plt.show(False)
plt.draw()
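    # With blitting, the static background (axes frame, ticks, labels) is
    # cached once below and only the animated points are redrawn each frame.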
if doblit:
# cache the background
background = fig.canvas.copy_from_bbox(ax.bbox)
points = ax.plot(x, y, 'o')[0]
tic = time.time()
for ii in xrange(niter):
# update the xy data
x, y = rw.next()
points.set_data(x, y)
if doblit:
# restore background
fig.canvas.restore_region(background)
# redraw just the points
ax.draw_artist(points)
# fill in the axes rectangle
fig.canvas.blit(ax.bbox)
else:
# redraw everything
fig.canvas.draw()
plt.close(fig)
print "Blit = %s, average FPS: %.2f" % (
str(doblit), niter / (time.time() - tic))
if __name__ == '__main__':
run(doblit=False)
run(doblit=True)
| mit |
Eric89GXL/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 12 | 1434 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to their weight.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
pl.figure()
pl.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=pl.cm.bone)
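# The solid and dashed contours drawn below at levels=[0] are the decision
# boundaries (decision_function == 0) of the unweighted and weighted fits.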
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = pl.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = pl.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
pl.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
pl.xticks(())
pl.yticks(())
pl.show()
| bsd-3-clause |
msmbuilder/msmbuilder | msmbuilder/project_templates/cluster/cluster-plot.py | 9 | 1089 | """Plot cluster centers on tICA coordinates
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from msmbuilder.io import load_trajs, load_generic
sns.set_style('ticks')
colors = sns.color_palette()
## Load
kmeans = load_generic('kmeans.pickl')
meta, ktrajs = load_trajs('ktrajs')
meta, ttrajs = load_trajs('ttrajs', meta)
txx = np.concatenate(list(ttrajs.values()))
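# Stack all tICA-transformed trajectories into a single
# (total_frames, n_tics) array; the hexbin below uses its first two columns.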
def plot_cluster_centers(ax):
ax.hexbin(txx[:, 0], txx[:, 1],
cmap=sns.cubehelix_palette(as_cmap=True),
mincnt=1,
bins='log',
)
ax.scatter(kmeans.cluster_centers_[:, 0],
kmeans.cluster_centers_[:, 1],
s=40, c=colors[0],
)
ax.set_xlabel("tIC 1", fontsize=16)
ax.set_ylabel("tIC 2", fontsize=16)
## Plot 1
fig, ax = plt.subplots(figsize=(7, 5))
plot_cluster_centers(ax)
fig.tight_layout()
fig.savefig('kmeans-centers.pdf')
# {{xdg_open('kmeans-centers.pdf')}}
| lgpl-2.1 |
mikebenfield/scipy | tools/refguide_check.py | 5 | 28531 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script for checking whether anything is missing;
its output still needs to be reviewed manually. In some cases objects are
left out of the refguide for a good reason (an alias of another function,
deprecated, ...).
Another use of this helper script is to check the validity of code samples
in docstrings. This is different from doctesting [we do not aim to make
scipy docstrings doctestable!]; it just makes sure that the code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
import glob
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser, REMAINDER
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'special',
'stats',
'stats.mstats',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
'io.rst', # XXX: need to figure out how to deal w/ mat files
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
r'scipy\.special\..*_roots' # old aliases for scipy.special.*_roots
]
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
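    # For example (the names here are only illustrative), a refguide line
    # such as
    #    "   argrelmax  -- Calculate the relative maxima"
    # is matched by the first pattern, and ".. data:: golden" by the second.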
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
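    # Call the object with a bogus keyword argument while turning warnings
    # into errors: a deprecated callable raises DeprecationWarning (from its
    # deprecation wrapper) before it can complain about the unknown keyword;
    # anything else falls through and is reported as not deprecated.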
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
int_pattern = re.compile('^[0-9]+L?$')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted'}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# python 2 long integers are equal to python 3 integers
if self.int_pattern.match(want) and self.int_pattern.match(got):
if want.rstrip("L\r\n") == got.rstrip("L\r\n"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogenous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
return success, output
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
text = open(fname).read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,' # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-tutorial", action="store_true",
help="Skip running doctests in the tutorial.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_tutorial = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_tutorial:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_tutorial:
tut_path = os.path.join(os.getcwd(), 'doc', 'source', 'tutorial', '*.rst')
print('\nChecking tutorial files at %s:' % tut_path)
for filename in sorted(glob.glob(tut_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
dots=dots, doctest_warnings=args.doctest_warnings)
def scratch(): pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
SylvainCorlay/PyDev.Debugger | pydev_ipython/inputhook.py | 52 | 18411 | # coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
# We don't (in the context of PyDev console) actually set PyOS_InputHook, but rather
# while waiting for input on xmlrpc we run this code
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
If None, clear all app references. If one of ('wx', 'qt4'), clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the ``PyOS_InputHook`` for wxPython, which allows
wxPython to integrate with terminal-based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the PyOS_InputHook for PyQt4, which allows
PyQt4 to integrate with terminal-based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for PyGTK, which allows
PyGTK to integrate with terminal-based applications like
IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for GLUT, which allows GLUT to
integrate with terminal-based applications like IPython. Due to GLUT
limitations, it is currently not possible to start the event loop
without first creating a window. You should thus not create another
window, but instead use the one already created. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
This sets PyOS_InputHook to NULL, sets the display function to a
dummy one, and sets the timer to a dummy timer that will be triggered
very far in the future.
"""
import OpenGL.GLUT as glut
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the ``PyOS_InputHook`` for pyglet, which allows
pyglet to integrate with terminal-based applications like
IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for Gtk3, which allows
Gtk3 to integrate with terminal-based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK3
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
We call the function pyplot.pause, which updates and displays the active
figure during the pause. It is not MacOSX-specific, but it lets us
avoid input hooks in the native MacOSX backend.
Also, we should not import pyplot until the user does, because the
backend can only be chosen before pyplot is imported for the first
time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
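# Illustration: a minimal sketch of how a console front-end wires up this
# module.  Only the names defined above (set_return_control_callback,
# enable_gui, GUI_TK, get_inputhook) are real; the lambda callback and the
# choice of Tk are placeholders, and a working Tk installation is assumed.
def _example_enable_gui_usage():
    """Illustrative sketch, not a supported entry point."""
    # Register a callback telling the manager when to hand control back
    # to the caller; here we never block.
    set_return_control_callback(lambda: True)
    app = enable_gui(GUI_TK)      # probes for / creates the Tk root
    hook = get_inputhook()        # callable the console runs while idle
    return app, hook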
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt4",
"disable_qt4",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
| epl-1.0 |
zhongyx12/Faster-RCNN-Refinement | faster_rcnn/roi_data_layer/minibatch.py | 5 | 8725 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
import os
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
from ..utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
if cfg.TRAIN.HAS_RPN:
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds] \
if 'gt_ishard' in roidb[0] else np.zeros(gt_inds.size, dtype=int)
# blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds]
blobs['dontcare_areas'] = roidb[0]['dontcare_areas'] * im_scales[0] \
if 'dontcare_areas' in roidb[0] else np.zeros([0, 4], dtype=float)
blobs['im_info'] = np.array(
[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
blobs['im_name'] = os.path.basename(roidb[0]['image'])
else: # not using RPN
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_inside_weights \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_inside_weights'] = bbox_inside_blob
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32)
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][keep_inds, :], num_classes)
return labels, overlaps, rois, bbox_targets, bbox_inside_weights
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
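# Illustration: a tiny worked example of the 4-of-4*K expansion above.
# The numbers are made up; it assumes cfg.TRAIN.BBOX_INSIDE_WEIGHTS is set
# (the stock configuration provides it) and the NumPy versions this code
# targets, which accept the float class index used for slicing above.
def _example_expand_bbox_targets():
    """Expand one RoI of class 2 with targets (0.1, 0.2, 0.3, 0.4)."""
    compact = np.array([[2, 0.1, 0.2, 0.3, 0.4]], dtype=np.float32)
    targets, inside_weights = _get_bbox_regression_labels(compact, num_classes=3)
    # targets has shape (1, 12); only columns 8..11 (class 2) are non-zero.
    return targets, inside_weights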
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
mlovci/mpld3 | mpld3/test_plots/test_text.py | 21 | 1305 | """Plot to test text"""
import matplotlib.pyplot as plt
import mpld3
def create_plot():
fig, ax = plt.subplots()
ax.grid(color='gray')
# test font sizes
x = 0.1
for y, size in zip([0.1, 0.3, 0.5, 0.7, 0.9],
[8, 12, 16, 20, 24]):
ax.text(x, y, "size={0}".format(size), size=size, ha='left')
# test horizontal alignment
x = 0.5
for y, align in zip([0.2, 0.4, 0.6],
['left', 'center', 'right']):
ax.text(x, y, "ha=" + align, ha=align, size=20)
# test vertical alignment
y = 0.9
for x, align in zip([0.5, 0.7, 0.9],
['top', 'center', 'bottom']):
ax.text(x, y, "va=" + align, ha='center', va=align, size=14)
# test colors & rotations
x = 0.8
for y, c, r in zip([0.15, 0.4, 0.65],
['red', 'blue', 'green'],
[-45, 0, 45]):
ax.text(x, y, "{0} rot={1}".format(c, r),
size=18, color=c, rotation=r, ha='center', va='center')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_title('title', size=20)
return fig
def test_text():
fig = create_plot()
html = mpld3.fig_to_html(fig)
plt.close(fig)
if __name__ == "__main__":
mpld3.show(create_plot())
| bsd-3-clause |
michaelpacer/dynd-python | dynd/benchmarks/benchmark_random.py | 8 | 1751 | from operator import add
from dynd import nd, ndt
import matplotlib
import matplotlib.pyplot
from benchrun import Benchmark, median
from benchtime import Timer, CUDATimer
#size = [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000]
size = [10, 100, 1000, 10000, 100000, 1000000, 10000000]
class UniformBenchmark(Benchmark):
parameters = ('size',)
size = size
def __init__(self, cuda = False):
Benchmark.__init__(self)
self.cuda = cuda
@median
def run(self, size):
if self.cuda:
dst_tp = ndt.type('cuda_device[{} * float64]'.format(size))
else:
dst_tp = ndt.type('{} * float64'.format(size))
dst = nd.empty(dst_tp)
with CUDATimer() if self.cuda else Timer() as timer:
nd.uniform(dst_tp = dst_tp)
return timer.elapsed_time()
class NumPyUniformBenchmark(Benchmark):
parameters = ('size',)
size = size
@median
def run(self, size):
import numpy as np
with Timer() as timer:
np.random.uniform(size = size)
return timer.elapsed_time()
class PyCUDAUniformBenchmark(Benchmark):
parameters = ('size',)
size = size
def __init__(self, gen):
Benchmark.__init__(self)
self.gen = gen
@median
def run(self, size):
import numpy as np
with CUDATimer() as timer:
self.gen.gen_uniform(size, np.float64)
return timer.elapsed_time()
if __name__ == '__main__':
cuda = True
benchmark = UniformBenchmark(cuda = cuda)
benchmark.plot_result(loglog = True)
benchmark = NumPyUniformBenchmark()
benchmark.plot_result(loglog = True)
if cuda:
from pycuda import curandom
benchmark = PyCUDAUniformBenchmark(curandom.XORWOWRandomNumberGenerator())
benchmark.plot_result(loglog = True)
matplotlib.pyplot.show() | bsd-2-clause |
xingjiepan/figurePXJ | figurePXJ/curve_plots.py | 1 | 1131 | import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from .Figure import Figure
class SmoothCurvePlot(Figure):
'''A class for creating a plot of smooth curves.
The curves are specified in the info_dict as
'curves' : [([x10, x11, ...], [y10, y11, ...]), ...]
'''
def __init__(self, info_dict={}):
super().__init__(info_dict)
def make_plot(self):
for i, curve in enumerate(self.info_dict['curves']):
self.plot_one_curve(curve, i)
self.set_axes()
self.draw_auxiliary_lines()
self.plot_labels()
self.plot_title()
def plot_one_curve(self, curve, index):
# Interpolate the input points to get a smooth curve
x = curve[0]
y = curve[1]
interpolate_curve = interp1d(x, y, kind='cubic')
new_x = np.linspace(min(x), max(x), 200)
new_y = interpolate_curve(new_x)
# Draw the plot
color = self.info_dict['style'].color('normal', 'basics', index)
plt.plot(new_x, new_y,
color=color, linewidth=4)
| gpl-3.0 |
dssg/wikienergy | tests/test_appliance_trace.py | 1 | 1149 | import sys
import os.path
sys.path.append(os.path.abspath(os.pardir))
import disaggregator as da
import unittest
import pandas as pd
import numpy as np
class ApplianceTraceTestCase(unittest.TestCase):
def setUp(self):
index = pd.date_range('1/1/2013', periods=200, freq='15T')
data = np.zeros(200)
series = pd.Series(data, index=index)
self.normal_trace = da.ApplianceTrace(series,{})
def test_get_metadata(self):
self.assertIsInstance(self.normal_trace.metadata,dict,
'metadata should be a dict')
def test_get_sampling_rate(self):
self.assertEqual(self.normal_trace.get_sampling_rate(),'15T',
'sampling rate of test should be "15T"')
def test_get_series(self):
self.assertIsInstance(self.normal_trace.series,pd.Series,
'series should be pd.Series')
self.assertIsInstance(self.normal_trace.series.index,
pd.DatetimeIndex,
'trace series index should be pd.DatetimeIndex')
if __name__ == "__main__":
unittest.main()
| mit |
harmslab/epistasis | setup.py | 2 | 3501 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command, Extension
# Package meta-data.
NAME = 'epistasis'
DESCRIPTION = 'A Python API for estimating statistical high-order epistasis in genotype-phenotype maps.'
URL = 'https://github.com/harmslab/epistasis'
EMAIL = '[email protected]'
AUTHOR = 'Zachary R. Sailer'
REQUIRES_PYTHON = '>=3.3.0'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
"cython",
"numpy>=1.15.2",
"pandas>=0.24.2",
"scikit-learn>=0.20.0",
"scipy>=1.1.0",
"emcee>=2.2.1",
"lmfit>=0.9.11",
"matplotlib>=3.0.0",
"gpmap>=0.6.0",
]
# Handling a Cython extension is a pain! Need to
# import numpy before it's installed... Used this
# Stackoverflow solution:
# https://stackoverflow.com/a/42163080
try:
from Cython.Distutils import build_ext
except:
# If we couldn't import Cython, use the normal setuptools
# and look for a pre-compiled .c file instead of a .pyx file
from setuptools.command.build_ext import build_ext
extension = Extension("epistasis.matrix_cython", ["epistasis/matrix_cython.c"])
else:
# If we successfully imported Cython, look for a .pyx file
extension = Extension("epistasis.matrix_cython", ["epistasis/matrix_cython.pyx"])
class CustomBuildExtCommand(build_ext):
"""build_ext command for use when numpy headers are needed."""
def run(self):
# Import numpy here, only when headers are needed
import numpy
# Add numpy headers to include_dirs
self.include_dirs.append(numpy.get_include())
# Call original build_ext command
build_ext.run(self)
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
ext_modules=[extension],
install_requires=REQUIRED,
extras_require = {
'test': ['pytest'],
},
include_package_data=True,
license='UNLICENSE',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='epistasis high-order genetics genotype-phenotype-maps',
cmdclass = {'build_ext': CustomBuildExtCommand},
)
| unlicense |
HighEnergyDataScientests/bnpcompetition | scripts/two_layer_training.py | 1 | 6135 | #!/usr/bin/python
###################################################################################################################
### This code was developed by the DataTistics team on Kaggle
### Do not copy or modify without written approval from one of the team members.
###################################################################################################################
#
############################### This is two_layer_training module #################################################
import numpy as np
from sklearn.metrics import log_loss
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import SGDClassifier
import xgboost as xgb
#fixing random state
random_state=1
def First_layer_classifiers (X, X_train, X_valid, X_test, y, y_train, y_valid, y_test, temp_test):
#Parameters and data for the XGBoost classifier
num_boost_round = 5000
params = {"objective": "binary:logistic",
"eta": 0.03,
"nthread":3,
"max_depth": 6,
"subsample": 0.67,
"colsample_bytree": 0.9,
"eval_metric": "logloss",
"n_estimators": 100,
"silent": 1,
"seed": 93425
}
#Defining the classifiers
clfs = {'LRC' : LogisticRegression(n_jobs=-1, random_state=random_state),
'SVM' : SVC(probability=True, max_iter=100, random_state=random_state),
'RFC' : RandomForestClassifier(n_estimators=100, n_jobs=-1,
random_state=random_state),
'GBM' : GradientBoostingClassifier(n_estimators=50,
random_state=random_state),
'ETC' : ExtraTreesClassifier(n_estimators=100, n_jobs=-1,
random_state=random_state),
'KNN' : KNeighborsClassifier(n_neighbors=30, n_jobs=-1),
'ABC' : AdaBoostClassifier(DecisionTreeClassifier(max_depth=30),
algorithm="SAMME", n_estimators=350,
random_state=random_state),
'SGD' : SGDClassifier(loss="log", n_iter = 1100, n_jobs=-1,
random_state=random_state),
'DTC' : DecisionTreeClassifier(max_depth=7, random_state=random_state),
'XGB' : 'XGB'
}
#predictions on the validation and test sets
p_valid = []
p_test = []
p_ttest_t = []
print('')
print('Performance of individual classifiers (1st layer) on X_test')
print('-----------------------------------------------------------')
for model_name, clf in clfs.items():
#First run. Training on (X_train, y_train) and predicting on X_valid.
if model_name == 'XGB':
dtrain = xgb.DMatrix(X_train, y_train)
dvalid = xgb.DMatrix(X_valid, y_valid)
watchlist = [(dtrain, 'train'),(dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=100, verbose_eval=True)
print("Best Score for this run : " + str(gbm.best_score))
y_xgb = gbm.predict(xgb.DMatrix(X_valid),ntree_limit=gbm.best_ntree_limit)
#
one = np.ones(shape=(y_xgb.shape))
q = np.subtract(one, y_xgb)
yv = np.column_stack((q,y_xgb))
else:
clf.fit(X_train, y_train)
yv = clf.predict_proba(X_valid)
p_valid.append(yv)
#Printing out the performance of the classifier
print('{:10s} {:2s} {:1.7f}'.format('%s: ' %(model_name), 'logloss_val =>', log_loss(y_valid, yv[:,1])))
#Second run. Training on (X, y) and predicting on X_test.
if model_name == 'XGB':
dtrain = xgb.DMatrix(X, y)
dvalid = xgb.DMatrix(X_test, y_test)
watchlist = [(dtrain, 'train'),(dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=100, verbose_eval=True)
print("Best Score for this run : " + str(gbm.best_score))
y_xgb_test = gbm.predict(xgb.DMatrix(X_test),ntree_limit=gbm.best_ntree_limit)
y_xgb_ttest = gbm.predict(xgb.DMatrix(temp_test),ntree_limit=gbm.best_ntree_limit)
#
one_test = np.ones(shape=(y_xgb_test.shape))
q_test = np.subtract(one_test, y_xgb_test)
yt = np.column_stack((q_test,y_xgb_test))
#
one_ttest = np.ones(shape=(y_xgb_ttest.shape))
q_ttest = np.subtract(one_ttest, y_xgb_ttest)
yt_tt = np.column_stack((q_ttest,y_xgb_ttest))
else:
clf.fit(X, y)
yt = clf.predict_proba(X_test)
yt_tt = clf.predict_proba(temp_test)
p_test.append(yt)
p_ttest_t.append(yt_tt)
#Printing out the performance of the classifier
print('{:10s} {:2s} {:1.7f}'.format('%s: ' %(model_name), 'logloss_test =>', log_loss(y_test, yt[:,1])))
print('')
return p_valid, p_test, p_ttest_t
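# Illustration: a sketch of how the two functions in this script are chained.
# The split ratios, the variable names and the use of sklearn.cross_validation
# are assumptions made for the example only.
def _example_two_layer_run(X_all, y_all, X_submission):
    """Run the first layer on train/valid/test splits, then stack them."""
    from sklearn.cross_validation import train_test_split
    X, X_test, y, y_test = train_test_split(X_all, y_all, test_size=0.2,
                                            random_state=random_state)
    X_train, X_valid, y_train, y_valid = train_test_split(
        X, y, test_size=0.25, random_state=random_state)
    p_valid, p_test, p_ttest_t = First_layer_classifiers(
        X, X_train, X_valid, X_test, y, y_train, y_valid, y_test, X_submission)
    return Second_layer_ensembling(p_valid, p_test, y_valid, y_test, p_ttest_t)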
def Second_layer_ensembling (p_valid, p_test, y_valid, y_test, p_ttest_t):
print('')
print('Performance of optimization based ensemblers (2nd layer) on X_test')
print('------------------------------------------------------------------')
#Creating the data for the 2nd layer
XV = np.hstack(p_valid)
XT = np.hstack(p_test)
XTTT = np.hstack(p_ttest_t)
clf = LogisticRegressionCV(scoring='log_loss', random_state=random_state)
clf = clf.fit(XV, y_valid)
yT = clf.predict_proba(XT)
yt_out = clf.predict_proba(XTTT)
print('{:20s} {:2s} {:1.7f}'.format('Ensemble of Classifiers', 'logloss_ensembled =>', log_loss(y_test, yT[:,1])))
return yt_out | apache-2.0 |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/testing/jpl_units/UnitDblConverter.py | 3 | 5413 | #===========================================================================
#
# UnitDblConverter
#
#===========================================================================
"""UnitDblConverter module containing class UnitDblConverter."""
#===========================================================================
# Place all imports after here.
#
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
import matplotlib.projections.polar as polar
from matplotlib.cbook import iterable
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblConverter' ]
#===========================================================================
# A special function for use with the matplotlib FuncFormatter class
# for formatting axes with radian units.
# This was copied from matplotlib example code.
def rad_fn(x, pos = None ):
"""Radian function formatter."""
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return str(x)
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n % 2 == 0:
return r'$%s\pi$' % (n/2,)
else:
return r'$%s\pi/2$' % (n,)
#===========================================================================
class UnitDblConverter( units.ConversionInterface ):
""": A matplotlib converter class. Provides matplotlib conversion
functionality for the Monte UnitDbl class.
"""
# default for plotting
defaults = {
"distance" : 'km',
"angle" : 'deg',
"time" : 'sec',
}
#------------------------------------------------------------------------
@staticmethod
def axisinfo( unit, axis ):
""": Returns information on how to handle an axis that has Epoch data.
= INPUT VARIABLES
- unit The units to use for a axis with Epoch data.
= RETURN VALUE
- Returns a matplotlib AxisInfo data structure that contains
minor/major formatters, major/minor locators, and default
label information.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
# Check to see if the value used for units is a string unit value
# or an actual instance of a UnitDbl so that we can use the unit
# value for the default axis label value.
if ( unit ):
if ( isinstance( unit, str ) ):
label = unit
else:
label = unit.label()
else:
label = None
if ( label == "deg" ) and isinstance( axis.axes, polar.PolarAxes ):
# If we want degrees for a polar plot, use the PolarPlotFormatter
majfmt = polar.PolarAxes.ThetaFormatter()
else:
majfmt = U.UnitDblFormatter( useOffset = False )
return units.AxisInfo( majfmt = majfmt, label = label )
#------------------------------------------------------------------------
@staticmethod
def convert( value, unit, axis ):
""": Convert value using unit to a float. If value is a sequence, return
the converted sequence.
= INPUT VARIABLES
- value The value or list of values that need to be converted.
- unit The units to use for a axis with Epoch data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
isNotUnitDbl = True
if ( iterable(value) and not isinstance(value, str) ):
if ( len(value) == 0 ):
return []
else:
return [ UnitDblConverter.convert( x, unit, axis ) for x in value ]
# We need to check to see if the incoming value is actually a UnitDbl and
# set a flag. If we get an empty list, then just return an empty list.
if ( isinstance(value, U.UnitDbl) ):
isNotUnitDbl = False
# If the incoming value behaves like a number, but is not a UnitDbl,
# then just return it because we don't know how to convert it
# (or it is already converted)
if ( isNotUnitDbl and units.ConversionInterface.is_numlike( value ) ):
return value
# If no units were specified, then get the default units to use.
if ( unit == None ):
unit = UnitDblConverter.default_units( value, axis )
# Convert the incoming UnitDbl value/values to float/floats
if isinstance( axis.axes, polar.PolarAxes ) and (value.type() == "angle"):
# Guarantee that units are radians for polar plots.
return value.convert( "rad" )
return value.convert( unit )
#------------------------------------------------------------------------
@staticmethod
def default_units( value, axis ):
""": Return the default unit for value, or None.
= INPUT VARIABLES
- value The value or list of values that need units.
= RETURN VALUE
- Returns the default units to use for value.
Return the default unit for value, or None.
"""
# Determine the default units based on the user preferences set for
# default units when printing a UnitDbl.
if ( iterable(value) and not isinstance(value, str) ):
return UnitDblConverter.default_units( value[0], axis )
else:
return UnitDblConverter.defaults[ value.type() ]
| gpl-3.0 |
yandex/rep | rep/utils.py | 1 | 18685 | """
Different helpful functions, objects, methods are collected here.
"""
from __future__ import division, print_function, absolute_import
from collections import OrderedDict
import time
import numpy
import pandas
from sklearn.utils.validation import column_or_1d
from sklearn.metrics import roc_curve
def weighted_quantile(array, quantiles, sample_weight=None, array_sorted=False, old_style=False):
"""Computing quantiles of array. Unlike the numpy.percentile, this function supports weights,
but it is inefficient and performs complete sorting.
:param array: distribution, array of shape [n_samples]
:param quantiles: floats from range [0, 1] with quantiles of shape [n_quantiles]
:param sample_weight: optional weights of samples, array of shape [n_samples]
:param array_sorted: if True, the sorting step will be skipped
:param old_style: if True, will correct output to be consistent with numpy.percentile.
:return: array of shape [n_quantiles]
Example:
>>> weighted_quantile([1, 2, 3, 4, 5], [0.5])
Out: array([ 3.])
>>> weighted_quantile([1, 2, 3, 4, 5], [0.5], sample_weight=[3, 1, 1, 1, 1])
Out: array([ 2.])
"""
array = numpy.array(array)
quantiles = numpy.array(quantiles)
sample_weight = check_sample_weight(array, sample_weight)
assert numpy.all(quantiles >= 0) and numpy.all(quantiles <= 1), 'Percentiles should be in [0, 1]'
if not array_sorted:
array, sample_weight = reorder_by_first(array, sample_weight)
weighted_quantiles = numpy.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
# To be consistent with numpy.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= numpy.sum(sample_weight)
return numpy.interp(quantiles, weighted_quantiles, array)
def reorder_by_first(*arrays):
"""
Applies the same permutation to all passed arrays,
permutation sorts the first passed array
"""
arrays = check_arrays(*arrays)
order = numpy.argsort(arrays[0])
return [arr[order] for arr in arrays]
def check_sample_weight(y_true, sample_weight):
"""Checks the weights, if None, returns array.
:param y_true: labels (or any array of length [n_samples])
:param sample_weight: None or array of length [n_samples]
:return: numpy.array of shape [n_samples]
"""
if sample_weight is None:
return numpy.ones(len(y_true), dtype=numpy.float)
else:
sample_weight = numpy.array(sample_weight, dtype=numpy.float)
assert len(y_true) == len(sample_weight), \
"The length of weights is different: not {0}, but {1}".format(len(y_true), len(sample_weight))
return sample_weight
class Flattener(object):
def __init__(self, data, sample_weight=None):
"""
Prepares a normalization function for some set of values;
it transforms the values to a uniform distribution on [0, 1].
:param data: predictions
:type data: list or numpy.array
:param sample_weight: weights
:type sample_weight: None or list or numpy.array
:return func: normalization function
Example of usage:
>>> normalizer = Flattener(signal)
>>> hist(normalizer(background))
>>> hist(normalizer(signal))
"""
sample_weight = check_sample_weight(data, sample_weight=sample_weight)
data = column_or_1d(data)
assert numpy.all(sample_weight >= 0.), 'sample weight must be non-negative'
self.data, sample_weight = reorder_by_first(data, sample_weight)
self.predictions = numpy.cumsum(sample_weight) / numpy.sum(sample_weight)
def __call__(self, data):
return numpy.interp(data, self.data, self.predictions)
class Binner(object):
def __init__(self, values, bins_number):
"""
Binner is a class that helps to split the values into several bins.
Initially an array of values is given, which is then split into 'bins_number' equal parts,
and thus we are computing limits (boundaries of bins).
"""
percentiles = [i * 100.0 / bins_number for i in range(1, bins_number)]
self.limits = numpy.percentile(values, percentiles)
def get_bins(self, values):
"""Given the values of feature, compute the index of bin
:param values: array of shape [n_samples]
:return: array of shape [n_samples]
"""
return numpy.searchsorted(self.limits, values)
def set_limits(self, limits):
"""Change the thresholds inside bins."""
self.limits = limits
@property
def bins_number(self):
""":return: number of bins"""
return len(self.limits) + 1
def split_into_bins(self, *arrays):
"""
:param arrays: data to be split; the first array determines the bin index for each sample
:return: sequence of length [n_bins] with values corresponding to each bin.
"""
values = arrays[0]
for array in arrays:
assert len(array) == len(values), "passed arrays have different length"
bins = self.get_bins(values)
result = []
for bin in range(len(self.limits) + 1):
indices = bins == bin
result.append([numpy.array(array)[indices] for array in arrays])
return result
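# Illustration: a short sketch of typical Binner usage (the values and the
# bin count below are arbitrary).
def _binner_example():
    """
    >>> binner = Binner(numpy.arange(100), bins_number=4)
    >>> binner.get_bins([5, 30, 70, 99])
    array([0, 1, 2, 3])
    """
    binner = Binner(numpy.arange(100), bins_number=4)
    return binner.get_bins([5, 30, 70, 99])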
def calc_ROC(prediction, signal, sample_weight=None, max_points=10000):
"""
Calculate the ROC curve and return a limited number of points.
This is needed for interactive plots, which suffer when too many points are drawn.
:param prediction: predictions
:type prediction: numpy.ndarray or list
:param signal: true labels
:type signal: array or list
:param sample_weight: weights
:type sample_weight: None or array or list
:param int max_points: maximum of used points on roc curve
:return: (tpr, tnr), (err_tnr, err_tpr), thresholds
"""
sample_weight = numpy.ones(len(signal)) if sample_weight is None else sample_weight
prediction, signal, sample_weight = check_arrays(prediction, signal, sample_weight)
assert set(signal) == {0, 1}, "the labels should be 0 and 1, labels are " + str(set(signal))
fpr, tpr, thresholds = roc_curve(signal, prediction, sample_weight=sample_weight)
tpr = numpy.insert(tpr, 0, [0.])
fpr = numpy.insert(fpr, 0, [0.])
thresholds = numpy.insert(thresholds, 0, [thresholds[0] + 1.])
tnr = 1 - fpr
weight_bck = sample_weight[signal == 0]
weight_sig = sample_weight[signal == 1]
err_tnr = numpy.sqrt(tnr * (1 - tnr) * numpy.sum(weight_bck ** 2)) / numpy.sum(weight_bck)
err_tpr = numpy.sqrt(tpr * (1 - tpr) * numpy.sum(weight_sig ** 2)) / numpy.sum(weight_sig)
if len(prediction) > max_points:
sum_weights = numpy.cumsum((fpr + tpr) / 2.)
sum_weights /= sum_weights[-1]
positions = numpy.searchsorted(sum_weights, numpy.linspace(0, 1, max_points))
tpr, tnr = tpr[positions], tnr[positions]
err_tnr, err_tpr = err_tnr[positions], err_tpr[positions]
thresholds = thresholds[positions]
return (tpr, tnr), (err_tnr, err_tpr), thresholds
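# Illustration: a minimal sketch of calc_ROC on a toy sample; the labels and
# predictions are made up.
def _calc_roc_example():
    labels = numpy.array([0, 0, 1, 1])
    predictions = numpy.array([0.1, 0.4, 0.35, 0.8])
    (tpr, tnr), (err_tnr, err_tpr), thresholds = calc_ROC(predictions, labels)
    # (tpr, tnr) can be drawn directly, e.g. pyplot.plot(tpr, tnr)
    return tpr, tnr, thresholds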
def calc_feature_correlation_matrix(df, weights=None):
"""
Calculate correlation matrix
:param pandas.DataFrame df: data of shape [n_samples, n_features]
:param weights: weights of shape [n_samples] (optional)
:return: correlation matrix for dataFrame of shape [n_features, n_features]
:rtype: numpy.ndarray
"""
values = numpy.array(df)
weights = check_sample_weight(df, sample_weight=weights)
means = numpy.average(values, weights=weights, axis=0)
values -= means
covariation = values.T.dot(values * weights[:, None])
diag = covariation.diagonal()
return covariation / numpy.sqrt(diag)[:, None] / numpy.sqrt(diag)[None, :]
def calc_hist_with_errors(x, weight=None, bins=60, normed=True, x_range=None, ignored_sideband=0.0):
"""
Calculate data for error bar (for plot pdf with errors)
:param x: data
:type x: list or numpy.array
:param weight: weights
:type weight: None or list or numpy.array
:return: tuple (x-points (list), y-points (list), y points errors (list), x points errors (list))
"""
weight = numpy.ones(len(x)) if weight is None else weight
x, weight = check_arrays(x, weight)
if x_range is None:
x_range = numpy.percentile(x, [100 * ignored_sideband, 100 * (1 - ignored_sideband)])
ans, bins = numpy.histogram(x, bins=bins, normed=normed, weights=weight, range=x_range)
yerr = []
normalization = 1.0
if normed:
normalization = float(len(bins) - 1) / float(sum(weight)) / (x_range[1] - x_range[0])
for i in range(len(bins) - 1):
weight_bin = weight[(x > bins[i]) * (x <= bins[i + 1])]
yerr.append(numpy.sqrt(sum(weight_bin * weight_bin)) * normalization)
bins_mean = [0.5 * (bins[i] + bins[i + 1]) for i in range(len(ans))]
xerr = [0.5 * (bins[i + 1] - bins[i]) for i in range(len(ans))]
return bins_mean, ans, yerr, xerr
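# Illustration: how the output of calc_hist_with_errors is usually drawn;
# matplotlib is imported locally and the sample data is synthetic.
def _hist_with_errors_example():
    import matplotlib.pyplot as plt
    data = numpy.random.normal(size=1000)
    x, y, yerr, xerr = calc_hist_with_errors(data, bins=30)
    plt.errorbar(x, y, yerr=yerr, xerr=xerr, fmt='o')
    plt.show()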
def get_efficiencies(prediction, spectator, sample_weight=None, bins_number=20,
thresholds=None, errors=False, ignored_sideband=0.0):
"""
Construct efficiency function dependent on spectator for each threshold
Different score functions available: Efficiency, Precision, Recall, F1Score,
and other things from sklearn.metrics
:param prediction: list of probabilities
:param spectator: list of spectator's values
:param bins_number: int, count of bins for plot
:param thresholds: list of prediction's threshold
(default=prediction's cuts for which efficiency will be [0.2, 0.4, 0.5, 0.6, 0.8])
:return:
if errors=False
OrderedDict threshold -> (x_values, y_values)
if errors=True
OrderedDict threshold -> (x_values, y_values, y_err, x_err)
All the parts: x_values, y_values, y_err, x_err are numpy.arrays of the same length.
"""
prediction, spectator, sample_weight = \
check_arrays(prediction, spectator, sample_weight)
spectator_min, spectator_max = weighted_quantile(spectator, [ignored_sideband, (1. - ignored_sideband)])
mask = (spectator >= spectator_min) & (spectator <= spectator_max)
spectator = spectator[mask]
prediction = prediction[mask]
bins_number = min(bins_number, len(prediction))
sample_weight = sample_weight if sample_weight is None else numpy.array(sample_weight)[mask]
if thresholds is None:
thresholds = [weighted_quantile(prediction, quantiles=1 - eff, sample_weight=sample_weight)
for eff in [0.2, 0.4, 0.5, 0.6, 0.8]]
binner = Binner(spectator, bins_number=bins_number)
if sample_weight is None:
sample_weight = numpy.ones(len(prediction))
bins_data = binner.split_into_bins(spectator, prediction, sample_weight)
bin_edges = numpy.array([spectator_min] + list(binner.limits) + [spectator_max])
xerr = numpy.diff(bin_edges) / 2.
result = OrderedDict()
for threshold in thresholds:
x_values = []
y_values = []
N_in_bin = []
for num, (masses, probabilities, weights) in enumerate(bins_data):
y_values.append(numpy.average(probabilities > threshold, weights=weights))
N_in_bin.append(numpy.sum(weights))
if errors:
x_values.append((bin_edges[num + 1] + bin_edges[num]) / 2.)
else:
x_values.append(numpy.mean(masses))
x_values, y_values, N_in_bin = check_arrays(x_values, y_values, N_in_bin)
if errors:
result[threshold] = (x_values, y_values, numpy.sqrt(y_values * (1 - y_values) / N_in_bin), xerr)
else:
result[threshold] = (x_values, y_values)
return result
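# Illustration: a sketch of plotting the efficiencies returned above; the
# argument names are placeholders and matplotlib is imported locally.
def _efficiencies_example(prediction, spectator, weights=None):
    import matplotlib.pyplot as plt
    effs = get_efficiencies(prediction, spectator, sample_weight=weights,
                            bins_number=15, errors=True)
    for threshold, (x, y, yerr, xerr) in effs.items():
        plt.errorbar(x, y, yerr=yerr, xerr=xerr, fmt='o',
                     label='cut = {:.2f}'.format(threshold))
    plt.legend()
    plt.show()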
def train_test_split(*arrays, **kw_args):
"""
Does the same thing as sklearn.cross_validation.train_test_split.
Additionally has 'allow_none' parameter.
:param arrays: arrays to split with same first dimension
:type arrays: list[numpy.array] or list[pandas.DataFrame]
:param bool allow_none: default False, is set to True, allows
non-first arguments to be None (in this case, both resulting train and test parts are None).
"""
from sklearn import cross_validation
allow_none = kw_args.pop('allow_none', False)
assert len(arrays) > 0, "at least one array should be passed"
length = len(arrays[0])
for array in arrays:
assert len(array) == length, "different size"
train_indices, test_indices = cross_validation.train_test_split(range(length), **kw_args)
result = []
for array in arrays:
if isinstance(array, pandas.DataFrame):
result.append(array.iloc[train_indices, :])
result.append(array.iloc[test_indices, :])
elif (array is None) and allow_none:
# specially for checking weights
result.append(None)
result.append(None)
else:
result.append(numpy.array(array)[train_indices])
result.append(numpy.array(array)[test_indices])
return result
def train_test_split_group(group_column, *arrays, **kw_args):
"""
Modification of :class:`train_test_split` which alters the splitting rule.
:param group_column: array-like of shape [n_samples] with indices of groups,
events from one group will be kept together (all events in train or all events in test).
If `group_column` is used, train_size and test_size will refer to number of groups, not events
:param arrays: arrays to split
:type arrays: list[numpy.array] or list[pandas.DataFrame]
:param bool allow_none: default False
(useful for sample_weight - after splitting train and test of `None` are again `None`)
"""
from sklearn import cross_validation
allow_none = kw_args.pop('allow_none', None)
assert len(arrays) > 0, "at least one array should be passed"
length = len(arrays[0])
for array in arrays:
assert len(array) == length, "different size"
initial_data = numpy.array(group_column)
assert len(initial_data) == length, "group column must have the same length"
group_ids = numpy.unique(initial_data)
train_indices, test_indices = cross_validation.train_test_split(group_ids, **kw_args)
train_indices = numpy.in1d(initial_data, train_indices)
test_indices = numpy.in1d(initial_data, test_indices)
result = []
for array in arrays:
if isinstance(array, pandas.DataFrame):
result.append(array.iloc[train_indices, :])
result.append(array.iloc[test_indices, :])
elif (array is None) and allow_none:
# specially for checking weights
result.append(None)
result.append(None)
else:
result.append(numpy.array(array)[train_indices])
result.append(numpy.array(array)[test_indices])
return result
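# Illustration: a toy group-aware split; all events sharing an event_id end
# up on the same side.  The sizes refer to the number of groups, not events.
def _group_split_example():
    event_id = numpy.array([0, 0, 1, 1, 2, 2])
    features = numpy.arange(12).reshape(6, 2)
    labels = numpy.array([0, 1, 0, 1, 0, 1])
    features_train, features_test, labels_train, labels_test = \
        train_test_split_group(event_id, features, labels,
                               train_size=2, test_size=1)
    return features_train, features_test, labels_train, labels_test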
def get_columns_dict(columns):
"""
Get (new column: old column) dict expressions.
This function is used to process names of features, which can contain expressions.
:param list[str] columns: columns names
:rtype: dict
"""
result = OrderedDict()
for column in columns:
column_split = column.split(':')
assert len(column_split) < 3, 'Error in parsing feature expression {}'.format(column)
if len(column_split) == 2:
result[column_split[0].strip()] = column_split[1].strip()
else:
result[column] = column
return result
def get_columns_in_df(df, columns):
"""
Get columns in data frame using *numexpr* evaluation
:param pandas.DataFrame df: data
:param columns: necessary columns
:type columns: None or list[str]
:return: data frame with pointed columns
"""
if columns is None:
return df
columns_dict = get_columns_dict(columns)
df_new = OrderedDict()
for column_new, column in columns_dict.items():
if column in df.columns:
df_new[column_new] = df[column]
else:
# warning - this thing is known to fail in threads
# numexpr.evaluate(column, local_dict=df)
# thus we are using python engine, which is slow :(
df_new[column_new] = df.eval(column, engine='python')
return pandas.DataFrame(df_new)
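# Illustration: the 'new_name: expression' naming convention in practice;
# the column names below are made up.
def _columns_expression_example():
    df = pandas.DataFrame({'pt1': [1., 2.], 'pt2': [2., 8.]})
    # keeps 'pt1' as-is and evaluates 'pt2 / pt1' into a new 'pt_ratio' column
    return get_columns_in_df(df, ['pt1', 'pt_ratio: pt2 / pt1'])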
def check_arrays(*arrays):
"""
Left for consistency, version of `sklearn.validation.check_arrays`
:param list[iterable] arrays: arrays with same length of first dimension.
"""
assert len(arrays) > 0, 'The number of array must be greater than zero'
checked_arrays = []
shapes = []
for arr in arrays:
if arr is not None:
checked_arrays.append(numpy.array(arr))
shapes.append(checked_arrays[-1].shape[0])
else:
checked_arrays.append(None)
assert numpy.sum(numpy.array(shapes) == shapes[0]) == len(shapes), 'Different shapes of the arrays {}'.format(
shapes)
return checked_arrays
def fit_metric(metric, *args, **kargs):
"""
Metric can implement one of two interfaces (function or object).
This function fits metrics, if it is required (by simply checking presence of fit method).
:param metric: metric function, following REP conventions
"""
if hasattr(metric, 'fit'):
metric.fit(*args, **kargs)
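# Illustration: an object-style metric is any callable with an optional fit
# method; fit_metric calls fit when it is present and does nothing otherwise.
# The scoring rule below is a toy; see rep.report.metrics for real metrics.
class _ExampleMetricWithFit(object):
    def fit(self, X, y, sample_weight=None):
        self.norm = numpy.sum(check_sample_weight(y, sample_weight))
        return self
    def __call__(self, y, proba, sample_weight=None):
        sample_weight = check_sample_weight(y, sample_weight)
        return numpy.sum(sample_weight * (proba[:, 1] > 0.5) * y) / self.norm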
class Stopwatch(object):
"""
Simple tool to measure time.
If your internet connection is reliable, use %time magic.
>>> with Stopwatch() as timer:
>>> # do something here
>>> classifier.fit(X, y)
>>> # print how much time was spent
>>> print(timer)
"""
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, err_type, err_value, err_traceback):
self.stop = time.time()
self.err_type = err_type
self.err_value = err_value
self.err_traceback = err_traceback
@property
def elapsed(self):
return self.stop - self.start
def __repr__(self):
result = "interval: {:.2f} sec".format(self.elapsed)
if self.err_type is not None:
message = "\nError {error} of type {error_type} was raised"
result += message.format(error=repr(self.err_value), error_type=self.err_type)
return result
def take_last(sequence):
"""
Returns the last element in sequence or raises an error
"""
empty = True
for element in sequence:
empty = False
if empty:
raise IndexError('The sequence is empty.')
else:
return element
| apache-2.0 |
amolkahat/pandas | pandas/tests/extension/json/test_json.py | 2 | 8289 | import operator
import collections
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import PY2, PY36
from pandas.tests.extension import base
from .array import JSONArray, JSONDtype, make_data
pytestmark = pytest.mark.skipif(PY2, reason="Py2 doesn't have a UserDict")
@pytest.fixture
def dtype():
return JSONDtype()
@pytest.fixture
def data():
"""Length-100 PeriodArray for semantics test."""
data = make_data()
# Why the while loop? NumPy is unable to construct an ndarray from
# equal-length ndarrays. Many of our operations involve coercing the
# EA to an ndarray of objects. To avoid random test failures, we ensure
    # that our data is coercible to an ndarray. Several tests deal with only
# the first two elements, so that's what we'll check.
while len(data[0]) == len(data[1]):
data = make_data()
return JSONArray(data)
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return JSONArray([{}, {'a': 10}])
@pytest.fixture
def data_for_sorting():
return JSONArray([{'b': 1}, {'c': 4}, {'a': 2, 'c': 3}])
@pytest.fixture
def data_missing_for_sorting():
return JSONArray([{'b': 1}, {}, {'a': 4}])
@pytest.fixture
def na_value(dtype):
return dtype.na_value
@pytest.fixture
def na_cmp():
return operator.eq
@pytest.fixture
def data_for_grouping():
return JSONArray([
{'b': 1}, {'b': 1},
{}, {},
{'a': 0, 'c': 2}, {'a': 0, 'c': 2},
{'b': 1},
{'c': 2},
])
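# Editor's illustrative sketch (not part of the upstream test module): the
# fixtures above feed pandas' shared extension-array base tests; a Series can
# be backed directly by a JSONArray built from make_data().
def _example_series_backed_by_json_array():
    ser = pd.Series(JSONArray(make_data()[:3]))
    assert ser.dtype.name == 'json'
    return ser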
class BaseJSON(object):
# NumPy doesn't handle an array of equal-length UserDicts.
# The default assert_series_equal eventually does a
# Series.values, which raises. We work around it by
# converting the UserDicts to dicts.
def assert_series_equal(self, left, right, **kwargs):
if left.dtype.name == 'json':
assert left.dtype == right.dtype
left = pd.Series(JSONArray(left.values.astype(object)),
index=left.index, name=left.name)
right = pd.Series(JSONArray(right.values.astype(object)),
index=right.index, name=right.name)
tm.assert_series_equal(left, right, **kwargs)
def assert_frame_equal(self, left, right, *args, **kwargs):
tm.assert_index_equal(
left.columns, right.columns,
exact=kwargs.get('check_column_type', 'equiv'),
check_names=kwargs.get('check_names', True),
check_exact=kwargs.get('check_exact', False),
check_categorical=kwargs.get('check_categorical', True),
obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame')))
        jsons = left.dtypes[left.dtypes == 'json'].index
for col in jsons:
self.assert_series_equal(left[col], right[col],
*args, **kwargs)
left = left.drop(columns=jsons)
right = right.drop(columns=jsons)
tm.assert_frame_equal(left, right, *args, **kwargs)
class TestDtype(BaseJSON, base.BaseDtypeTests):
pass
class TestInterface(BaseJSON, base.BaseInterfaceTests):
def test_custom_asserts(self):
# This would always trigger the KeyError from trying to put
# an array of equal-length UserDicts inside an ndarray.
data = JSONArray([collections.UserDict({'a': 1}),
collections.UserDict({'b': 2}),
collections.UserDict({'c': 3})])
a = pd.Series(data)
self.assert_series_equal(a, a)
self.assert_frame_equal(a.to_frame(), a.to_frame())
b = pd.Series(data.take([0, 0, 1]))
with pytest.raises(AssertionError):
self.assert_series_equal(a, b)
with pytest.raises(AssertionError):
self.assert_frame_equal(a.to_frame(), b.to_frame())
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
@pytest.mark.skip(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
pass
class TestReshaping(BaseJSON, base.BaseReshapingTests):
pass
class TestGetitem(BaseJSON, base.BaseGetitemTests):
pass
class TestMissing(BaseJSON, base.BaseMissingTests):
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_frame(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
unhashable = pytest.mark.skip(reason="Unhashable")
unstable = pytest.mark.skipif(not PY36, # 3.6 or higher
reason="Dictionary order unstable")
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(BaseJSON, base.BaseMethodsTests):
@unhashable
def test_value_counts(self, all_data, dropna):
pass
@unhashable
def test_sort_values_frame(self):
# TODO (EA.factorize): see if _values_for_factorize allows this.
pass
@unstable
def test_argsort(self, data_for_sorting):
super(TestMethods, self).test_argsort(data_for_sorting)
@unstable
def test_argsort_missing(self, data_missing_for_sorting):
super(TestMethods, self).test_argsort_missing(
data_missing_for_sorting)
@unstable
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values(self, data_for_sorting, ascending):
super(TestMethods, self).test_sort_values(
data_for_sorting, ascending)
@unstable
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values_missing(self, data_missing_for_sorting, ascending):
super(TestMethods, self).test_sort_values_missing(
data_missing_for_sorting, ascending)
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_le(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_add(self, data_repeated):
pass
@unhashable
def test_hash_pandas_object_works(self, data, kind):
        super(TestMethods, self).test_hash_pandas_object_works(data, kind)
class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
*** ValueError: setting an array element with a sequence
"""
# We intentionally don't run base.BaseSetitemTests because pandas'
# internals has trouble setting sequences of values into scalar positions.
class TestGroupby(BaseJSON, base.BaseGroupbyTests):
@unhashable
def test_groupby_extension_transform(self):
"""
This currently fails in Series.name.setter, since the
name must be hashable, but the value is a dictionary.
I think this is what we want, i.e. `.name` should be the original
values, and not the values for factorization.
"""
@unhashable
def test_groupby_extension_apply(self):
"""
This fails in Index._do_unique_check with
> hash(val)
        E   TypeError: unhashable type: 'UserDict'
I suspect that once we support Index[ExtensionArray],
we'll be able to dispatch unique.
"""
@unstable
@pytest.mark.parametrize('as_index', [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super(TestGroupby, self).test_groupby_extension_agg(
as_index, data_for_grouping
)
class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
def test_error(self, data, all_arithmetic_operators):
pass
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with tm.assert_raises_regex(TypeError, "unsupported"):
ser + data
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super(TestArithmeticOps, self)._check_divmod_op(
s, op, other, exc=TypeError
)
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
pass
| bsd-3-clause |
josherick/bokeh | bokeh/charts/builder/dot_builder.py | 43 | 6160 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Dot class which lets you build your Dot charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors, make_scatter
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, FactorRange, GlyphRenderer, Range1d
from ...models.glyphs import Segment
from ...properties import Any, Bool, Either, List
def Dot(values, cat=None, stem=True, xscale="categorical", yscale="linear",
xgrid=False, ygrid=True, **kws):
""" Create a dot chart using :class:`DotBuilder <bokeh.charts.builder.dot_builder.DotBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
cat (list or bool, optional): list of string representing the categories.
Defaults to None.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Dot, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = OrderedDict()
xyvalues['python']=[2, 5]
xyvalues['pypy']=[12, 40]
xyvalues['jython']=[22, 30]
dot = Dot(xyvalues, ['cpu1', 'cpu2'], title='dots')
output_file('dot.html')
show(dot)
"""
return create_and_build(
DotBuilder, values, cat=cat, stem=stem, xscale=xscale, yscale=yscale,
xgrid=xgrid, ygrid=ygrid, **kws
)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DotBuilder(Builder):
"""This is the Dot class and it is in charge of plotting Dot chart
in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (segments and circles) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
List of string representing the categories. (Defaults to None.)
""")
stem = Bool(True, help="""
    Whether to draw a stem from each dot to the axis.
""")
def _process_data(self):
"""Take the Dot data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the rect glyph inside the ``_yield_renderers`` method.
"""
if not self.cat:
self.cat = [str(x) for x in self._values.index]
self._data = dict(cat=self.cat, zero=np.zeros(len(self.cat)))
# list to save all the attributes we are going to create
# list to save all the groups available in the incoming input
# Grouping
self._groups.extend(self._values.keys())
step = np.linspace(0, 1.0, len(self._values.keys()) + 1, endpoint=False)
for i, (val, values) in enumerate(self._values.items()):
# original y value
self.set_and_get("", val, values)
# x value
cats = [c + ":" + str(step[i + 1]) for c in self.cat]
self.set_and_get("cat", val, cats)
# zeros
self.set_and_get("z_", val, np.zeros(len(values)))
# segment top y value
self.set_and_get("seg_top_", val, values)
def _set_sources(self):
"""Push the Dot data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = FactorRange(factors=self._source.data["cat"])
cat = [i for i in self._attr if not i.startswith(("cat",))]
end = 1.1 * max(max(self._data[i]) for i in cat)
self.y_range = Range1d(start=0, end=end)
def _yield_renderers(self):
"""Use the rect glyphs to display the bars.
Takes reference points from data loaded at the source and
renders circle glyphs (and segments) on the related
coordinates.
"""
self._tuples = list(chunk(self._attr, 4))
colors = cycle_colors(self._tuples, self.palette)
# quartet elements are: [data, cat, zeros, segment_top]
for i, quartet in enumerate(self._tuples):
            # draw the segment first so the scatter marker is placed on top
            # of it and no segment chunk shows on top of the circle
if self.stem:
glyph = Segment(
x0=quartet[1], y0=quartet[2], x1=quartet[1], y1=quartet[3],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
renderer = make_scatter(
self._source, quartet[1], quartet[0], 'circle',
colors[i - 1], line_color='black', size=15, fill_alpha=1.,
)
self._legends.append((self._groups[i], [renderer]))
yield renderer
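# Editor's illustrative sketch (not from the Bokeh sources): how DotBuilder
# spreads several series within one category by appending an offset to each
# factor name, mirroring the ``cats`` computation in ``_process_data`` above.
# The category names are hypothetical.
def _example_category_offsets():
    cat = ['cpu1', 'cpu2']
    step = np.linspace(0, 1.0, 3 + 1, endpoint=False)  # three series
    # factors for the first series: ['cpu1:0.25', 'cpu2:0.25']
    return [c + ":" + str(step[0 + 1]) for c in cat]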
| bsd-3-clause |
kdebrab/pandas | pandas/tests/indexing/test_chaining_and_caching.py | 4 | 14497 | from warnings import catch_warnings
import pytest
import numpy as np
import pandas as pd
from pandas.core import common as com
from pandas import (compat, DataFrame, option_context,
Series, MultiIndex, date_range, Timestamp)
from pandas.util import testing as tm
class TestCaching(object):
def test_slice_consolidate_invalidate_item_cache(self):
# this is chained assignment, but will 'work'
with option_context('chained_assignment', None):
# #3970
df = DataFrame({"aa": compat.lrange(5), "bb": [2.2] * 5})
# Creates a second float block
df["cc"] = 0.0
# caches a reference to the 'bb' series
df["bb"]
# repr machinery triggers consolidation
repr(df)
# Assignment to wrong series
df['bb'].iloc[0] = 0.17
df._clear_item_cache()
tm.assert_almost_equal(df['bb'][0], 0.17)
def test_setitem_cache_updating(self):
# GH 5424
cont = ['one', 'two', 'three', 'four', 'five', 'six', 'seven']
        for do_ref in [False, True]:
df = DataFrame({'a': cont,
"b": cont[3:] + cont[:3],
'c': np.arange(7)})
# ref the cache
if do_ref:
df.loc[0, "c"]
# set it
df.loc[7, 'c'] = 1
assert df.loc[0, 'c'] == 0.0
assert df.loc[7, 'c'] == 1.0
# GH 7084
# not updating cache on series setting with slices
expected = DataFrame({'A': [600, 600, 600]},
index=date_range('5/7/2014', '5/9/2014'))
out = DataFrame({'A': [0, 0, 0]},
index=date_range('5/7/2014', '5/9/2014'))
df = DataFrame({'C': ['A', 'A', 'A'], 'D': [100, 200, 300]})
# loop through df to update out
six = Timestamp('5/7/2014')
eix = Timestamp('5/9/2014')
for ix, row in df.iterrows():
out.loc[six:eix, row['C']] = out.loc[six:eix, row['C']] + row['D']
tm.assert_frame_equal(out, expected)
tm.assert_series_equal(out['A'], expected['A'])
# try via a chain indexing
# this actually works
out = DataFrame({'A': [0, 0, 0]},
index=date_range('5/7/2014', '5/9/2014'))
for ix, row in df.iterrows():
v = out[row['C']][six:eix] + row['D']
out[row['C']][six:eix] = v
tm.assert_frame_equal(out, expected)
tm.assert_series_equal(out['A'], expected['A'])
out = DataFrame({'A': [0, 0, 0]},
index=date_range('5/7/2014', '5/9/2014'))
for ix, row in df.iterrows():
out.loc[six:eix, row['C']] += row['D']
tm.assert_frame_equal(out, expected)
tm.assert_series_equal(out['A'], expected['A'])
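# Editor's illustrative sketch (not part of the upstream test module): the
# pattern these tests guard against. Chained indexing such as
# df[mask]['b'] = 0 may write to a temporary copy, whereas a single .loc call
# always writes to df itself.
def _example_single_loc_assignment():
    df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    df.loc[df['a'] > 1, 'b'] = 0
    assert df['b'].tolist() == [4, 0, 0]
    return df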
class TestChaining(object):
def test_setitem_chained_setfault(self):
# GH6026
        # segfaults under numpy 1.7.1 (ok on 1.8)
data = ['right', 'left', 'left', 'left', 'right', 'left', 'timeout']
mdata = ['right', 'left', 'left', 'left', 'right', 'left', 'none']
df = DataFrame({'response': np.array(data)})
mask = df.response == 'timeout'
df.response[mask] = 'none'
tm.assert_frame_equal(df, DataFrame({'response': mdata}))
recarray = np.rec.fromarrays([data], names=['response'])
df = DataFrame(recarray)
mask = df.response == 'timeout'
df.response[mask] = 'none'
tm.assert_frame_equal(df, DataFrame({'response': mdata}))
df = DataFrame({'response': data, 'response1': data})
mask = df.response == 'timeout'
df.response[mask] = 'none'
tm.assert_frame_equal(df, DataFrame({'response': mdata,
'response1': data}))
# GH 6056
expected = DataFrame(dict(A=[np.nan, 'bar', 'bah', 'foo', 'bar']))
df = DataFrame(dict(A=np.array(['foo', 'bar', 'bah', 'foo', 'bar'])))
df['A'].iloc[0] = np.nan
result = df.head()
tm.assert_frame_equal(result, expected)
df = DataFrame(dict(A=np.array(['foo', 'bar', 'bah', 'foo', 'bar'])))
df.A.iloc[0] = np.nan
result = df.head()
tm.assert_frame_equal(result, expected)
def test_detect_chained_assignment(self):
pd.set_option('chained_assignment', 'raise')
# work with the chain
expected = DataFrame([[-5, 1], [-6, 3]], columns=list('AB'))
df = DataFrame(np.arange(4).reshape(2, 2),
columns=list('AB'), dtype='int64')
assert df._is_copy is None
df['A'][0] = -5
df['A'][1] = -6
tm.assert_frame_equal(df, expected)
# test with the chaining
df = DataFrame({'A': Series(range(2), dtype='int64'),
'B': np.array(np.arange(2, 4), dtype=np.float64)})
assert df._is_copy is None
with pytest.raises(com.SettingWithCopyError):
df['A'][0] = -5
with pytest.raises(com.SettingWithCopyError):
df['A'][1] = np.nan
assert df['A']._is_copy is None
# Using a copy (the chain), fails
df = DataFrame({'A': Series(range(2), dtype='int64'),
'B': np.array(np.arange(2, 4), dtype=np.float64)})
with pytest.raises(com.SettingWithCopyError):
df.loc[0]['A'] = -5
# Doc example
df = DataFrame({'a': ['one', 'one', 'two', 'three',
'two', 'one', 'six'],
'c': Series(range(7), dtype='int64')})
assert df._is_copy is None
with pytest.raises(com.SettingWithCopyError):
indexer = df.a.str.startswith('o')
df[indexer]['c'] = 42
expected = DataFrame({'A': [111, 'bbb', 'ccc'], 'B': [1, 2, 3]})
df = DataFrame({'A': ['aaa', 'bbb', 'ccc'], 'B': [1, 2, 3]})
with pytest.raises(com.SettingWithCopyError):
df['A'][0] = 111
with pytest.raises(com.SettingWithCopyError):
df.loc[0]['A'] = 111
df.loc[0, 'A'] = 111
tm.assert_frame_equal(df, expected)
        # gh-5475: Make sure that is_copy is picked up on reconstruction
df = DataFrame({"A": [1, 2]})
assert df._is_copy is None
with tm.ensure_clean('__tmp__pickle') as path:
df.to_pickle(path)
df2 = pd.read_pickle(path)
df2["B"] = df2["A"]
df2["B"] = df2["A"]
# gh-5597: a spurious raise as we are setting the entire column here
from string import ascii_letters as letters
def random_text(nobs=100):
df = []
for i in range(nobs):
idx = np.random.randint(len(letters), size=2)
idx.sort()
df.append([letters[idx[0]:idx[1]]])
return DataFrame(df, columns=['letters'])
df = random_text(100000)
# Always a copy
x = df.iloc[[0, 1, 2]]
assert x._is_copy is not None
x = df.iloc[[0, 1, 2, 4]]
assert x._is_copy is not None
# Explicitly copy
indexer = df.letters.apply(lambda x: len(x) > 10)
df = df.loc[indexer].copy()
assert df._is_copy is None
df['letters'] = df['letters'].apply(str.lower)
# Implicitly take
df = random_text(100000)
indexer = df.letters.apply(lambda x: len(x) > 10)
df = df.loc[indexer]
assert df._is_copy is not None
df['letters'] = df['letters'].apply(str.lower)
# Implicitly take 2
df = random_text(100000)
indexer = df.letters.apply(lambda x: len(x) > 10)
df = df.loc[indexer]
assert df._is_copy is not None
df.loc[:, 'letters'] = df['letters'].apply(str.lower)
# Should be ok even though it's a copy!
assert df._is_copy is None
df['letters'] = df['letters'].apply(str.lower)
assert df._is_copy is None
df = random_text(100000)
indexer = df.letters.apply(lambda x: len(x) > 10)
df.loc[indexer, 'letters'] = (
df.loc[indexer, 'letters'].apply(str.lower))
# an identical take, so no copy
df = DataFrame({'a': [1]}).dropna()
assert df._is_copy is None
df['a'] += 1
# Inplace ops, originally from:
# http://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
a = [12, 23]
b = [123, None]
c = [1234, 2345]
d = [12345, 23456]
tuples = [('eyes', 'left'), ('eyes', 'right'), ('ears', 'left'),
('ears', 'right')]
events = {('eyes', 'left'): a,
('eyes', 'right'): b,
('ears', 'left'): c,
('ears', 'right'): d}
multiind = MultiIndex.from_tuples(tuples, names=['part', 'side'])
zed = DataFrame(events, index=['a', 'b'], columns=multiind)
with pytest.raises(com.SettingWithCopyError):
zed['eyes']['right'].fillna(value=555, inplace=True)
df = DataFrame(np.random.randn(10, 4))
s = df.iloc[:, 0].sort_values()
tm.assert_series_equal(s, df.iloc[:, 0].sort_values())
tm.assert_series_equal(s, df[0].sort_values())
# see gh-6025: false positives
df = DataFrame({'column1': ['a', 'a', 'a'], 'column2': [4, 8, 9]})
str(df)
df['column1'] = df['column1'] + 'b'
str(df)
df = df[df['column2'] != 8]
str(df)
df['column1'] = df['column1'] + 'c'
str(df)
# from SO:
# http://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc
df = DataFrame(np.arange(0, 9), columns=['count'])
df['group'] = 'b'
with pytest.raises(com.SettingWithCopyError):
df.iloc[0:5]['group'] = 'a'
# Mixed type setting but same dtype & changing dtype
df = DataFrame(dict(A=date_range('20130101', periods=5),
B=np.random.randn(5),
C=np.arange(5, dtype='int64'),
D=list('abcde')))
with pytest.raises(com.SettingWithCopyError):
df.loc[2]['D'] = 'foo'
with pytest.raises(com.SettingWithCopyError):
df.loc[2]['C'] = 'foo'
with pytest.raises(com.SettingWithCopyError):
df['C'][2] = 'foo'
def test_setting_with_copy_bug(self):
# operating on a copy
df = DataFrame({'a': list(range(4)),
'b': list('ab..'),
'c': ['a', 'b', np.nan, 'd']})
mask = pd.isna(df.c)
def f():
df[['c']][mask] = df[['b']][mask]
pytest.raises(com.SettingWithCopyError, f)
# invalid warning as we are returning a new object
# GH 8730
df1 = DataFrame({'x': Series(['a', 'b', 'c']),
'y': Series(['d', 'e', 'f'])})
df2 = df1[['x']]
# this should not raise
df2['y'] = ['g', 'h', 'i']
def test_detect_chained_assignment_warnings(self):
# warnings
with option_context('chained_assignment', 'warn'):
df = DataFrame({'A': ['aaa', 'bbb', 'ccc'], 'B': [1, 2, 3]})
with tm.assert_produces_warning(
expected_warning=com.SettingWithCopyWarning):
df.loc[0]['A'] = 111
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = tm.makeDataFrame()
df['A'] # cache series
with catch_warnings(record=True):
df.ix["Hello Friend"] = df.ix[0]
assert "Hello Friend" in df['A'].index
assert "Hello Friend" in df['B'].index
with catch_warnings(record=True):
panel = tm.makePanel()
panel.ix[0] # get first item into cache
panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
assert "A+1" in panel.ix[0].columns
assert "A+1" in panel.ix[1].columns
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=['x', 'y', 'z'])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]['z'].iloc[0] = 1.
result = df.loc[(0, 0), 'z']
assert result == 1
# correct setting
df.loc[(0, 0), 'z'] = 2
result = df.loc[(0, 0), 'z']
assert result == 2
# 10264
df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e'], index=range(5))
df['f'] = 0
df.f.values[3] = 1
# TODO(wesm): unused?
# y = df.iloc[np.arange(2, len(df))]
df.f.values[3] = 2
expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
expected.at[3, 'f'] = 2
tm.assert_frame_equal(df, expected)
expected = Series([0, 0, 0, 2, 0], name='f')
tm.assert_series_equal(df.f, expected)
def test_deprecate_is_copy(self):
# GH18801
df = DataFrame({"A": [1, 2, 3]})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# getter
df.is_copy
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# setter
df.is_copy = "test deprecated is_copy"
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/ensemble/partial_dependence.py | 36 | 14909 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
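# Editor's illustrative sketch (not part of scikit-learn): for a feature with
# many distinct values, _grid_from_X returns ``grid_resolution`` equally
# spaced points between the requested percentiles.
def _example_grid_from_X():
    X = np.random.RandomState(0).rand(200, 1)
    grid, axes = _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=10)
    assert grid.shape == (10, 1) and axes[0].shape == (10,)
    return grid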
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| mit |
arjoly/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
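# Editor's illustrative sketch (not part of the original example): tokens made
# entirely of digits collapse into one placeholder, so all numbers share a
# single vocabulary entry.
assert number_aware_tokenizer("windows 95 costs 100 dollars") == \
    ['windows', '#NUMBER', 'costs', '#NUMBER', 'dollars']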
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
shangwuhencc/scikit-learn | doc/sphinxext/gen_rst.py | 106 | 40198 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
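# Editor's illustrative sketch (not part of the sphinx extension): Tee simply
# duplicates writes and flushes across two file-like objects.
def _example_tee():
    import io
    buf1, buf2 = io.StringIO(), io.StringIO()
    tee = Tee(buf1, buf2)
    tee.write(u'hello')
    tee.flush()
    assert buf1.getvalue() == buf2.getvalue() == u'hello'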
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
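# Editor's illustrative sketch (not part of the sphinx extension): the helper
# returns the module-level docstring, a one-paragraph summary, and the row at
# which the example code starts.
def _example_extract_docstring():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as fid:
        fid.write('"""My example\n\nLonger description."""\nprint(1)\n')
        fname = fid.name
    docstring, first_par, end_row = extract_docstring(fname)
    assert first_par == 'My example'
    return docstring, end_row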
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. only:: html\n\n')
out.append(' .. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
if is_backref:
out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
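# Illustrative sketch (not part of the original script): a typical call to
# make_thumbnail, producing a 400x280 gallery thumbnail from a generated
# figure. The file names below are hypothetical.
def _example_make_thumbnail():
    make_thumbnail('auto_examples/images/plot_example_001.png',
                   'auto_examples/images/thumb/plot_example.png',
                   400, 280)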
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
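# For example (illustrative, and assuming scikit-learn is importable), an object
# that is re-exported at a shallower level resolves to the shorter path:
#     get_short_module_name('sklearn.linear_model.logistic', 'LogisticRegression')
# returns 'sklearn.linear_model', while a name only importable from its full
# module path is returned unchanged.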
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
                # created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`,
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
rstoneback/pysat | pysat/instruments/methods/sw.py | 2 | 23054 | # -*- coding: utf-8 -*-.
"""Provides default routines for solar wind and geospace indices
"""
from __future__ import print_function
from __future__ import absolute_import
import pandas as pds
import numpy as np
import pysat
def combine_kp(standard_inst=None, recent_inst=None, forecast_inst=None,
start=None, stop=None, fill_val=np.nan):
""" Combine the output from the different Kp sources for a range of dates
Parameters
----------
standard_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'kp' name,
and '' tag or None to exclude (default=None)
recent_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'kp' name,
and 'recent' tag or None to exclude (default=None)
forecast_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'kp' name,
and 'forecast' tag or None to exclude (default=None)
start : (dt.datetime or NoneType)
Starting time for combining data, or None to use earliest loaded
date from the pysat Instruments (default=None)
stop : (dt.datetime)
Ending time for combining data, or None to use the latest loaded date
from the pysat Instruments (default=None)
fill_val : (int or float)
Desired fill value (since the standard instrument fill value differs
from the other sources) (default=np.nan)
Returns
-------
kp_inst : (pysat.Instrument)
Instrument object containing Kp observations for the desired period of
time, merging the standard, recent, and forecasted values based on
their reliability
Notes
-----
Merging prioritizes the standard data, then the recent data, and finally
the forecast data
Will not attempt to download any missing data, but will load data
"""
notes = "Combines data from"
# Create an ordered list of the Instruments, excluding any that are None
all_inst = list()
tag = 'combined'
inst_flag = None
if standard_inst is not None:
all_inst.append(standard_inst)
tag += '_standard'
if inst_flag is None:
inst_flag = 'standard'
if recent_inst is not None:
all_inst.append(recent_inst)
tag += '_recent'
if inst_flag is None:
inst_flag = 'recent'
if forecast_inst is not None:
all_inst.append(forecast_inst)
tag += '_forecast'
if inst_flag is None:
inst_flag = 'forecast'
if len(all_inst) < 2:
raise ValueError("need at two Kp Instrument objects to combine them")
# If the start or stop times are not defined, get them from the Instruments
if start is None:
stimes = [inst.index.min() for inst in all_inst if len(inst.index) > 0]
start = min(stimes) if len(stimes) > 0 else None
if stop is None:
stimes = [inst.index.max() for inst in all_inst if len(inst.index) > 0]
stop = max(stimes) if len(stimes) > 0 else None
stop += pds.DateOffset(days=1)
if start is None or stop is None:
raise ValueError("must either load in Instrument objects or provide" +
" starting and ending times")
# Initialize the output instrument
kp_inst = pysat.Instrument()
kp_inst.platform = all_inst[0].platform
kp_inst.name = all_inst[0].name
kp_inst.tag = tag
kp_inst.date = start
kp_inst.doy = int(start.strftime("%j"))
kp_inst.meta = pysat.Meta()
pysat.instruments.sw_kp.initialize_kp_metadata(kp_inst.meta, 'Kp',
fill_val=fill_val)
kp_times = list()
kp_values = list()
# Cycle through the desired time range
itime = start
while itime < stop and inst_flag is not None:
# Load and save the standard data for as many times as possible
if inst_flag == 'standard':
standard_inst.load(date=itime)
if notes.find("standard") < 0:
notes += " the {:} source ({:} to ".format(inst_flag,
itime.date())
if len(standard_inst.index) == 0:
inst_flag = 'forecast' if recent_inst is None else 'recent'
notes += "{:})".format(itime.date())
else:
kp_times.extend(list(standard_inst.index))
kp_values.extend(list(standard_inst['Kp']))
itime = kp_times[-1] + pds.DateOffset(hours=3)
# Load and save the recent data for as many times as possible
if inst_flag == 'recent':
# Determine which files should be loaded
if len(recent_inst.index) == 0:
files = np.unique(recent_inst.files.files[itime:stop])
else:
files = [None] # No load needed, if already initialized
# Cycle through all possible files of interest, saving relevant
# data
for filename in files:
if filename is not None:
recent_inst.load(fname=filename)
if notes.find("recent") < 0:
notes += " the {:} source ({:} to ".format(inst_flag,
itime.date())
# Determine which times to save
local_fill_val = recent_inst.meta['Kp'].fill
good_times = ((recent_inst.index >= itime) &
(recent_inst.index < stop))
good_vals = recent_inst['Kp'][good_times] != local_fill_val
# Save output data and cycle time
kp_times.extend(list(recent_inst.index[good_times][good_vals]))
kp_values.extend(list(recent_inst['Kp'][good_times][good_vals]))
itime = kp_times[-1] + pds.DateOffset(hours=3)
inst_flag = 'forecast' if forecast_inst is not None else None
notes += "{:})".format(itime.date())
# Load and save the forecast data for as many times as possible
if inst_flag == "forecast":
# Determine which files should be loaded
if len(forecast_inst.index) == 0:
files = np.unique(forecast_inst.files.files[itime:stop])
else:
files = [None] # No load needed, if already initialized
# Cycle through all possible files of interest, saving relevant
# data
for filename in files:
if filename is not None:
forecast_inst.load(fname=filename)
if notes.find("forecast") < 0:
notes += " the {:} source ({:} to ".format(inst_flag,
itime.date())
# Determine which times to save
local_fill_val = forecast_inst.meta['Kp'].fill
good_times = ((forecast_inst.index >= itime) &
(forecast_inst.index < stop))
good_vals = forecast_inst['Kp'][good_times] != local_fill_val
# Save desired data and cycle time
kp_times.extend(list(forecast_inst.index[good_times][good_vals]))
kp_values.extend(list(forecast_inst['Kp'][good_times][good_vals]))
itime = kp_times[-1] + pds.DateOffset(hours=3)
notes += "{:})".format(itime.date())
inst_flag = None
if inst_flag is not None:
notes += "{:})".format(itime.date())
# Determine if the beginning or end of the time series needs to be padded
freq = None if len(kp_times) < 2 else pysat.utils.time.calc_freq(kp_times)
date_range = pds.date_range(start=start, end=stop-pds.DateOffset(days=1),
freq=freq)
if len(kp_times) == 0:
kp_times = date_range
if date_range[0] < kp_times[0]:
# Extend the time and value arrays from their beginning with fill
# values
itime = abs(date_range - kp_times[0]).argmin()
kp_times.reverse()
kp_values.reverse()
extend_times = list(date_range[:itime])
extend_times.reverse()
kp_times.extend(extend_times)
kp_values.extend([fill_val for kk in extend_times])
kp_times.reverse()
kp_values.reverse()
if date_range[-1] > kp_times[-1]:
# Extend the time and value arrays from their end with fill values
itime = abs(date_range - kp_times[-1]).argmin() + 1
extend_times = list(date_range[itime:])
kp_times.extend(extend_times)
kp_values.extend([fill_val for kk in extend_times])
# Save output data
kp_inst.data = pds.DataFrame(kp_values, columns=['Kp'], index=kp_times)
# Resample the output data, filling missing values
if(date_range.shape != kp_inst.index.shape or
abs(date_range - kp_inst.index).max().total_seconds() > 0.0):
kp_inst.data = kp_inst.data.resample(freq).fillna(method=None)
if np.isfinite(fill_val):
kp_inst.data[np.isnan(kp_inst.data)] = fill_val
# Update the metadata notes for this custom procedure
notes += ", in that order"
kp_inst.meta.__setitem__('Kp', {kp_inst.meta.notes_label: notes})
return kp_inst
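# Illustrative sketch (not part of the original module): combining the three Kp
# sources described in the docstring above. It assumes the standard, recent and
# forecast Instruments have already been downloaded; the date is arbitrary.
def _example_combine_kp():
    import datetime as dt
    standard = pysat.Instrument(platform='sw', name='kp', tag='')
    recent = pysat.Instrument(platform='sw', name='kp', tag='recent')
    forecast = pysat.Instrument(platform='sw', name='kp', tag='forecast')
    for inst in [standard, recent, forecast]:
        inst.load(date=dt.datetime(2019, 3, 1))
    return combine_kp(standard_inst=standard, recent_inst=recent,
                      forecast_inst=forecast, fill_val=np.nan)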
def combine_f107(standard_inst, forecast_inst, start=None, stop=None):
""" Combine the output from the measured and forecasted F10.7 sources
Parameters
----------
standard_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'f107' name,
and '', 'all', 'prelim', or 'daily' tag
forecast_inst : (pysat.Instrument or NoneType)
Instrument object containing data for the 'sw' platform, 'f107' name,
and 'prelim', '45day' or 'forecast' tag
start : (dt.datetime or NoneType)
Starting time for combining data, or None to use earliest loaded
date from the pysat Instruments (default=None)
stop : (dt.datetime)
Ending time for combining data, or None to use the latest loaded date
from the pysat Instruments (default=None)
Returns
-------
f107_inst : (pysat.Instrument)
Instrument object containing F10.7 observations for the desired period
of time, merging the standard, 45day, and forecasted values based on
their reliability
Notes
-----
Merging prioritizes the standard data, then the 45day data, and finally
the forecast data
Will not attempt to download any missing data, but will load data
"""
# Initialize metadata and flags
notes = "Combines data from"
stag = standard_inst.tag if len(standard_inst.tag) > 0 else 'default'
tag = 'combined_{:s}_{:s}'.format(stag, forecast_inst.tag)
inst_flag = 'standard'
# If the start or stop times are not defined, get them from the Instruments
if start is None:
stimes = [inst.index.min() for inst in [standard_inst, forecast_inst]
if len(inst.index) > 0]
start = min(stimes) if len(stimes) > 0 else None
if stop is None:
stimes = [inst.index.max() for inst in [standard_inst, forecast_inst]
if len(inst.index) > 0]
stop = max(stimes) + pds.DateOffset(days=1) if len(stimes) > 0 else None
if start is None or stop is None:
raise ValueError("must either load in Instrument objects or provide" +
" starting and ending times")
if start >= stop:
raise ValueError("date range is zero or negative")
# Initialize the output instrument
f107_inst = pysat.Instrument()
f107_inst.platform = standard_inst.platform
f107_inst.name = standard_inst.name
f107_inst.tag = tag
f107_inst.date = start
f107_inst.doy = int(start.strftime("%j"))
fill_val = None
f107_times = list()
f107_values = list()
# Cycle through the desired time range
itime = start
while itime < stop and inst_flag is not None:
# Load and save the standard data for as many times as possible
if inst_flag == 'standard':
# Test to see if data loading is needed
if not np.any(standard_inst.index == itime):
if standard_inst.tag == 'daily':
# Add 30 days
standard_inst.load(date=itime+pds.DateOffset(days=30))
else:
standard_inst.load(date=itime)
good_times = ((standard_inst.index >= itime) &
(standard_inst.index < stop))
if notes.find("standard") < 0:
notes += " the {:} source ({:} to ".format(inst_flag,
itime.date())
if np.any(good_times):
if fill_val is None:
f107_inst.meta = standard_inst.meta
fill_val = f107_inst.meta['f107'].fill
good_vals = standard_inst['f107'][good_times] != fill_val
f107_times.extend(list(standard_inst.index[good_times][good_vals]))
f107_values.extend(list(standard_inst['f107'][good_times][good_vals]))
itime = f107_times[-1] + pds.DateOffset(days=1)
else:
inst_flag = 'forecast'
notes += "{:})".format(itime.date())
# Load and save the forecast data for as many times as possible
if inst_flag == "forecast":
# Determine which files should be loaded
if len(forecast_inst.index) == 0:
files = np.unique(forecast_inst.files.files[itime:stop])
else:
files = [None] # No load needed, if already initialized
# Cycle through all possible files of interest, saving relevant
# data
for filename in files:
if filename is not None:
forecast_inst.load(fname=filename)
if notes.find("forecast") < 0:
notes += " the {:} source ({:} to ".format(inst_flag,
itime.date())
# Check in case there was a problem with the standard data
if fill_val is None:
f107_inst.meta = forecast_inst.meta
fill_val = f107_inst.meta['f107'].fill
# Determine which times to save
good_times = ((forecast_inst.index >= itime) &
(forecast_inst.index < stop))
good_vals = forecast_inst['f107'][good_times] != fill_val
# Save desired data and cycle time
f107_times.extend(list(forecast_inst.index[good_times][good_vals]))
f107_values.extend(list(forecast_inst['f107'][good_times][good_vals]))
itime = f107_times[-1] + pds.DateOffset(days=1)
notes += "{:})".format(itime.date())
inst_flag = None
if inst_flag is not None:
notes += "{:})".format(itime.date())
# Determine if the beginning or end of the time series needs to be padded
freq = pysat.utils.time.calc_freq(f107_times)
date_range = pds.date_range(start=start, end=stop-pds.DateOffset(days=1),
freq=freq)
if date_range[0] < f107_times[0]:
# Extend the time and value arrays from their beginning with fill
# values
itime = abs(date_range - f107_times[0]).argmin()
f107_times.reverse()
f107_values.reverse()
extend_times = list(date_range[:itime])
extend_times.reverse()
f107_times.extend(extend_times)
f107_values.extend([fill_val for kk in extend_times])
f107_times.reverse()
f107_values.reverse()
if date_range[-1] > f107_times[-1]:
# Extend the time and value arrays from their end with fill values
itime = abs(date_range - f107_times[-1]).argmin() + 1
extend_times = list(date_range[itime:])
f107_times.extend(extend_times)
f107_values.extend([fill_val for kk in extend_times])
# Save output data
f107_inst.data = pds.DataFrame(f107_values, columns=['f107'],
index=f107_times)
# Resample the output data, filling missing values
if(date_range.shape != f107_inst.index.shape or
abs(date_range - f107_inst.index).max().total_seconds() > 0.0):
f107_inst.data = f107_inst.data.resample(freq).fillna(method=None)
if np.isfinite(fill_val):
f107_inst.data[np.isnan(f107_inst.data)] = fill_val
# Update the metadata notes for this custom procedure
notes += ", in that order"
f107_inst.meta.__setitem__('f107', {f107_inst.meta.notes_label: notes})
return f107_inst
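# Illustrative sketch (not part of the original module): merging measured daily
# F10.7 with the forecast product, mirroring the Kp example above. The tags are
# taken from the docstring; treat the details as assumptions.
def _example_combine_f107():
    import datetime as dt
    standard = pysat.Instrument(platform='sw', name='f107', tag='daily')
    forecast = pysat.Instrument(platform='sw', name='f107', tag='forecast')
    standard.load(date=dt.datetime(2019, 3, 1))
    forecast.load(date=dt.datetime(2019, 3, 1))
    return combine_f107(standard, forecast)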
def calc_daily_Ap(ap_inst, ap_name='3hr_ap', daily_name='Ap',
running_name=None):
""" Calculate the daily Ap index from the 3hr ap index
Parameters
----------
ap_inst : (pysat.Instrument)
pysat instrument containing 3-hourly ap data
ap_name : (str)
Column name for 3-hourly ap data (default='3hr_ap')
daily_name : (str)
Column name for daily Ap data (default='Ap')
running_name : (str or NoneType)
Column name for daily running average of ap, not output if None
(default=None)
Returns
-------
    Void : updates instrument to include daily Ap index under daily_name
Notes
-----
Ap is the mean of the 3hr ap indices measured for a given day
Option for running average is included since this information is used
by MSIS when running with sub-daily geophysical inputs
"""
# Test that the necessary data is available
if ap_name not in ap_inst.data.columns:
raise ValueError("bad 3-hourly ap column name: {:}".format(ap_name))
    # Test to see that we will not be overwriting data
if daily_name in ap_inst.data.columns:
raise ValueError("daily Ap column name already exists: " + daily_name)
# Calculate the daily mean value
ap_mean = ap_inst[ap_name].rolling(window='1D', min_periods=8).mean()
if running_name is not None:
ap_inst[running_name] = ap_mean
meta_dict = {ap_inst.meta.units_label: '',
ap_inst.meta.name_label: running_name,
ap_inst.meta.desc_label: "running daily Ap index",
ap_inst.meta.plot_label: "24-h average ap",
ap_inst.meta.axis_label: "24-h average ap",
ap_inst.meta.scale_label: 'linear',
ap_inst.meta.min_label: 0,
ap_inst.meta.max_label: 400,
ap_inst.meta.fill_label:
ap_inst.meta[ap_name][ap_inst.meta.fill_label],
ap_inst.meta.notes_label: '24-h running average of '
+ '3-hourly ap indices'}
ap_inst.meta.__setitem__(running_name, meta_dict)
# Resample, backfilling so that each day uses the mean for the data from
# that day only
#
# Pad the data so the first day will be backfilled
ap_pad = pds.Series(np.full(shape=(1,), fill_value=np.nan),
index=[ap_mean.index[0] - pds.DateOffset(hours=3)])
# Extract the mean that only uses data for one day
ap_sel = ap_pad.combine_first(ap_mean[[i for i, tt in
enumerate(ap_mean.index)
if tt.hour == 21]])
# Backfill this data
ap_data = ap_sel.resample('3H').backfill()
# Save the output for the original time range
ap_inst[daily_name] = pds.Series(ap_data[1:], index=ap_data.index[1:])
# Add metadata
meta_dict = {ap_inst.meta.units_label: '',
ap_inst.meta.name_label: 'Ap',
ap_inst.meta.desc_label: "daily Ap index",
ap_inst.meta.plot_label: "Ap",
ap_inst.meta.axis_label: "Ap",
ap_inst.meta.scale_label: 'linear',
ap_inst.meta.min_label: 0,
ap_inst.meta.max_label: 400,
ap_inst.meta.fill_label:
ap_inst.meta[ap_name][ap_inst.meta.fill_label],
ap_inst.meta.notes_label: 'Ap daily mean calculated from '
+ '3-hourly ap indices'}
ap_inst.meta.__setitem__(daily_name, meta_dict)
return
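# Illustrative sketch (not part of the original module): adding the daily Ap and
# a running 24-h average to an Instrument that already exposes a '3hr_ap'
# column (that column name, and the instrument itself, are assumptions).
def _example_calc_daily_ap(ap_inst):
    calc_daily_Ap(ap_inst, ap_name='3hr_ap', daily_name='Ap',
                  running_name='Ap_running')
    return ap_inst['Ap']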
def convert_ap_to_kp(ap_data, fill_val=-1, ap_name='ap'):
""" Convert Ap into Kp
Parameters
----------
ap_data : array-like
Array-like object containing Ap data
fill_val : int, float, NoneType
Fill value for the data set (default=-1)
ap_name : str
Name of the input ap
Returns
-------
kp_data : array-like
Array-like object containing Kp data
meta : Metadata
Metadata object containing information about transformed data
"""
    # ap values are the keys; Kp is returned as a float
    # (N- is stored as (N-1).6667, N+ as N.3333)
one_third = 1.0 / 3.0
two_third = 2.0 / 3.0
ap_to_kp = {0: 0, 2: one_third, 3: two_third, 4: 1, 5: 1.0+one_third,
6: 1.0+two_third, 7: 2, 9: 2.0+one_third, 12: 2.0+two_third,
15: 3, 18: 3.0+one_third, 22: 3.0+two_third, 27: 4,
32: 4.0+one_third, 39: 4.0+two_third, 48: 5, 56: 5.0+one_third,
67: 5.0+two_third, 80: 6, 94: 6.0+one_third, 111: 6.0+two_third,
132: 7, 154: 7.0+one_third, 179: 7.0+two_third, 207: 8,
236: 8.0+one_third, 300: 8.0+two_third, 400: 9}
ap_keys = sorted(list(ap_to_kp.keys()))
# If the ap falls between two Kp indices, assign it to the lower Kp value
def round_ap(ap_in, fill_val=fill_val):
""" Round an ap value to the nearest Kp value
"""
if not np.isfinite(ap_in):
return fill_val
i = 0
        # Guard the upper bound so ap values above the largest key do not
        # run past the end of ap_keys
        while i < len(ap_keys) and ap_keys[i] <= ap_in:
i += 1
i -= 1
if i >= len(ap_keys) or ap_keys[i] > ap_in:
return fill_val
return ap_to_kp[ap_keys[i]]
# Convert from ap to Kp
kp_data = np.array([ap_to_kp[aa] if aa in ap_keys else
round_ap(aa, fill_val=fill_val) for aa in ap_data])
# Set the metadata
meta = pysat.Meta()
meta['Kp'] = {meta.units_label : '',
meta.name_label: 'Kp',
meta.desc_label: 'Kp converted from {:}'.format(ap_name),
meta.plot_label: 'Kp',
meta.axis_label: 'Kp',
meta.scale_label: 'linear',
meta.min_label: 0,
meta.max_label: 9,
meta.fill_label: fill_val,
meta.notes_label: 'Kp converted from {:} as described at: '
'https://www.ngdc.noaa.gov/stp/GEOMAG/kp_ap.html'}
# Return data and metadata
return(kp_data, meta)
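# Illustrative sketch (not part of the original module): converting a handful of
# ap values, including one NaN, to Kp. The input array is made up.
def _example_convert_ap_to_kp():
    ap_vals = np.array([0, 7, 18, 56, 236, np.nan])
    kp_vals, kp_meta = convert_ap_to_kp(ap_vals, fill_val=np.nan, ap_name='ap')
    # kp_vals -> [0.0, 2.0, 3.33, 5.33, 8.33, nan] (thirds truncated for display)
    return kp_vals, kp_meta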
| bsd-3-clause |
NelisVerhoef/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each component
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
Titan-C/scikit-learn | examples/tree/plot_tree_regression.py | 82 | 1562 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, s=20, edgecolor="black",
c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue",
label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
shoyer/xarray | doc/gallery/plot_cartopy_facetgrid.py | 4 | 1285 | """
==================================
Multiple plots and map projections
==================================
Control the map projection parameters on multiple axes
This example illustrates how to plot multiple maps and control their extent
and aspect ratio.
For more details see `this discussion`_ on github.
.. _this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
# Load the data
ds = xr.tutorial.load_dataset("air_temperature")
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
p = air.plot(
transform=ccrs.PlateCarree(), # the data's projection
col="time",
col_wrap=1, # multiplot settings
aspect=ds.dims["lon"] / ds.dims["lat"], # for a sensible figsize
subplot_kws={"projection": map_proj}, # the plot's projection
)
# We have to set the map's options on all four axes
for ax in p.axes.flat:
ax.coastlines()
ax.set_extent([-160, -30, 5, 75])
    # Without this aspect attribute the maps will look chaotic and the
    # "extent" attribute above will be ignored
ax.set_aspect("equal")
plt.show()
| apache-2.0 |
ffagerho/experiment-analysis | analyze_polling.py | 3 | 2342 | from pandas import DataFrame
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
def calc_times(df): # here df as returned by data_helper.import_and_clean_data function
timearray = []
for x in df[df.event=="POLL"].target_email.unique():
polhaps = df[(df.event=="POLL") & (df.target_email==x)].more_info # poll
prevurl = ''
for y in polhaps:
ysplit = y.split(' ')
if not(prevurl.startswith(ysplit[2])): # a new article, thus a new session
prevurl = ysplit[2]
timearray.append({'email': x, 'sessiontime': int(ysplit[0][:-1])})
return DataFrame(timearray)
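# Note on the expected input (inferred from the code above, not documented in the
# original): df needs at least the columns `event`, `target_email` and
# `more_info`, and for POLL events `more_info` is assumed to look like
# "<seconds>s <...> <article_url>", so that ysplit[0][:-1] is the polling time in
# seconds and ysplit[2] is the article URL.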
def userbased_values(df): # here df as returned by calc_times function
for x in df.email.unique():
temporary = df[df.email==x]
print 'For user {} time (s) spent on articles is as follows.'.format(x)
print 'Mean: '+repr(round(temporary.sessiontime.mean(),2))
print 'Median: '+repr(temporary.sessiontime.median())
print 'Minimum: '+repr(float(temporary.sessiontime.min()))
print 'Maximum: '+repr(float(temporary.sessiontime.max()))
print
def largescale_values(df): # here df as returned by calc_times function
print 'For all users, time (s) spent on articles is as follows.'
print 'Mean: '+repr(round(df.sessiontime.mean(),2))
print 'Median: '+repr(df.sessiontime.median())
print 'Minimum: '+repr(float(df.sessiontime.min()))
print 'Maximum: '+repr(float(df.sessiontime.max()))
print
def show_histogram(df): # here df as returned by calc_times function
df=group_sessiontimes(df)
df.sessiontime.value_counts().sort_index().plot(kind='bar')
ticklabels=['30 s', '65 s', '105 s', '150 s', '200 s','255 s', '315 s', '380 s','450 s', '525 s', '10-15 m', '15-30 m','30-60 m','1-24 h','24+ h']
plt.title('Histogram of polling times')
plt.xticks(range(len(ticklabels)),ticklabels,rotation=45);
plt.xlabel('Time')
    plt.ylabel('Occurrences')
plt.show()
def group_sessiontimes(df): # here df as returned by calc_times function
timebins = [600,900,1800,3600,86400,df.sessiontime.max()+1]
for i in range(0,len(timebins)-1):
df['sessiontime'].loc[(df['sessiontime']>=timebins[i]) & (df['sessiontime']<timebins[i+1])]=timebins[i]
return df | mit |
IndraVikas/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
huggingface/pytorch-transformers | src/transformers/__init__.py | 1 | 107733 | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# When adding a new object to this init, remember to add it twice: once inside the `_import_structure` dictionary and
# once inside the `if TYPE_CHECKING` branch. The `TYPE_CHECKING` should have import statements as usual, but they are
# only there for type checking. The `_import_structure` is a dictionary submodule to list of object names, and is used
# to defer the actual importing for when the objects are requested. This way `import transformers` provides the names
# in the namespace without actually importing anything (and especially none of the backends).
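# For example (illustrative only; "Foo" is not a real model), registering a new
# object means adding it in both places: once in the `_import_structure`
# dictionary below,
#     _import_structure["models.foo"] = ["FOO_PRETRAINED_CONFIG_ARCHIVE_MAP", "FooConfig"]
# and once as a regular import inside the `if TYPE_CHECKING:` branch,
#     from .models.foo import FOO_PRETRAINED_CONFIG_ARCHIVE_MAP, FooConfig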
__version__ = "4.7.0.dev0"
# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
import absl.logging
except ImportError:
pass
else:
absl.logging.set_verbosity("info")
absl.logging.set_stderrthreshold("info")
absl.logging._warn_preinit_stderr = False
from typing import TYPE_CHECKING
# Check the dependencies satisfy the minimal versions required.
from . import dependency_versions_check
from .file_utils import (
_BaseLazyModule,
is_flax_available,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
from .utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
# Base objects, independent of any specific backend
_import_structure = {
"configuration_utils": ["PretrainedConfig"],
"data": [
"DataProcessor",
"InputExample",
"InputFeatures",
"SingleSentenceClassificationProcessor",
"SquadExample",
"SquadFeatures",
"SquadV1Processor",
"SquadV2Processor",
"glue_compute_metrics",
"glue_convert_examples_to_features",
"glue_output_modes",
"glue_processors",
"glue_tasks_num_labels",
"squad_convert_examples_to_features",
"xnli_compute_metrics",
"xnli_output_modes",
"xnli_processors",
"xnli_tasks_num_labels",
],
"feature_extraction_sequence_utils": ["BatchFeature", "SequenceFeatureExtractor"],
"file_utils": [
"CONFIG_NAME",
"MODEL_CARD_NAME",
"PYTORCH_PRETRAINED_BERT_CACHE",
"PYTORCH_TRANSFORMERS_CACHE",
"SPIECE_UNDERLINE",
"TF2_WEIGHTS_NAME",
"TF_WEIGHTS_NAME",
"TRANSFORMERS_CACHE",
"WEIGHTS_NAME",
"TensorType",
"add_end_docstrings",
"add_start_docstrings",
"cached_path",
"is_apex_available",
"is_datasets_available",
"is_faiss_available",
"is_flax_available",
"is_psutil_available",
"is_py3nvml_available",
"is_sentencepiece_available",
"is_sklearn_available",
"is_speech_available",
"is_tf_available",
"is_tokenizers_available",
"is_torch_available",
"is_torch_tpu_available",
"is_vision_available",
],
"hf_argparser": ["HfArgumentParser"],
"integrations": [
"is_comet_available",
"is_optuna_available",
"is_ray_available",
"is_ray_tune_available",
"is_tensorboard_available",
"is_wandb_available",
],
"modelcard": ["ModelCard"],
"modeling_tf_pytorch_utils": [
"convert_tf_weight_name_to_pt_weight_name",
"load_pytorch_checkpoint_in_tf2_model",
"load_pytorch_model_in_tf2_model",
"load_pytorch_weights_in_tf2_model",
"load_tf2_checkpoint_in_pytorch_model",
"load_tf2_model_in_pytorch_model",
"load_tf2_weights_in_pytorch_model",
],
# Models
"models": [],
"models.albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"],
"models.auto": [
"ALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CONFIG_MAPPING",
"FEATURE_EXTRACTOR_MAPPING",
"MODEL_NAMES_MAPPING",
"TOKENIZER_MAPPING",
"AutoConfig",
"AutoFeatureExtractor",
"AutoTokenizer",
],
"models.bart": ["BartConfig", "BartTokenizer"],
"models.barthez": [],
"models.bert": [
"BERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BasicTokenizer",
"BertConfig",
"BertTokenizer",
"WordpieceTokenizer",
],
"models.bert_generation": ["BertGenerationConfig"],
"models.bert_japanese": ["BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer"],
"models.bertweet": ["BertweetTokenizer"],
"models.big_bird": ["BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdConfig", "BigBirdTokenizer"],
"models.bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
],
"models.blenderbot": ["BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotTokenizer"],
"models.blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallTokenizer",
],
"models.camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig"],
"models.clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPTextConfig",
"CLIPTokenizer",
"CLIPVisionConfig",
],
"models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"],
"models.cpm": ["CpmTokenizer"],
"models.ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig", "CTRLTokenizer"],
"models.deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaTokenizer"],
"models.deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config"],
"models.deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig"],
"models.distilbert": ["DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertTokenizer"],
"models.dpr": [
"DPR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DPRConfig",
"DPRContextEncoderTokenizer",
"DPRQuestionEncoderTokenizer",
"DPRReaderOutput",
"DPRReaderTokenizer",
],
"models.electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraTokenizer"],
"models.encoder_decoder": ["EncoderDecoderConfig"],
"models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"],
"models.fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig", "FSMTTokenizer"],
"models.funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig", "FunnelTokenizer"],
"models.gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2Tokenizer"],
"models.gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig"],
"models.herbert": ["HerbertTokenizer"],
"models.ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig"],
"models.layoutlm": ["LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMConfig", "LayoutLMTokenizer"],
"models.led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig", "LEDTokenizer"],
"models.longformer": ["LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerTokenizer"],
"models.luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig", "LukeTokenizer"],
"models.lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig", "LxmertTokenizer"],
"models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"],
"models.marian": ["MarianConfig"],
"models.mbart": ["MBartConfig"],
"models.megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
"models.mmbt": ["MMBTConfig"],
"models.mobilebert": ["MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertTokenizer"],
"models.mpnet": ["MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "MPNetConfig", "MPNetTokenizer"],
"models.mt5": ["MT5Config"],
"models.openai": ["OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenAIGPTConfig", "OpenAIGPTTokenizer"],
"models.pegasus": ["PegasusConfig"],
"models.phobert": ["PhobertTokenizer"],
"models.prophetnet": ["PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig", "ProphetNetTokenizer"],
"models.rag": ["RagConfig", "RagRetriever", "RagTokenizer"],
"models.reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"],
"models.retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig", "RetriBertTokenizer"],
"models.roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaTokenizer"],
"models.roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerTokenizer"],
"models.speech_to_text": [
"SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Speech2TextConfig",
],
"models.squeezebert": ["SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SqueezeBertConfig", "SqueezeBertTokenizer"],
"models.t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config"],
"models.tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig", "TapasTokenizer"],
"models.transfo_xl": [
"TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TransfoXLConfig",
"TransfoXLCorpus",
"TransfoXLTokenizer",
],
"models.vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"],
"models.wav2vec2": [
"WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Wav2Vec2Config",
"Wav2Vec2CTCTokenizer",
"Wav2Vec2FeatureExtractor",
"Wav2Vec2Processor",
"Wav2Vec2Tokenizer",
],
"models.xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMTokenizer"],
"models.xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"],
"models.xlm_roberta": ["XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig"],
"models.xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"],
"pipelines": [
"AutomaticSpeechRecognitionPipeline",
"Conversation",
"ConversationalPipeline",
"CsvPipelineDataFormat",
"FeatureExtractionPipeline",
"FillMaskPipeline",
"ImageClassificationPipeline",
"JsonPipelineDataFormat",
"NerPipeline",
"PipedPipelineDataFormat",
"Pipeline",
"PipelineDataFormat",
"QuestionAnsweringPipeline",
"SummarizationPipeline",
"TableQuestionAnsweringPipeline",
"Text2TextGenerationPipeline",
"TextClassificationPipeline",
"TextGenerationPipeline",
"TokenClassificationPipeline",
"TranslationPipeline",
"ZeroShotClassificationPipeline",
"pipeline",
],
"tokenization_utils": ["PreTrainedTokenizer"],
"tokenization_utils_base": [
"AddedToken",
"BatchEncoding",
"CharSpan",
"PreTrainedTokenizerBase",
"SpecialTokensMixin",
"TokenSpan",
],
"trainer_callback": [
"DefaultFlowCallback",
"EarlyStoppingCallback",
"PrinterCallback",
"ProgressCallback",
"TrainerCallback",
"TrainerControl",
"TrainerState",
],
"trainer_utils": ["EvalPrediction", "IntervalStrategy", "SchedulerType", "set_seed"],
"training_args": ["TrainingArguments"],
"training_args_seq2seq": ["Seq2SeqTrainingArguments"],
"training_args_tf": ["TFTrainingArguments"],
"utils": ["logging"],
}
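# NOTE: the blocks below extend `_import_structure` conditionally. Each
# `is_*_available()` check appends the symbols that need that optional backend;
# when the backend is missing, the matching `utils.dummy_*_objects` module is
# registered instead (its names presumably act as placeholders that raise an
# informative error when actually used). Sketch of the intent: importing a name
# such as `BertModel` from this package is meant to resolve through
# `_import_structure["models.bert"]` without pulling in unused backends.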
# sentencepiece-backed objects
if is_sentencepiece_available():
_import_structure["models.albert"].append("AlbertTokenizer")
_import_structure["models.barthez"].append("BarthezTokenizer")
_import_structure["models.bert_generation"].append("BertGenerationTokenizer")
_import_structure["models.camembert"].append("CamembertTokenizer")
_import_structure["models.deberta_v2"].append("DebertaV2Tokenizer")
_import_structure["models.m2m_100"].append("M2M100Tokenizer")
_import_structure["models.marian"].append("MarianTokenizer")
_import_structure["models.mbart"].append("MBartTokenizer")
_import_structure["models.mbart"].append("MBart50Tokenizer")
_import_structure["models.mt5"].append("MT5Tokenizer")
_import_structure["models.pegasus"].append("PegasusTokenizer")
_import_structure["models.reformer"].append("ReformerTokenizer")
_import_structure["models.speech_to_text"].append("Speech2TextTokenizer")
_import_structure["models.t5"].append("T5Tokenizer")
_import_structure["models.xlm_prophetnet"].append("XLMProphetNetTokenizer")
_import_structure["models.xlm_roberta"].append("XLMRobertaTokenizer")
_import_structure["models.xlnet"].append("XLNetTokenizer")
else:
from .utils import dummy_sentencepiece_objects
_import_structure["utils.dummy_sentencepiece_objects"] = [
name for name in dir(dummy_sentencepiece_objects) if not name.startswith("_")
]
# tokenizers-backed objects
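# The "fast" (Rust tokenizers-backed) classes are registered separately from
# the slow/sentencepiece ones above, so either backend can be installed on its own.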
if is_tokenizers_available():
# Fast tokenizers
_import_structure["models.roformer"].append("RoFormerTokenizerFast")
_import_structure["models.clip"].append("CLIPTokenizerFast")
_import_structure["models.convbert"].append("ConvBertTokenizerFast")
_import_structure["models.albert"].append("AlbertTokenizerFast")
_import_structure["models.bart"].append("BartTokenizerFast")
_import_structure["models.barthez"].append("BarthezTokenizerFast")
_import_structure["models.bert"].append("BertTokenizerFast")
_import_structure["models.big_bird"].append("BigBirdTokenizerFast")
_import_structure["models.camembert"].append("CamembertTokenizerFast")
_import_structure["models.deberta"].append("DebertaTokenizerFast")
_import_structure["models.distilbert"].append("DistilBertTokenizerFast")
_import_structure["models.dpr"].extend(
["DPRContextEncoderTokenizerFast", "DPRQuestionEncoderTokenizerFast", "DPRReaderTokenizerFast"]
)
_import_structure["models.electra"].append("ElectraTokenizerFast")
_import_structure["models.funnel"].append("FunnelTokenizerFast")
_import_structure["models.gpt2"].append("GPT2TokenizerFast")
_import_structure["models.herbert"].append("HerbertTokenizerFast")
_import_structure["models.layoutlm"].append("LayoutLMTokenizerFast")
_import_structure["models.led"].append("LEDTokenizerFast")
_import_structure["models.longformer"].append("LongformerTokenizerFast")
_import_structure["models.lxmert"].append("LxmertTokenizerFast")
_import_structure["models.mbart"].append("MBartTokenizerFast")
_import_structure["models.mbart"].append("MBart50TokenizerFast")
_import_structure["models.mobilebert"].append("MobileBertTokenizerFast")
_import_structure["models.mpnet"].append("MPNetTokenizerFast")
_import_structure["models.mt5"].append("MT5TokenizerFast")
_import_structure["models.openai"].append("OpenAIGPTTokenizerFast")
_import_structure["models.pegasus"].append("PegasusTokenizerFast")
_import_structure["models.reformer"].append("ReformerTokenizerFast")
_import_structure["models.retribert"].append("RetriBertTokenizerFast")
_import_structure["models.roberta"].append("RobertaTokenizerFast")
_import_structure["models.squeezebert"].append("SqueezeBertTokenizerFast")
_import_structure["models.t5"].append("T5TokenizerFast")
_import_structure["models.xlm_roberta"].append("XLMRobertaTokenizerFast")
_import_structure["models.xlnet"].append("XLNetTokenizerFast")
_import_structure["tokenization_utils_fast"] = ["PreTrainedTokenizerFast"]
else:
from .utils import dummy_tokenizers_objects
_import_structure["utils.dummy_tokenizers_objects"] = [
name for name in dir(dummy_tokenizers_objects) if not name.startswith("_")
]
if is_sentencepiece_available() and is_tokenizers_available():
_import_structure["convert_slow_tokenizer"] = ["SLOW_TO_FAST_CONVERTERS", "convert_slow_tokenizer"]
else:
from .utils import dummy_sentencepiece_and_tokenizers_objects
_import_structure["utils.dummy_sentencepiece_and_tokenizers_objects"] = [
name for name in dir(dummy_sentencepiece_and_tokenizers_objects) if not name.startswith("_")
]
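# Objects that need two optional backends at once (e.g. the slow-to-fast
# converters above, or `Speech2TextProcessor` below) get their own combined
# dummy modules as a fallback.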
# Speech-specific objects
if is_speech_available():
_import_structure["models.speech_to_text"].append("Speech2TextFeatureExtractor")
else:
from .utils import dummy_speech_objects
_import_structure["utils.dummy_speech_objects"] = [
name for name in dir(dummy_speech_objects) if not name.startswith("_")
]
if is_sentencepiece_available() and is_speech_available():
_import_structure["models.speech_to_text"].append("Speech2TextProcessor")
else:
from .utils import dummy_sentencepiece_and_speech_objects
_import_structure["utils.dummy_sentencepiece_and_speech_objects"] = [
name for name in dir(dummy_sentencepiece_and_speech_objects) if not name.startswith("_")
]
# Vision-specific objects
if is_vision_available():
_import_structure["image_utils"] = ["ImageFeatureExtractionMixin"]
_import_structure["models.clip"].append("CLIPFeatureExtractor")
_import_structure["models.clip"].append("CLIPProcessor")
_import_structure["models.deit"].append("DeiTFeatureExtractor")
_import_structure["models.vit"].append("ViTFeatureExtractor")
else:
from .utils import dummy_vision_objects
_import_structure["utils.dummy_vision_objects"] = [
name for name in dir(dummy_vision_objects) if not name.startswith("_")
]
# PyTorch-backed objects
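# Each model package contributes its pretrained-archive constant, its task
# heads and base model, and (for some models) a `load_tf_weights_in_*`
# conversion helper, all grouped per architecture via `.extend()` / `.append()`.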
if is_torch_available():
_import_structure["benchmark.benchmark"] = ["PyTorchBenchmark"]
_import_structure["benchmark.benchmark_args"] = ["PyTorchBenchmarkArguments"]
_import_structure["data.data_collator"] = [
"DataCollator",
"DataCollatorForLanguageModeling",
"DataCollatorForPermutationLanguageModeling",
"DataCollatorForSeq2Seq",
"DataCollatorForSOP",
"DataCollatorForTokenClassification",
"DataCollatorForWholeWordMask",
"DataCollatorWithPadding",
"default_data_collator",
]
_import_structure["data.datasets"] = [
"GlueDataset",
"GlueDataTrainingArguments",
"LineByLineTextDataset",
"LineByLineWithRefDataset",
"LineByLineWithSOPTextDataset",
"SquadDataset",
"SquadDataTrainingArguments",
"TextDataset",
"TextDatasetForNextSentencePrediction",
]
_import_structure["generation_beam_search"] = ["BeamScorer", "BeamSearchScorer"]
_import_structure["generation_logits_process"] = [
"ForcedBOSTokenLogitsProcessor",
"ForcedEOSTokenLogitsProcessor",
"HammingDiversityLogitsProcessor",
"InfNanRemoveLogitsProcessor",
"LogitsProcessor",
"LogitsProcessorList",
"LogitsWarper",
"MinLengthLogitsProcessor",
"NoBadWordsLogitsProcessor",
"NoRepeatNGramLogitsProcessor",
"PrefixConstrainedLogitsProcessor",
"RepetitionPenaltyLogitsProcessor",
"TemperatureLogitsWarper",
"TopKLogitsWarper",
"TopPLogitsWarper",
]
_import_structure["generation_stopping_criteria"] = [
"MaxLengthCriteria",
"MaxTimeCriteria",
"StoppingCriteria",
"StoppingCriteriaList",
]
_import_structure["generation_utils"] = ["top_k_top_p_filtering"]
_import_structure["modeling_utils"] = ["Conv1D", "PreTrainedModel", "apply_chunking_to_forward", "prune_layer"]
# PyTorch models structure
_import_structure["models.albert"].extend(
[
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
)
_import_structure["models.auto"].extend(
[
"MODEL_FOR_CAUSAL_LM_MAPPING",
"MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
"MODEL_FOR_MASKED_LM_MAPPING",
"MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
"MODEL_FOR_PRETRAINING_MAPPING",
"MODEL_FOR_QUESTION_ANSWERING_MAPPING",
"MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
"MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
"MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
"MODEL_MAPPING",
"MODEL_WITH_LM_HEAD_MAPPING",
"AutoModel",
"AutoModelForCausalLM",
"AutoModelForImageClassification",
"AutoModelForMaskedLM",
"AutoModelForMultipleChoice",
"AutoModelForNextSentencePrediction",
"AutoModelForPreTraining",
"AutoModelForQuestionAnswering",
"AutoModelForSeq2SeqLM",
"AutoModelForSequenceClassification",
"AutoModelForTableQuestionAnswering",
"AutoModelForTokenClassification",
"AutoModelWithLMHead",
]
)
_import_structure["models.bart"].extend(
[
"BART_PRETRAINED_MODEL_ARCHIVE_LIST",
"BartForCausalLM",
"BartForConditionalGeneration",
"BartForQuestionAnswering",
"BartForSequenceClassification",
"BartModel",
"BartPretrainedModel",
"PretrainedBartModel",
]
)
_import_structure["models.bert"].extend(
[
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
)
_import_structure["models.bert_generation"].extend(
[
"BertGenerationDecoder",
"BertGenerationEncoder",
"load_tf_weights_in_bert_generation",
]
)
_import_structure["models.big_bird"].extend(
[
"BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdForCausalLM",
"BigBirdForMaskedLM",
"BigBirdForMultipleChoice",
"BigBirdForPreTraining",
"BigBirdForQuestionAnswering",
"BigBirdForSequenceClassification",
"BigBirdForTokenClassification",
"BigBirdLayer",
"BigBirdModel",
"BigBirdPreTrainedModel",
"load_tf_weights_in_big_bird",
]
)
_import_structure["models.bigbird_pegasus"].extend(
[
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
]
)
_import_structure["models.blenderbot"].extend(
[
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
]
)
_import_structure["models.blenderbot_small"].extend(
[
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
]
)
_import_structure["models.camembert"].extend(
[
"CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CamembertForCausalLM",
"CamembertForMaskedLM",
"CamembertForMultipleChoice",
"CamembertForQuestionAnswering",
"CamembertForSequenceClassification",
"CamembertForTokenClassification",
"CamembertModel",
]
)
_import_structure["models.clip"].extend(
[
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPVisionModel",
]
)
_import_structure["models.convbert"].extend(
[
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
)
_import_structure["models.ctrl"].extend(
[
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
)
_import_structure["models.deberta"].extend(
[
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
)
_import_structure["models.deberta_v2"].extend(
[
"DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaV2ForMaskedLM",
"DebertaV2ForQuestionAnswering",
"DebertaV2ForSequenceClassification",
"DebertaV2ForTokenClassification",
"DebertaV2Model",
"DebertaV2PreTrainedModel",
]
)
_import_structure["models.deit"].extend(
[
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTModel",
"DeiTPreTrainedModel",
]
)
_import_structure["models.distilbert"].extend(
[
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
)
_import_structure["models.dpr"].extend(
[
"DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPRContextEncoder",
"DPRPretrainedContextEncoder",
"DPRPretrainedQuestionEncoder",
"DPRPretrainedReader",
"DPRQuestionEncoder",
"DPRReader",
]
)
_import_structure["models.electra"].extend(
[
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
)
_import_structure["models.encoder_decoder"].append("EncoderDecoderModel")
_import_structure["models.flaubert"].extend(
[
"FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaubertForMultipleChoice",
"FlaubertForQuestionAnswering",
"FlaubertForQuestionAnsweringSimple",
"FlaubertForSequenceClassification",
"FlaubertForTokenClassification",
"FlaubertModel",
"FlaubertWithLMHeadModel",
]
)
_import_structure["models.fsmt"].extend(["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"])
_import_structure["models.funnel"].extend(
[
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"load_tf_weights_in_funnel",
]
)
_import_structure["models.gpt2"].extend(
[
"GPT2_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPT2DoubleHeadsModel",
"GPT2ForSequenceClassification",
"GPT2LMHeadModel",
"GPT2Model",
"GPT2PreTrainedModel",
"load_tf_weights_in_gpt2",
]
)
_import_structure["models.gpt_neo"].extend(
[
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
)
_import_structure["models.ibert"].extend(
[
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
)
_import_structure["models.layoutlm"].extend(
[
"LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMForMaskedLM",
"LayoutLMForSequenceClassification",
"LayoutLMForTokenClassification",
"LayoutLMModel",
]
)
_import_structure["models.led"].extend(
[
"LED_PRETRAINED_MODEL_ARCHIVE_LIST",
"LEDForConditionalGeneration",
"LEDForQuestionAnswering",
"LEDForSequenceClassification",
"LEDModel",
]
)
_import_structure["models.longformer"].extend(
[
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerSelfAttention",
]
)
_import_structure["models.luke"].extend(
[
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeModel",
"LukePreTrainedModel",
]
)
_import_structure["models.lxmert"].extend(
[
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
)
_import_structure["models.m2m_100"].extend(
[
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
]
)
_import_structure["models.marian"].extend(["MarianForCausalLM", "MarianModel", "MarianMTModel"])
_import_structure["models.mbart"].extend(
[
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
]
)
_import_structure["models.megatron_bert"].extend(
[
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
]
)
_import_structure["models.mmbt"].extend(["MMBTForClassification", "MMBTModel", "ModalEmbeddings"])
_import_structure["models.mobilebert"].extend(
[
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
)
_import_structure["models.mpnet"].extend(
[
"MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"MPNetForMaskedLM",
"MPNetForMultipleChoice",
"MPNetForQuestionAnswering",
"MPNetForSequenceClassification",
"MPNetForTokenClassification",
"MPNetLayer",
"MPNetModel",
"MPNetPreTrainedModel",
]
)
_import_structure["models.mt5"].extend(["MT5EncoderModel", "MT5ForConditionalGeneration", "MT5Model"])
_import_structure["models.openai"].extend(
[
"OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OpenAIGPTDoubleHeadsModel",
"OpenAIGPTForSequenceClassification",
"OpenAIGPTLMHeadModel",
"OpenAIGPTModel",
"OpenAIGPTPreTrainedModel",
"load_tf_weights_in_openai_gpt",
]
)
_import_structure["models.pegasus"].extend(
["PegasusForCausalLM", "PegasusForConditionalGeneration", "PegasusModel"]
)
_import_structure["models.prophetnet"].extend(
[
"PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ProphetNetDecoder",
"ProphetNetEncoder",
"ProphetNetForCausalLM",
"ProphetNetForConditionalGeneration",
"ProphetNetModel",
"ProphetNetPreTrainedModel",
]
)
_import_structure["models.rag"].extend(["RagModel", "RagSequenceForGeneration", "RagTokenForGeneration"])
_import_structure["models.reformer"].extend(
[
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
]
)
_import_structure["models.retribert"].extend(
["RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RetriBertModel", "RetriBertPreTrainedModel"]
)
_import_structure["models.roberta"].extend(
[
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
]
)
_import_structure["models.roformer"].extend(
[
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
)
_import_structure["models.speech_to_text"].extend(
[
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
]
)
_import_structure["models.squeezebert"].extend(
[
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
)
_import_structure["models.t5"].extend(
[
"T5_PRETRAINED_MODEL_ARCHIVE_LIST",
"T5EncoderModel",
"T5ForConditionalGeneration",
"T5Model",
"T5PreTrainedModel",
"load_tf_weights_in_t5",
]
)
_import_structure["models.tapas"].extend(
[
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
]
)
_import_structure["models.transfo_xl"].extend(
[
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
)
_import_structure["models.vit"].extend(
[
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTModel",
"ViTPreTrainedModel",
]
)
_import_structure["models.wav2vec2"].extend(
[
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
)
_import_structure["models.xlm"].extend(
[
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
)
_import_structure["models.xlm_prophetnet"].extend(
[
"XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMProphetNetDecoder",
"XLMProphetNetEncoder",
"XLMProphetNetForCausalLM",
"XLMProphetNetForConditionalGeneration",
"XLMProphetNetModel",
]
)
_import_structure["models.xlm_roberta"].extend(
[
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
]
)
_import_structure["models.xlnet"].extend(
[
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
)
_import_structure["optimization"] = [
"Adafactor",
"AdamW",
"get_constant_schedule",
"get_constant_schedule_with_warmup",
"get_cosine_schedule_with_warmup",
"get_cosine_with_hard_restarts_schedule_with_warmup",
"get_linear_schedule_with_warmup",
"get_polynomial_decay_schedule_with_warmup",
"get_scheduler",
]
_import_structure["trainer"] = ["Trainer"]
_import_structure["trainer_pt_utils"] = ["torch_distributed_zero_first"]
_import_structure["trainer_seq2seq"] = ["Seq2SeqTrainer"]
else:
from .utils import dummy_pt_objects
_import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")]
# TensorFlow-backed objects
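# The TensorFlow entries mirror the PyTorch groups above, with `TF`-prefixed
# class names and TF-specific auto-model mappings.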
if is_tf_available():
_import_structure["benchmark.benchmark_args_tf"] = ["TensorFlowBenchmarkArguments"]
_import_structure["benchmark.benchmark_tf"] = ["TensorFlowBenchmark"]
_import_structure["generation_tf_utils"] = ["tf_top_k_top_p_filtering"]
_import_structure["modeling_tf_utils"] = [
"TFPreTrainedModel",
"TFSequenceSummary",
"TFSharedEmbeddings",
"shape_list",
]
# TensorFlow models structure
_import_structure["models.albert"].extend(
[
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
)
_import_structure["models.auto"].extend(
[
"TF_MODEL_FOR_CAUSAL_LM_MAPPING",
"TF_MODEL_FOR_MASKED_LM_MAPPING",
"TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
"TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
"TF_MODEL_FOR_PRETRAINING_MAPPING",
"TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
"TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
"TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
"TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
"TF_MODEL_MAPPING",
"TF_MODEL_WITH_LM_HEAD_MAPPING",
"TFAutoModel",
"TFAutoModelForCausalLM",
"TFAutoModelForMaskedLM",
"TFAutoModelForMultipleChoice",
"TFAutoModelForPreTraining",
"TFAutoModelForQuestionAnswering",
"TFAutoModelForSeq2SeqLM",
"TFAutoModelForSequenceClassification",
"TFAutoModelForTokenClassification",
"TFAutoModelWithLMHead",
]
)
_import_structure["models.bart"].extend(["TFBartForConditionalGeneration", "TFBartModel", "TFBartPretrainedModel"])
_import_structure["models.bert"].extend(
[
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
)
_import_structure["models.blenderbot"].extend(["TFBlenderbotForConditionalGeneration", "TFBlenderbotModel"])
_import_structure["models.blenderbot_small"].extend(
["TFBlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallModel"]
)
_import_structure["models.camembert"].extend(
[
"TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCamembertForMaskedLM",
"TFCamembertForMultipleChoice",
"TFCamembertForQuestionAnswering",
"TFCamembertForSequenceClassification",
"TFCamembertForTokenClassification",
"TFCamembertModel",
]
)
_import_structure["models.convbert"].extend(
[
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
)
_import_structure["models.ctrl"].extend(
[
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
)
_import_structure["models.distilbert"].extend(
[
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
)
_import_structure["models.dpr"].extend(
[
"TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDPRContextEncoder",
"TFDPRPretrainedContextEncoder",
"TFDPRPretrainedQuestionEncoder",
"TFDPRPretrainedReader",
"TFDPRQuestionEncoder",
"TFDPRReader",
]
)
_import_structure["models.electra"].extend(
[
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
)
_import_structure["models.flaubert"].extend(
[
"TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFlaubertForMultipleChoice",
"TFFlaubertForQuestionAnsweringSimple",
"TFFlaubertForSequenceClassification",
"TFFlaubertForTokenClassification",
"TFFlaubertModel",
"TFFlaubertWithLMHeadModel",
]
)
_import_structure["models.funnel"].extend(
[
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
]
)
_import_structure["models.gpt2"].extend(
[
"TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGPT2DoubleHeadsModel",
"TFGPT2ForSequenceClassification",
"TFGPT2LMHeadModel",
"TFGPT2MainLayer",
"TFGPT2Model",
"TFGPT2PreTrainedModel",
]
)
_import_structure["models.layoutlm"].extend(
[
"TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMForMaskedLM",
"TFLayoutLMForSequenceClassification",
"TFLayoutLMForTokenClassification",
"TFLayoutLMMainLayer",
"TFLayoutLMModel",
"TFLayoutLMPreTrainedModel",
]
)
_import_structure["models.led"].extend(["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"])
_import_structure["models.longformer"].extend(
[
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerSelfAttention",
]
)
_import_structure["models.lxmert"].extend(
[
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
)
_import_structure["models.marian"].extend(["TFMarianModel", "TFMarianMTModel"])
_import_structure["models.mbart"].extend(["TFMBartForConditionalGeneration", "TFMBartModel"])
_import_structure["models.mobilebert"].extend(
[
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
)
_import_structure["models.mpnet"].extend(
[
"TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMPNetForMaskedLM",
"TFMPNetForMultipleChoice",
"TFMPNetForQuestionAnswering",
"TFMPNetForSequenceClassification",
"TFMPNetForTokenClassification",
"TFMPNetMainLayer",
"TFMPNetModel",
"TFMPNetPreTrainedModel",
]
)
_import_structure["models.mt5"].extend(["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"])
_import_structure["models.openai"].extend(
[
"TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFOpenAIGPTDoubleHeadsModel",
"TFOpenAIGPTForSequenceClassification",
"TFOpenAIGPTLMHeadModel",
"TFOpenAIGPTMainLayer",
"TFOpenAIGPTModel",
"TFOpenAIGPTPreTrainedModel",
]
)
_import_structure["models.pegasus"].extend(["TFPegasusForConditionalGeneration", "TFPegasusModel"])
_import_structure["models.rag"].extend(
[
"TFRagModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
)
_import_structure["models.roberta"].extend(
[
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
)
_import_structure["models.roformer"].extend(
[
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
)
_import_structure["models.t5"].extend(
[
"TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFT5EncoderModel",
"TFT5ForConditionalGeneration",
"TFT5Model",
"TFT5PreTrainedModel",
]
)
_import_structure["models.transfo_xl"].extend(
[
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
)
_import_structure["models.xlm"].extend(
[
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
)
_import_structure["models.xlm_roberta"].extend(
[
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
]
)
_import_structure["models.xlnet"].extend(
[
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
)
_import_structure["optimization_tf"] = ["AdamWeightDecay", "GradientAccumulator", "WarmUp", "create_optimizer"]
_import_structure["trainer_tf"] = ["TFTrainer"]
else:
from .utils import dummy_tf_objects
_import_structure["utils.dummy_tf_objects"] = [name for name in dir(dummy_tf_objects) if not name.startswith("_")]
# FLAX-backed objects
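# Flax coverage is narrower: only the auto classes plus a handful of
# architectures (BERT, ELECTRA, GPT-2, RoBERTa) are registered here.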
if is_flax_available():
_import_structure["generation_flax_logits_process"] = [
"FlaxLogitsProcessor",
"FlaxLogitsProcessorList",
"FlaxLogitsWarper",
"FlaxTemperatureLogitsWarper",
"FlaxTopKLogitsWarper",
"FlaxTopPLogitsWarper",
]
_import_structure["modeling_flax_utils"] = ["FlaxPreTrainedModel"]
_import_structure["models.auto"].extend(
[
"FLAX_MODEL_FOR_CAUSAL_LM_MAPPING",
"FLAX_MODEL_FOR_MASKED_LM_MAPPING",
"FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
"FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
"FLAX_MODEL_FOR_PRETRAINING_MAPPING",
"FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
"FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
"FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
"FLAX_MODEL_MAPPING",
"FlaxAutoModel",
"FlaxAutoModelForCausalLM",
"FlaxAutoModelForMaskedLM",
"FlaxAutoModelForMultipleChoice",
"FlaxAutoModelForNextSentencePrediction",
"FlaxAutoModelForPreTraining",
"FlaxAutoModelForQuestionAnswering",
"FlaxAutoModelForSequenceClassification",
"FlaxAutoModelForTokenClassification",
]
)
_import_structure["models.bert"].extend(
[
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
)
_import_structure["models.electra"].extend(
[
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
)
_import_structure["models.gpt2"].extend(["FlaxGPT2LMHeadModel", "FlaxGPT2Model"])
_import_structure["models.roberta"].extend(
[
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
)
else:
from .utils import dummy_flax_objects
_import_structure["utils.dummy_flax_objects"] = [
name for name in dir(dummy_flax_objects) if not name.startswith("_")
]
# Direct imports for type-checking
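# Under TYPE_CHECKING the same names are imported eagerly so static type
# checkers and IDEs can resolve them; this section is expected to mirror
# `_import_structure` above entry for entry, while runtime imports presumably
# stay lazy via the structure defined above.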
if TYPE_CHECKING:
# Configuration
from .configuration_utils import PretrainedConfig
# Data
from .data import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_compute_metrics,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_compute_metrics,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
# Feature Extractor
from .feature_extraction_utils import BatchFeature, SequenceFeatureExtractor
# Files and general utilities
from .file_utils import (
CONFIG_NAME,
MODEL_CARD_NAME,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
TensorType,
add_end_docstrings,
add_start_docstrings,
cached_path,
is_apex_available,
is_datasets_available,
is_faiss_available,
is_flax_available,
is_psutil_available,
is_py3nvml_available,
is_sentencepiece_available,
is_sklearn_available,
is_speech_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_vision_available,
)
from .hf_argparser import HfArgumentParser
# Integrations
from .integrations import (
is_comet_available,
is_optuna_available,
is_ray_available,
is_ray_tune_available,
is_tensorboard_available,
is_wandb_available,
)
# Model Cards
from .modelcard import ModelCard
# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (
convert_tf_weight_name_to_pt_weight_name,
load_pytorch_checkpoint_in_tf2_model,
load_pytorch_model_in_tf2_model,
load_pytorch_weights_in_tf2_model,
load_tf2_checkpoint_in_pytorch_model,
load_tf2_model_in_pytorch_model,
load_tf2_weights_in_pytorch_model,
)
from .models.albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
from .models.auto import (
ALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
MODEL_NAMES_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoTokenizer,
)
from .models.bart import BartConfig, BartTokenizer
from .models.bert import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BasicTokenizer,
BertConfig,
BertTokenizer,
WordpieceTokenizer,
)
from .models.bert_generation import BertGenerationConfig
from .models.bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
from .models.bertweet import BertweetTokenizer
from .models.big_bird import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdConfig, BigBirdTokenizer
from .models.bigbird_pegasus import BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig
from .models.blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotTokenizer
from .models.blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallTokenizer,
)
from .models.camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
from .models.clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPTextConfig,
CLIPTokenizer,
CLIPVisionConfig,
)
from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer
from .models.cpm import CpmTokenizer
from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer
from .models.deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaTokenizer
from .models.deberta_v2 import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config
from .models.deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig
from .models.distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer
from .models.dpr import (
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPRConfig,
DPRContextEncoderTokenizer,
DPRQuestionEncoderTokenizer,
DPRReaderOutput,
DPRReaderTokenizer,
)
from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer
from .models.encoder_decoder import EncoderDecoderConfig
from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer
from .models.fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig, FSMTTokenizer
from .models.funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig, FunnelTokenizer
from .models.gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2Tokenizer
from .models.gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig
from .models.herbert import HerbertTokenizer
from .models.ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig
from .models.layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMTokenizer
from .models.led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig, LEDTokenizer
from .models.longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer
from .models.luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig, LukeTokenizer
from .models.lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer
from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config
from .models.marian import MarianConfig
from .models.mbart import MBartConfig
from .models.megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
from .models.mmbt import MMBTConfig
from .models.mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertTokenizer
from .models.mpnet import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig, MPNetTokenizer
from .models.mt5 import MT5Config
from .models.openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig, OpenAIGPTTokenizer
from .models.pegasus import PegasusConfig
from .models.phobert import PhobertTokenizer
from .models.prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig, ProphetNetTokenizer
from .models.rag import RagConfig, RagRetriever, RagTokenizer
from .models.reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
from .models.retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig, RetriBertTokenizer
from .models.roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaTokenizer
from .models.roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerTokenizer
from .models.speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .models.squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertTokenizer
from .models.t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
from .models.tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig, TapasTokenizer
from .models.transfo_xl import (
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
TransfoXLConfig,
TransfoXLCorpus,
TransfoXLTokenizer,
)
from .models.vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig
from .models.wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
Wav2Vec2Config,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
Wav2Vec2Tokenizer,
)
from .models.xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMTokenizer
from .models.xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig
from .models.xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig
from .models.xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
# Pipelines
from .pipelines import (
AutomaticSpeechRecognitionPipeline,
Conversation,
ConversationalPipeline,
CsvPipelineDataFormat,
FeatureExtractionPipeline,
FillMaskPipeline,
ImageClassificationPipeline,
JsonPipelineDataFormat,
NerPipeline,
PipedPipelineDataFormat,
Pipeline,
PipelineDataFormat,
QuestionAnsweringPipeline,
SummarizationPipeline,
TableQuestionAnsweringPipeline,
Text2TextGenerationPipeline,
TextClassificationPipeline,
TextGenerationPipeline,
TokenClassificationPipeline,
TranslationPipeline,
ZeroShotClassificationPipeline,
pipeline,
)
# Tokenization
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import (
AddedToken,
BatchEncoding,
CharSpan,
PreTrainedTokenizerBase,
SpecialTokensMixin,
TokenSpan,
)
# Trainer
from .trainer_callback import (
DefaultFlowCallback,
EarlyStoppingCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_utils import EvalPrediction, IntervalStrategy, SchedulerType, set_seed
from .training_args import TrainingArguments
from .training_args_seq2seq import Seq2SeqTrainingArguments
from .training_args_tf import TFTrainingArguments
from .utils import logging
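    # The conditional imports below follow the same backend checks as the
    # `_import_structure` blocks above, falling back to wildcard imports from
    # the matching dummy modules when a backend is unavailable.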
if is_sentencepiece_available():
from .models.albert import AlbertTokenizer
from .models.barthez import BarthezTokenizer
from .models.bert_generation import BertGenerationTokenizer
from .models.camembert import CamembertTokenizer
from .models.deberta_v2 import DebertaV2Tokenizer
from .models.m2m_100 import M2M100Tokenizer
from .models.marian import MarianTokenizer
from .models.mbart import MBart50Tokenizer, MBartTokenizer
from .models.mt5 import MT5Tokenizer
from .models.pegasus import PegasusTokenizer
from .models.reformer import ReformerTokenizer
from .models.speech_to_text import Speech2TextTokenizer
from .models.t5 import T5Tokenizer
from .models.xlm_prophetnet import XLMProphetNetTokenizer
from .models.xlm_roberta import XLMRobertaTokenizer
from .models.xlnet import XLNetTokenizer
else:
from .utils.dummy_sentencepiece_objects import *
if is_tokenizers_available():
from .models.albert import AlbertTokenizerFast
from .models.bart import BartTokenizerFast
from .models.barthez import BarthezTokenizerFast
from .models.bert import BertTokenizerFast
from .models.big_bird import BigBirdTokenizerFast
from .models.camembert import CamembertTokenizerFast
from .models.clip import CLIPTokenizerFast
from .models.convbert import ConvBertTokenizerFast
from .models.deberta import DebertaTokenizerFast
from .models.distilbert import DistilBertTokenizerFast
from .models.dpr import DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast, DPRReaderTokenizerFast
from .models.electra import ElectraTokenizerFast
from .models.funnel import FunnelTokenizerFast
from .models.gpt2 import GPT2TokenizerFast
from .models.herbert import HerbertTokenizerFast
from .models.layoutlm import LayoutLMTokenizerFast
from .models.led import LEDTokenizerFast
from .models.longformer import LongformerTokenizerFast
from .models.lxmert import LxmertTokenizerFast
from .models.mbart import MBart50TokenizerFast, MBartTokenizerFast
from .models.mobilebert import MobileBertTokenizerFast
from .models.mpnet import MPNetTokenizerFast
from .models.mt5 import MT5TokenizerFast
from .models.openai import OpenAIGPTTokenizerFast
from .models.pegasus import PegasusTokenizerFast
from .models.reformer import ReformerTokenizerFast
from .models.retribert import RetriBertTokenizerFast
from .models.roberta import RobertaTokenizerFast
from .models.roformer import RoFormerTokenizerFast
from .models.squeezebert import SqueezeBertTokenizerFast
from .models.t5 import T5TokenizerFast
from .models.xlm_roberta import XLMRobertaTokenizerFast
from .models.xlnet import XLNetTokenizerFast
from .tokenization_utils_fast import PreTrainedTokenizerFast
else:
from .utils.dummy_tokenizers_objects import *
if is_sentencepiece_available() and is_tokenizers_available():
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer
else:
        from .utils.dummy_sentencepiece_and_tokenizers_objects import *
if is_speech_available():
from .models.speech_to_text import Speech2TextFeatureExtractor
else:
from .utils.dummy_speech_objects import *
if is_speech_available() and is_sentencepiece_available():
from .models.speech_to_text import Speech2TextProcessor
else:
from .utils.dummy_sentencepiece_and_speech_objects import *
if is_vision_available():
from .image_utils import ImageFeatureExtractionMixin
from .models.clip import CLIPFeatureExtractor, CLIPProcessor
from .models.deit import DeiTFeatureExtractor
from .models.vit import ViTFeatureExtractor
else:
from .utils.dummy_vision_objects import *
# Modeling
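    # The PyTorch imports below mirror the `models.*` groups registered under
    # `is_torch_available()` above.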
if is_torch_available():
# Benchmarks
from .benchmark.benchmark import PyTorchBenchmark
from .benchmark.benchmark_args import PyTorchBenchmarkArguments
from .data.data_collator import (
DataCollator,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
default_data_collator,
)
from .data.datasets import (
GlueDataset,
GlueDataTrainingArguments,
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
SquadDataset,
SquadDataTrainingArguments,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .generation_beam_search import BeamScorer, BeamSearchScorer
from .generation_logits_process import (
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessor,
LogitsProcessorList,
LogitsWarper,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from .generation_stopping_criteria import (
MaxLengthCriteria,
MaxTimeCriteria,
StoppingCriteria,
StoppingCriteriaList,
)
from .generation_utils import top_k_top_p_filtering
from .modeling_utils import Conv1D, PreTrainedModel, apply_chunking_to_forward, prune_layer
from .models.albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
from .models.auto import (
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoModel,
AutoModelForCausalLM,
AutoModelForImageClassification,
AutoModelForMaskedLM,
AutoModelForMultipleChoice,
AutoModelForNextSentencePrediction,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTableQuestionAnswering,
AutoModelForTokenClassification,
AutoModelWithLMHead,
)
from .models.bart import (
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BartForCausalLM,
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
BartPretrainedModel,
PretrainedBartModel,
)
from .models.bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
from .models.bert_generation import (
BertGenerationDecoder,
BertGenerationEncoder,
load_tf_weights_in_bert_generation,
)
from .models.big_bird import (
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdForCausalLM,
BigBirdForMaskedLM,
BigBirdForMultipleChoice,
BigBirdForPreTraining,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
BigBirdLayer,
BigBirdModel,
BigBirdPreTrainedModel,
load_tf_weights_in_big_bird,
)
from .models.bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
)
from .models.blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
)
from .models.blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
)
from .models.camembert import (
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from .models.clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPVisionModel,
)
from .models.convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
from .models.ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
from .models.deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
from .models.deberta_v2 import (
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaV2ForMaskedLM,
DebertaV2ForQuestionAnswering,
DebertaV2ForSequenceClassification,
DebertaV2ForTokenClassification,
DebertaV2Model,
DebertaV2PreTrainedModel,
)
from .models.deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTModel,
DeiTPreTrainedModel,
)
from .models.distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
from .models.dpr import (
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPRContextEncoder,
DPRPretrainedContextEncoder,
DPRPretrainedQuestionEncoder,
DPRPretrainedReader,
DPRQuestionEncoder,
DPRReader,
)
from .models.electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
from .models.encoder_decoder import EncoderDecoderModel
from .models.flaubert import (
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from .models.fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel
from .models.funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
load_tf_weights_in_funnel,
)
from .models.gpt2 import (
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2DoubleHeadsModel,
GPT2ForSequenceClassification,
GPT2LMHeadModel,
GPT2Model,
GPT2PreTrainedModel,
load_tf_weights_in_gpt2,
)
from .models.gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
from .models.ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
from .models.layoutlm import (
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
LayoutLMModel,
)
from .models.led import (
LED_PRETRAINED_MODEL_ARCHIVE_LIST,
LEDForConditionalGeneration,
LEDForQuestionAnswering,
LEDForSequenceClassification,
LEDModel,
)
from .models.longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerSelfAttention,
)
from .models.luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeModel,
LukePreTrainedModel,
)
from .models.lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
from .models.m2m_100 import M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, M2M100ForConditionalGeneration, M2M100Model
from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel
from .models.mbart import (
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
)
from .models.megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
from .models.mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
from .models.mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
from .models.mpnet import (
MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetLayer,
MPNetModel,
MPNetPreTrainedModel,
)
from .models.mt5 import MT5EncoderModel, MT5ForConditionalGeneration, MT5Model
from .models.openai import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
OpenAIGPTPreTrainedModel,
load_tf_weights_in_openai_gpt,
)
from .models.pegasus import PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel
from .models.prophetnet import (
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ProphetNetDecoder,
ProphetNetEncoder,
ProphetNetForCausalLM,
ProphetNetForConditionalGeneration,
ProphetNetModel,
ProphetNetPreTrainedModel,
)
from .models.rag import RagModel, RagSequenceForGeneration, RagTokenForGeneration
from .models.reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
)
from .models.retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel
from .models.roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from .models.roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
from .models.speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
)
from .models.squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
from .models.t5 import (
T5_PRETRAINED_MODEL_ARCHIVE_LIST,
T5EncoderModel,
T5ForConditionalGeneration,
T5Model,
T5PreTrainedModel,
load_tf_weights_in_t5,
)
from .models.tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
)
from .models.transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
from .models.vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTModel,
ViTPreTrainedModel,
)
from .models.wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Wav2Vec2ForCTC,
Wav2Vec2ForMaskedLM,
Wav2Vec2Model,
Wav2Vec2PreTrainedModel,
)
from .models.xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
from .models.xlm_prophetnet import (
XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMProphetNetDecoder,
XLMProphetNetEncoder,
XLMProphetNetForCausalLM,
XLMProphetNetForConditionalGeneration,
XLMProphetNetModel,
)
from .models.xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from .models.xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
# Optimization
from .optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
# Trainer
from .trainer import Trainer
from .trainer_pt_utils import torch_distributed_zero_first
from .trainer_seq2seq import Seq2SeqTrainer
else:
from .utils.dummy_pt_objects import *
# TensorFlow
if is_tf_available():
from .benchmark.benchmark_args_tf import TensorFlowBenchmarkArguments
# Benchmarks
from .benchmark.benchmark_tf import TensorFlowBenchmark
from .generation_tf_utils import tf_top_k_top_p_filtering
from .modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMMainLayer,
TFLayoutLMModel,
TFLayoutLMPreTrainedModel,
)
from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list
from .models.albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
from .models.auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForMultipleChoice,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
)
from .models.bart import TFBartForConditionalGeneration, TFBartModel, TFBartPretrainedModel
from .models.bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
from .models.blenderbot import TFBlenderbotForConditionalGeneration, TFBlenderbotModel
from .models.blenderbot_small import TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
from .models.camembert import (
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCamembertForMaskedLM,
TFCamembertForMultipleChoice,
TFCamembertForQuestionAnswering,
TFCamembertForSequenceClassification,
TFCamembertForTokenClassification,
TFCamembertModel,
)
from .models.convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
from .models.ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
from .models.distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
from .models.dpr import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDPRContextEncoder,
TFDPRPretrainedContextEncoder,
TFDPRPretrainedQuestionEncoder,
TFDPRPretrainedReader,
TFDPRQuestionEncoder,
TFDPRReader,
)
from .models.electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
from .models.flaubert import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
from .models.funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
from .models.gpt2 import (
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGPT2DoubleHeadsModel,
TFGPT2ForSequenceClassification,
TFGPT2LMHeadModel,
TFGPT2MainLayer,
TFGPT2Model,
TFGPT2PreTrainedModel,
)
from .models.led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel
from .models.longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerSelfAttention,
)
from .models.lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
from .models.marian import TFMarianModel, TFMarianMTModel
from .models.mbart import TFMBartForConditionalGeneration, TFMBartModel
from .models.mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
from .models.mpnet import (
TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMPNetForMaskedLM,
TFMPNetForMultipleChoice,
TFMPNetForQuestionAnswering,
TFMPNetForSequenceClassification,
TFMPNetForTokenClassification,
TFMPNetMainLayer,
TFMPNetModel,
TFMPNetPreTrainedModel,
)
from .models.mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
from .models.openai import (
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFOpenAIGPTDoubleHeadsModel,
TFOpenAIGPTForSequenceClassification,
TFOpenAIGPTLMHeadModel,
TFOpenAIGPTMainLayer,
TFOpenAIGPTModel,
TFOpenAIGPTPreTrainedModel,
)
from .models.pegasus import TFPegasusForConditionalGeneration, TFPegasusModel
from .models.rag import TFRagModel, TFRagSequenceForGeneration, TFRagTokenForGeneration
from .models.roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
from .models.roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
from .models.t5 import (
TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST,
TFT5EncoderModel,
TFT5ForConditionalGeneration,
TFT5Model,
TFT5PreTrainedModel,
)
from .models.transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
from .models.xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
from .models.xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
)
from .models.xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
# Optimization
from .optimization_tf import AdamWeightDecay, GradientAccumulator, WarmUp, create_optimizer
# Trainer
from .trainer_tf import TFTrainer
else:
# Import the same objects as dummies to get them in the namespace.
# They will raise an import error if the user tries to instantiate / use them.
from .utils.dummy_tf_objects import *
if is_flax_available():
from .generation_flax_logits_process import (
FlaxLogitsProcessor,
FlaxLogitsProcessorList,
FlaxLogitsWarper,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
from .modeling_flax_utils import FlaxPreTrainedModel
from .models.auto import (
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
FLAX_MODEL_FOR_PRETRAINING_MAPPING,
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
FLAX_MODEL_MAPPING,
FlaxAutoModel,
FlaxAutoModelForCausalLM,
FlaxAutoModelForMaskedLM,
FlaxAutoModelForMultipleChoice,
FlaxAutoModelForNextSentencePrediction,
FlaxAutoModelForPreTraining,
FlaxAutoModelForQuestionAnswering,
FlaxAutoModelForSequenceClassification,
FlaxAutoModelForTokenClassification,
)
from .models.bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
from .models.electra import (
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
from .models.gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model
from .models.roberta import (
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
# Import the same objects as dummies to get them in the namespace.
# They will raise an import error if the user tries to instantiate / use them.
from .utils.dummy_flax_objects import *
else:
import importlib
import os
import sys
class _LazyModule(_BaseLazyModule):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
__file__ = globals()["__file__"]
__path__ = [os.path.dirname(__file__)]
def _get_module(self, module_name: str):
return importlib.import_module("." + module_name, self.__name__)
def __getattr__(self, name: str):
# Special handling for the version, which is a constant from this module and not imported in a submodule.
if name == "__version__":
return __version__
return super().__getattr__(name)
sys.modules[__name__] = _LazyModule(__name__, _import_structure)
if not is_tf_available() and not is_torch_available() and not is_flax_available():
logger.warning(
"None of PyTorch, TensorFlow >= 2.0, or Flax have been found. "
"Models won't be available and only tokenizers, configuration "
"and file/data utilities can be used."
)
| apache-2.0 |
viswimmer1/PythonGenerator | data/python_files/33855489/pyInit.py | 1 | 31552 | import numpy as np
import copy as cp
import struct as st
import math,os,sys,progressbar,pylab
import matplotlib as mpl
from matplotlib import pyplot as plt
from numpy.lib import recfunctions
from scipy import spatial as sp
from scipy import special
from scipy.interpolate import griddata,interp1d
from scipy.ndimage import map_coordinates
from scipy import signal
def gauss_kern(size, sizey=None):
""" Returns a normalized 2D gauss kernel array for convolutions """
size = int(size)
if not sizey:
sizey = size
else:
sizey = int(sizey)
    x, y = np.mgrid[-size:size+1, -sizey:sizey+1]
    g = np.exp(-(x**2/float(size)+y**2/float(sizey)))
return g / g.sum()
def blur_image(im, n, ny=None) :
""" blurs the image by convolving with a gaussian kernel of typical
size n. The optional keyword argument ny allows for a different
size in the y direction.
"""
g = gauss_kern(n, sizey=ny)
improc = signal.convolve(im,g, mode='valid')
return improc
def sround(x,figs=0):
pw=np.floor(np.log10(np.abs(x)))
return 10**pw*round(x*10**-pw)
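#Note: as written this rounds to one significant figure; the figs argument is currently unused.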
def propspace(a,b,n=50):
"""Basically just logspace, but I wanted to be able to "see" what I was
doing."""
K=((b/a)**(1./(n-1.)))-1.
out=np.zeros(n)
for i in xrange(n):
out[i]=a*((1+K)**i)
return out
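#For a, b > 0 this should match np.logspace(np.log10(a), np.log10(b), n): n points separated by
#the constant ratio (b/a)**(1./(n-1)).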
def polar2cartesian(r, t, grid, x, y, order=3):
"""Coordinate transform for converting a polar array to Cartesian
    coordinates. r and t are the coordinates on the current polar grid, grid
    holds the values at each location, and x, y are the points where we are to
    interpolate in Cartesian space. A spline controls the interpolation, where
    order specifies the spline order (0-5)."""
X, Y = np.meshgrid(x, y)
new_r = np.sqrt(X*X+Y*Y)
new_t = np.arctan2(X, Y)
ir = interp1d(r, np.arange(len(r)), bounds_error=False)
it = interp1d(t, np.arange(len(t)))
new_ir = ir(new_r.ravel())
new_it = it(new_t.ravel())
new_ir[new_r.ravel() > r.max()] = len(r)-1
new_ir[new_r.ravel() < r.min()] = 0
return map_coordinates(grid, np.array([new_ir, new_it]),
order=order).reshape(new_r.shape)
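#Rough usage sketch (array names are hypothetical): vals should have shape (len(r), len(t)), and
#polar2cartesian(r, t, vals, x, y) returns the interpolated values on a len(y) x len(x) grid.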
def plot(x, y, hline=None, vline=None, box=True, xlab='', ylab='',
title='', fontsize=mpl.rcParams['font.size'], log='', pch='.', add=False, **kw):
""" General purpose plotting function. Use log on xy axis if x or y or
both appears in the string log. box plots a boxplot to the left of data
range and hline and vline include vertical and horizontal lines.
Returns list of [[fig],[box],main]
"""
pchConvert = ['.','o','^','v','<','>','p','s','+','*','x']
if not isinstance(pch,str):
pch=pchConvert[pch%len(pchConvert)]
ret = []
if not add:
figobj=plt.figure()
ret.append(figobj)
pltfunc=plt.plot
if log.find('x')>=0 and log.find('y')>=0:
pltfunc=plt.loglog
elif log.find('x')>=0:
pltfunc=plt.semilogx
elif log.find('y')>=0:
pltfunc=plt.semilogy
if box and not add:
qs=np.percentile(y,[5,25,50,75,95])
whis=np.max((qs[4]-qs[2],qs[2]-qs[0]))/(qs[3]-qs[1])
xsize=sround((x.max()-x.min())/10.)
boxobj=plt.boxplot(y,positions=[x.min()-xsize],widths=[xsize],whis=whis)
ret.append(boxobj)
#make col a synonym for color to have R like syntax
if 'col' in kw:
        kw['color'] = kw.pop('col')
main=pltfunc(x,y,pch,**kw)
ret.append(main)
if box and not add:
pylab.xticks(np.arange(sround(x.min()),sround(x.max()),xsize))
if hline is not None:
hline = [hline] if not isinstance(hline,list) else hline
for l in hline:
pltfunc([x.min(),x.max()],[l,l],'r')
if vline is not None:
vline = [vline] if not isinstance(vline,list) else vline
for l in vline:
pltfunc([l,l],[y.min(),y.max()],'r')
if not add:
plt.xlabel(xlab,fontsize=fontsize)
plt.ylabel(ylab,fontsize=fontsize)
plt.title(title,fontsize=fontsize)
return ret
def heatMap(x,y,z,xres=5000,yres=5000,bar=True,log=True,xlab='', ylab='',
title=''):
"""Given a set of coordinates {(x,y,z)}, grids up x and y and converts
the z values into a colour scaling."""
xi=np.linspace(x.min(),x.max(),xres)
yi=np.linspace(y.min(),y.max(),yres)
zi=griddata((x,y),np.log10(z),(xi[None,:],yi[:,None]),method='cubic')
ex=[xi.min(),xi.max(),yi.max(),yi.min()]
ret=plt.imshow(zi,extent=ex)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
if bar:
plt.colorbar()
return ret
class Disc(object):
"""This object is used to store information about an SPH disc, to calculate
infromation from this data and to plot the data in a useful way. At it's
core is an object called "data", which is populated from the SPH data.
This is a structured (named) array, SPH particles as all entries.
Unknown or missing data is represented with np.nan. The star is stored
seperately"""
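    #Rough usage sketch (paths and column choices are hypothetical):
    #  d = Disc(); d.fromFolder("/path/to/sph/output"); d.subseton(N=5000)
    #  d.calcsumgz(); d.plot('R', 'sumgz')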
    #The default data type to store things as: f4 is 32-bit float, f8 is 64-bit
    #float. Change as needed.
DTYPE='f8'
#The parameters will be written out in this order...
def __init__(self,G=887.2057):
self.loaded=False
self.subset=None
self.G=G
#These are the core data atributes, we'll usually have many more...
self.dt=np.dtype([('x',self.DTYPE), ('y',self.DTYPE), ('z',self.DTYPE),('m',self.DTYPE)])
def init_tree(s,force=False):
if force or not hasattr(s,"tree2"):
s.tree2=sp.KDTree(np.column_stack((s.data['x'],s.data['y'])))
if force or not hasattr(s,"tree3"):
s.tree3=sp.KDTree(np.column_stack((s.data['x'],s.data['y'],s.data['z'])))
def addCol(self,name):
"""Ensure the column mentioned exists."""
if name not in self.data.dtype.names:
self.data=recfunctions.append_fields(self.data, name,
np.repeat(np.nan,self.data.shape[0]), dtypes=self.DTYPE,
usemask=False)
def swapStar(self,data,st):
tmp=cp.deepcopy(data[-1,])
data[-1,]=data[st,]
data[st,]=tmp
return data
def fromFolder(s,folder):
os.chdir(folder)
s.pos=np.genfromtxt("Position.txt")
s.grav=np.genfromtxt("Acceleration.txt")
s.pot=np.genfromtxt("GravPotential.txt")
s.mass=np.genfromtxt("Masses.txt")
s.smooth=np.genfromtxt("Smoothing.txt")
s.rho=np.genfromtxt("Density.txt")
s.energy=np.genfromtxt("Energy.txt")
s.strarg=s.mass.argmax()
strarg=s.strarg
s.star=s.pos[strarg,]
s.Mstar=s.mass.max()
#Adjust for the position of the star
s.pos=s.pos-s.star
#The star is always the last record... Even if we don't have one (?!)
#To do this, any list with a star value will swap the last entry with
#the star, which should be consistent
row=tuple(np.repeat(np.nan,len(s.dt)))
s.data=np.array([row for i in xrange(s.pos.shape[0]-1)],dtype=s.dt)
#s.data=np.array(np.repeat(np.nan,s.pos.shape[0]-1),dtype=s.dt)
s.pos=s.swapStar(s.pos,strarg)
s.star=s.pos[s.n]
s.data['x']=s.pos[:s.n,0]
s.data['y']=s.pos[:s.n,1]
s.data['z']=s.pos[:s.n,2]
s.mass=s.swapStar(s.mass,strarg)
s.data['m']=s.mass[:s.n]
s.star=np.concatenate((s.star,[s.mass[s.n]]))
s.grav=s.swapStar(s.grav,strarg)
s.star=np.concatenate((s.star,[s.grav[s.n,2]]))
s.addCol('gz')
s.data['gz']=s.grav[:s.n,2]
s.addCol('R')
s.data['R']=np.sqrt(s.data['x']**2+s.data['y']**2)
s.addCol('r')
s.data['r']=np.sqrt(s.data['x']**2+s.data['y']**2+s.data['z']**2)
s.addCol('theta')
s.data['theta']=np.arctan2(s.data['y'],s.data['x'])
s.loaded=True
def fromFile(s,file,format="x y z m rho iphase"):
"""Lodato format is x,y,z,m,rho,iphase"""
format=format.split()
if 'x' not in format or 'y' not in format or 'z' not in format:
            raise ValueError("format must contain x, y and z")
s.fdat=np.genfromtxt(file)
row=tuple(np.repeat(np.nan,len(s.dt)))
s.data=np.array([row for i in xrange(s.fdat.shape[0]-1)],dtype=s.dt)
#s.data=np.array(np.repeat(np.nan,s.fdat.shape[0]-1),dtype=s.dt)
x=s.fdat[:,format.index('x')]
y=s.fdat[:,format.index('y')]
z=s.fdat[:,format.index('z')]
m=s.fdat[:,format.index('m')]
#Move the star
s.strarg=m.argmax()
s.Mstar=m.max()
s.star=np.array([x[s.strarg],y[s.strarg],z[s.strarg]])
x=x-x[s.strarg]
y=y-y[s.strarg]
z=z-z[s.strarg]
s.addCol("x")
s.addCol("y")
s.addCol("z")
tmp=s.swapStar(x,s.strarg)
s.data['x']=tmp[:s.n]
tmp=s.swapStar(y,s.strarg)
s.data['y']=tmp[:s.n]
tmp=s.swapStar(z,s.strarg)
s.data['z']=tmp[:s.n]
#Load everything into the array, except the star
for i,var in enumerate(format):
#Already been loaded...
if var in ['x','y','z']:
continue
s.addCol(var)
tmp=s.fdat[:,i]
tmp=s.swapStar(tmp,s.strarg)
s.data[var]=tmp[:s.n]
s.addCol('R')
s.data['R']=np.sqrt(s.data['x']**2+s.data['y']**2)
s.addCol('r')
s.data['r']=np.sqrt(s.data['x']**2+s.data['y']**2+s.data['z']**2)
s.addCol('theta')
s.data['theta']=np.arctan2(s.data['y'],s.data['x'])
s.loaded=True
def fromBinaryFile(s,file,format='gx gy gz',type='f'):
"""type should be a valid python struct variable type, e.g. f= 4-byte
float. Assumes the (idiotic) fortran binary format and that every
variable has been written in blocks of equal size."""
bsize=st.calcsize(type)
format=format.split()
f=open(file,'rb')
fdat=f.read()
regionend=-4
i=0
while regionend+8<len(fdat) and i<len(format):
size=st.unpack('i',fdat[(regionend+4):(regionend+8)])[0]/bsize
regionstart=regionend+8
regionend=regionstart+size*bsize
s.addCol(format[i])
tmp=np.array(st.unpack(type*size,fdat[regionstart:regionend]))
#Remove star if necessary
if len(tmp)==s.n+1:
tmp=s.swapStar(tmp,s.strarg)
s.data[format[i]]=tmp[:s.n]
i=i+1
@property
def N(self):
if self.subset is not None:
return len(self.subset)
return self.n
@property
def n(s):
if not hasattr(s,'data'):
return 0
return s.data.shape[0]
def fetch(self,variable):
#If it's a variable of the length of the full data table, return the
#subset, otherwise we shouldn't
if self.subset is not None and len(variable)==self.n:
return variable[self.subset]
return variable
def subsetoff(s):
#Save it for turning back on later...
if hasattr(s,"subset") and s.subset is not None:
s.__subset=s.subset
s.subset=None
def subseton(self,fields=None,N=5000,force=False):
"""All functions will act on just a subset of the data, rather than the
entirety of it. You can either supply N in which case the particles
will be randomly chosen (N of them) or supply any number of fields, in
        which case the particles will be set to those rows where there is data
available. If this is all entries or None, or you enter a stupid value
for N, the current subset will not be changed"""
#If it's already on, do nothing
if self.subset is not None:
return None
#Restore the old one if it's still there
if not force and hasattr(self,"__subset"):
self.subset=self.__subset
return None
if fields is not None:
if not isinstance(fields,list):
subset=np.where((np.isfinite(self.data[fields])))[0]
else:
subset=np.arange(self.n)
for field in fields:
subset=subset[np.where((np.isfinite(self.data[field][subset])))]
if len(subset)==0 or len(subset)==self.n:
print "Invalid fields, subset is unchanged"
else:
self.subset=subset
elif N<=0 or N>=self.n:
print "Invalid N, subset is unchanged"
else:
rand=np.random.permutation(self.n)
self.subset=rand[:min(len(rand),N)]
def calcBands(self,data,boundaries=[.6,1.,2.]):
"""Assigns a factor to the table depending on which side of the
        boundaries the data specified falls on. Can be used to colour data
later when plotting..."""
if not isinstance(boundaries,list):
boundaries=[boundaries]
self.addCol('bands')
self.bandBoundaries=boundaries
data=np.abs(self.fetch(data))
if len(boundaries)<1:
            raise ValueError("at least one boundary must be supplied")
self.data['bands'][np.where(data<boundaries[0])]=0
count=0
for i,cut in enumerate(boundaries[1:]):
count=count+1
self.data['bands'][np.where((data>=boundaries[i]) & (data<cut))] = count
self.data['bands'][np.where((data>=boundaries[-1]))] = count+1
@property
def nBands(self):
if 'bands' in self.data.dtype.names and not np.all(np.isnan(self.data['bands'])):
return int(np.nanmax(self.data['bands'])+1)
return 0
def plot(s, x, y, newBands=False, useBands=True, hline=0, **kw):
"""General purpose plotting function to be used by other things. We can
calculate bands or display pre-calculated banding. If boundaries is
passed to this function, we pass it on to calcBands.
Returns the figure object and the series of line segments [fig,[lines]]
"""
ret=[]
#Get from strings...
if isinstance(x,str):
if 'xlab' not in kw:
kw['xlab']=x
x=s.data[x]
if isinstance(y,str):
if 'ylab' not in kw:
kw['ylab']=y
y=s.data[y]
#Otherwise, we have them already
x=s.fetch(x)
y=s.fetch(y)
if newBands:
if 'boundaries' in kw:
s.calcBands(y,boundaries=kw.pop('boundaries'))
else:
s.calcBands(y)
#Don't want to be passing a useless kw arg...
elif 'boundaries' in kw:
tmp=kw.pop('boundaries')
if useBands and 'bands' in s.data.dtype.names:
tmp=plot(x,y, pch=0,hline=hline,**kw)
ret.append(tmp[0])
lines=[]
for i in xrange(s.nBands):
o=np.where(s.data['bands']==i)
tmp=plot(x[o],y[o], pch=i+1,add=True)
lines.append(tmp[-1])
ret.append(lines)
else:
tmp=plot(x,y, hline=hline, **kw)
ret.append(tmp[0])
ret.append([tmp[-1]])
return ret
def ratioScatter(s, x, y1, y2=None, boundaries = [.6,1.], legend=True, loc="upper right", **kw):
""" Designed for plotting log ratios. If y1 and y2 both given, the y
axis becomes log2(|y1|/|y2|), otherwise y1 is assumed to be a valid log
ratio of two values."""
#Get from strings...
if isinstance(x,str):
x=s.data[x]
if isinstance(y1,str):
y1=s.data[y1]
if isinstance(y2,str):
y2=s.data[y2]
x=s.fetch(x)
y1=s.fetch(y1)
if y2 is not None:
y2=s.fetch(y2)
y=np.log2(np.abs(y1)/np.abs(y2))
else:
y=y1
ret=s.plot(x,y,boundaries=boundaries, **kw)
if legend and 'bands' in s.data.dtype.names and len(s.bandBoundaries)+1 == len(ret[-1]):
#Build the labels...
labs=["< %s" % str(s.bandBoundaries[0])]
for i in xrange(1,len(s.bandBoundaries)):
labs.append("%s - %s" %
(str(s.bandBoundaries[i-1]),str(s.bandBoundaries[i])))
labs.append("> %s" % str(s.bandBoundaries[-1]))
plt.legend(ret[-1],labs,loc=loc)
return ret
def relerrorScatter(s,x,y1,y2=None,boundaries=[10.,50.,100.,200.],
legend=True, loc="upper right", **kw):
""" Calculates the relative error and then passes it to plot"""
if isinstance(x,str):
x=s.data[x]
if isinstance(y1,str):
y1=s.data[y1]
if isinstance(y2,str):
y2=s.data[y2]
x=s.fetch(x)
y1=s.fetch(y1)
if y2 is not None:
y2=s.fetch(y2)
y=100.*(np.abs(y2-y1)/np.abs(y1))
else:
y=y1
ret=s.plot(x,y,boundaries=boundaries,**kw)
if legend and 'bands' in s.data.dtype.names and len(s.bandBoundaries)+1 == len(ret[-1]):
#Build the labels...
labs=["< %s %%" % str(s.bandBoundaries[0])]
for i in xrange(1,len(s.bandBoundaries)):
labs.append("%s %% - %s %%" %
(str(s.bandBoundaries[i-1]),str(s.bandBoundaries[i])))
labs.append("> %s %%" % str(s.bandBoundaries[-1]))
plt.legend(ret[-1],labs,loc=loc)
return ret
def surfacePlot(s,x,y,z,**kw):
"""Given a set of coordinates {(x,y,z)}, grids up x and y and converts
the z values into a colour scaling."""
if isinstance(x,str):
x=s.data[x]
if isinstance(y,str):
y=s.data[y]
if isinstance(z,str):
z=s.data[z]
x=s.fetch(x)
y=s.fetch(y)
z=s.fetch(z)
heatMap(x,y,z,**kw)
def findIndex(self,x,y,z,toll=1e-8):
"""Given x, y and z, return the indecies in data that match to within
toll. If multiple indicies match, -1 is returned"""
if len(x)!=len(y) or len(y)!=len(z) or len(x)!=len(z):
            raise ValueError("x, y and z must all have the same length")
index=np.repeat(-1,len(x))
widgets = [progressbar.Percentage(), progressbar.Bar(),' ',progressbar.FormatLabel('Time elapsed: %(elapsed)s'),' ',progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(x)).start()
for i in xrange(len(x)):
tmp=np.where( (np.abs(self.data['x']-x[i])<toll) &
(np.abs(self.data['y']-y[i])<toll) &
(np.abs(self.data['z']-z[i])<toll))[0]
if len(tmp)!=1:
continue
index[i]=tmp[0]
pbar.update(i)
return index
def calcsumgz(self,recalculate=False):
"""Calculates direct sum gravity and inserts it into the table"""
s=self
x=s.data['x']
y=s.data['y']
z=s.data['z']
ptx=self.fetch(x)
pty=self.fetch(y)
ptz=self.fetch(z)
#If we've calculated the values previously, no need to redo them...
index=self.fetch(np.arange(self.n))
s.addCol('sumgz')
if not recalculate:
o=np.where(np.isnan(self.data['sumgz'][index]))
ptx=ptx[o]
pty=pty[o]
ptz=ptz[o]
index=index[o]
widgets = [progressbar.Percentage(), progressbar.Bar(),' ',progressbar.FormatLabel('Time elapsed: %(elapsed)s'),' ',progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(index)).start()
for count,i in enumerate(index):
rtmp=np.sqrt((ptx[count]-s.data['x'])**2+(pty[count]-s.data['y'])**2+(ptz[count]-s.data['z'])**2)
rtmp[rtmp==0]=np.Infinity
s.data['sumgz'][i]=np.sum((s.G*s.data['m']*(s.data['z']-ptz[count]))/rtmp**3)
pbar.update(count)
def calcmidplaneCD(s,tgtN=50,maxR=5.):
s.init_tree()
s.addCol("cdMid")
widgets = [progressbar.Percentage(), progressbar.Bar(),' ',progressbar.FormatLabel('Time elapsed: %(elapsed)s'),' ',progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=s.N).start()
for count,i in enumerate(s.subset):
pbar.update(count)
pts=np.array(s.tree2.query_ball_point(list(s.data[['x','y']][i]),maxR))
#Subset to desired range...
o=np.where(np.abs(s.data['z'][pts])<=np.abs(s.data['z'][i]))[0]
pts=pts[o]
#Calculate the distances...
dist=np.sqrt((s.data['x'][pts]-s.data['x'][i])**2 +
(s.data['y'][pts]-s.data['y'][i])**2)
pts=pts[dist.argsort()]
dist=dist[dist.argsort()]
#Subset if possible
stop=tgtN if len(pts)>tgtN else len(pts)
if stop<1:
s.data['cdMid'][i]=0.
continue
h=dist[stop-1]
s.data['cdMid'][i]=np.sum(s.data['m'][pts[:stop]])/(2.*math.pi*h**2)
def calcsurCD(s,tgtN=50,maxR=5.):
s.init_tree()
s.addCol("cdSur")
widgets = [progressbar.Percentage(), progressbar.Bar(),' ',progressbar.FormatLabel('Time elapsed: %(elapsed)s'),' ',progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=s.N).start()
for count,i in enumerate(s.subset):
pbar.update(count)
pts=np.array(s.tree2.query_ball_point(list(s.data[['x','y']][i]),maxR))
#Subset to desired range...
o=np.where(np.abs(s.data['z'][pts])>=np.abs(s.data['z'][i]))[0]
pts=pts[o]
#Calculate the distances...
dist=np.sqrt((s.data['x'][pts]-s.data['x'][i])**2 +
(s.data['y'][pts]-s.data['y'][i])**2)
pts=pts[dist.argsort()]
dist=dist[dist.argsort()]
#Subset if possible
stop=tgtN if len(pts)>tgtN else len(pts)
if stop<1:
s.data['cdSur'][i]=0.
continue
h=dist[stop-1]
s.data['cdSur'][i]=np.sum(s.data['m'][pts[:stop]])/(2.*math.pi*h**2)
def calcPotential(s):
"""Calculate the potential energy for a subset of particles..."""
s.addCol("Pot")
widgets = [progressbar.Percentage(), progressbar.Bar(),' ',progressbar.FormatLabel('Time elapsed: %(elapsed)s'),' ',progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=s.N).start()
x=s.fetch(s.data['x'])
y=s.fetch(s.data['y'])
z=s.fetch(s.data['z'])
xall=s.data['x']
yall=s.data['y']
zall=s.data['z']
pot=np.zeros(s.N)
mass=np.median(s.data['m'])
for i in xrange(s.N):
pbar.update(i)
tmp=np.sqrt((xall-x[i])**2+(yall-y[i])**2+(zall-z[i])**2)
tmp[tmp==0]=np.inf
pot[i]=-mass*s.G*np.sum(1./tmp)
s.data["Pot"][s.subset]=pot
def seedMap(s,subset=False,nn=50,subsetFrac=.1,maxR=5.):
"""Calculate the column density at a subset of particles. If subset is
set to true then we calculate it at the points specified in subset,
otherwise a random set is chosen. nn is the number of neighbours to
use to calculate the surface density at each point and subsetFrac is
the fraction of total particles to calculate it for."""
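        #The estimate at each seed point is sum(m_neighbours)/(2*pi*h**2), where h is the
        #distance to the nn-th nearest neighbour in the (x, y) plane.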
s.init_tree()
if subset and s.subset is not None:
npts=s.N
rr=s.subset
else:
npts=int(s.n*subsetFrac)
rr=np.random.permutation(s.n)[:npts]
s.addCol("cdTot")
#Make some temporary lists to make calculation (a lot) faster
ghost=np.zeros((npts,3))
ghost[:,0]=s.data['x'][rr]
ghost[:,1]=s.data['y'][rr]
mm=s.data['m']
widgets = [progressbar.Percentage(), progressbar.Bar(),' ',progressbar.FormatLabel('Time elapsed: %(elapsed)s'),' ',progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=npts).start()
for i in xrange(npts):
pbar.update(i)
pts=s.tree2.query(ghost[i,:2],nn)
ghost[i,2]=np.sum(mm[pts[1]]/(2.*math.pi*np.max(pts[0])**2))
s.data['cdTot'][rr]=ghost[:,2]
def interpolateMap(s,grid=False):
"""Interpolates the surface density to surface using either the grid or
the 10% column calculation."""
s.addCol("cdTotInt")
#Create it if it hasn't been...
if not grid and not "cdTot" in s.data.dtype.names:
s.seedMap()
if grid and not hasattr(s,"gridArray"):
s.gridMap()
#Now we have to interpolate the missing points...
rr=s.subset
if rr is None:
rr=np.arange(s.n)
#Again, far faster to use a different data structure...
tmp=np.zeros((rr.shape[0],3))
tmp[:,0]=s.data['x'][rr]
tmp[:,1]=s.data['y'][rr]
widgets = [progressbar.Percentage(), progressbar.Bar(),' ',progressbar.FormatLabel('Time elapsed: %(elapsed)s'),' ',progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=s.N).start()
if grid:
gtree=sp.KDTree(s.gridArray[:,:2])
tt=s.gridArray[:,2]
else:
src=np.isfinite(s.data['cdTot'])
gtree=sp.KDTree(np.column_stack((s.data['x'][src],s.data['y'][src])))
tt=s.data['cdTot'][src]
for i in xrange(rr.shape[0]):
pbar.update(i)
pts=gtree.query(tmp[i,:2],1)
tmp[i,2]=tt[pts[1]]
s.data['cdTotInt'][rr]=tmp[:,2]
def gridMap(s,Rres=20,tres=10,log=True,minCount=10):
if log:
#Rspace=propspace(np.min(s.data['R']),np.max(s.data["R"]),Rres)
Rspace=np.array(np.percentile(s.data["R"],
list(np.linspace(0,100,Rres))))
else:
Rspace=np.linspace(np.min(s.data["R"]), np.max(s.data["R"]), Rres)
tspace=np.linspace(np.min(s.data["theta"]),np.max(s.data['theta']),
tres)
grid,Redges,tedges=np.histogram2d(s.data["R"],s.data["theta"],[Rspace,tspace])
area=np.outer((Redges[:-1]+np.diff(Redges))**2-Redges[:-1]**2,np.diff(tedges))
mass=np.median(s.data['m'])
counts=grid
grid=(grid*mass)/area
#Flatten the grid, because it'll be useful..
Rcenter=Redges[:-1]+np.diff(Redges)/2.
tcenter=tedges[:-1]+np.diff(tedges)/2.
s.gridArray=np.zeros(((Rres-1)*(tres-1),4))
Rflat=np.ndarray.flatten(np.outer(Rcenter,np.ones(tres-1)))
tflat=np.ndarray.flatten(np.outer(np.ones(Rres-1),tcenter))
s.gridArray[:,0]=Rflat*np.cos(tflat)
s.gridArray[:,1]=Rflat*np.sin(tflat)
s.gridArray[:,2]=np.ndarray.flatten(grid)
s.gridArray[:,3]=np.ndarray.flatten(counts)
#Throw away those that don't meet the threshold...
s.gridArray=s.gridArray[np.ndarray.flatten(counts)>=minCount,]
class AnalyticDisc(Disc):
def MMwrite(self,file="/home/my304/data/Output/MMtmp_data.txt"):
f=open(file,'w')
s=self
        #The parameters must come first and their number must be a multiple of 3
params=[s.Hfact, max(abs(s.data['z'])), s.T0, s.plIndex, s.TIndex, s.Rout,
s.Rin, s.Mstar, s.Mdisc, s.G, s.D]
while len(params)%3:
params.append(0.)
tmp=[str(p) for p in params]
f.write('\t'.join(tmp))
f.write('\n')
R=s.fetch(s.data['R'])
t=s.fetch(s.data['theta'])
z=s.fetch(s.data['z'])
for i in xrange(s.N):
f.write(str(R[i]) + '\t' + str(t[i]) + '\t' + str(z[i]) + '\n')
f.close()
s.MMsubset=s.subset
def MMread(s,file="/home/my304/data/Output/MMtmp_processed.txt",subset=None):
"""We need to know which indexes to insert this information into the
data object, one way is to use a supplied subset (s.MMsubest is saved
by MMwrite). If subset is None, we attempt to guess the index based on
the positions given in the MM file."""
s.MMdata=np.genfromtxt(file)
s.addCol('Agznostar')
s.addCol('MMapprox')
if subset is not None:
s.data['Agznostar'][subset]=s.MMdata[:,4]
else:
x=s.MMdata[:,0]*np.cos(s.MMdata[:,1])
y=s.MMdata[:,0]*np.sin(s.MMdata[:,1])
o=s.findIndex(x,y,s.MMdata[:,2])
if np.any(o==-1):
print "No indicies provided and failed to find match for %s points" % np.sum(o==-1)
raise
s.data['Agznostar'][o] = s.MMdata[:,4]
s.data['MMapprox'][o] = s.MMdata[:,3]
def describeDisc(self,Hfact=.002597943,T0=16.7,plIndex=-0.5,TIndex=0.,Rin=5.,Rout=100.,Mstar=1.,Mdisc=.1,G=887.2057):
if not self.loaded:
return
self.Hfact=Hfact
self.T0=T0
self.plIndex=plIndex
self.TIndex=TIndex
self.Rin=Rin
self.Rout=Rout
self.Mstar=Mstar
self.Mdisc=Mdisc
self.G=G
#We can infer some stuff if we have data...
self.Mdisc=np.sum(self.data['m'])
self.Rin=self.data['R'].min()
self.Rout=self.data['R'].max()
if plIndex==-2:
            self.D=np.log(self.Rout/self.Rin)
else:
self.D=(1./(self.plIndex+2.))*(self.Rout**(self.plIndex+2)-self.Rin**(self.plIndex+2))
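        #D normalises the surface density: with Sigma(R) = Mdisc*R**plIndex/(2*pi*D),
        #integrating Sigma over the disc (2*pi*R dR from Rin to Rout) recovers Mdisc.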
self.addCol('AscaleH')
self.data['AscaleH']=self.Hfact*np.sqrt(self.T0/(self.Mstar*(self.Rin**self.TIndex)))*self.data['R']**((self.TIndex+3.)/2.)
self.addCol('Arho')
self.data['Arho']=(self.Mdisc/(2.*math.pi*self.D))*(self.data['R']**self.plIndex)*(np.exp((-1.*self.data['z']**2)/(2.*self.data['AscaleH']**2))/np.sqrt(2.*math.pi*self.data['AscaleH']**2))
self.addCol('AcdSur')
self.data['AcdSur']=((self.Mdisc*self.data['R']**self.plIndex)/(4.*math.pi*self.D))*(1-special.erf(np.abs(self.data['z'])/np.sqrt(2.*self.data['AscaleH']**2)))
self.addCol('AcdMid')
self.data['AcdMid']=(self.data['R']**self.plIndex)*(self.Mdisc/(4.*math.pi*self.D))*(special.erf(np.abs(self.data['z'])/np.sqrt(2.*self.data['AscaleH']**2)))
self.addCol('AcdTot')
self.data['AcdTot']=(self.Mdisc/(4.*math.pi*self.D))*self.data['R']**self.plIndex
self.addCol('Agzstar')
self.data['Agzstar']=-1.*(self.G*self.Mstar*self.data['z'])/(self.data['r']**3)
self.addCol('Agzapprox')
self.data['Agzapprox']=-4.*math.pi*self.G*self.data['AcdMid']*np.sign(self.data['z'])
self.addCol('gznostar')
self.data['gznostar']=self.data['gz']-self.data['Agzstar']
# def ctsH(self,R):
# return self.Hfact*np.sqrt(self.T0/(self.Mstar*(self.Rin**self.TIndex)))*R*((self.TIndex+3.)/2.)
# def ctsrho(R,z):
# return self.Mdisc/(2.*math.pi*self.D)*(R**self.plIndex)*np.exp((-1*z**2)/(2.*self.ctsH(R)**2))/np.sqrt(2*math.pi*self.ctsH(R)**2)
#A SPH smoother...
def W(q,h,ndim=3):
#Here q=|r_a-r_b|/h
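    #This appears to be the standard M4 cubic-spline SPH kernel (cf. Monaghan 1992), with
    #sigma chosen so that the kernel integrates to unity in 1, 2 or 3 dimensions.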
import math
if isinstance(q,int) or isinstance(q,float):
q=np.array([q])
sigma3=1./math.pi
sigma2=10./(7.*math.pi)
sigma1=2./3.
if ndim==1:
sigma=sigma1
elif ndim==2:
sigma=sigma2
elif ndim==3:
sigma=sigma3
else:
raise NameError('ndim must be between 1-3')
out=q.copy()
c1=(q>=0) & (q<1)
c2=(q>=1) & (q<2)
c3=(q>=2)
out[c1] = (sigma/(h**ndim))*(1.-(1.5)*q[c1]**2+(.75)*q[c1]**3)
out[c2] = (sigma/(h**ndim))*(.25*(2-q[c2])**3)
out[c3] = 0
return out
| gpl-2.0 |
CforED/Machine-Learning | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale,
512 x 512 pixel image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
ClimbsRocks/scikit-learn | sklearn/tests/test_pipeline.py | 23 | 15392 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA(svd_solver='full')
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = TransfT()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
| bsd-3-clause |
gfrubi/GR | figuras-editables/fig-fermi-rel-masa-densidad.py | 2 | 3662 | # -*- coding: utf-8 -*-
from matplotlib.pyplot import *
from numpy import *
from scipy.integrate import odeint, quad
def dm_dt(m_t, equis):
    # returns (dm/dx, dt/dx)
m = m_t[0]
t = m_t[1]
dmdx = equis**2*(sinh(t)- t)
dtdx = (-4.0/(equis*(equis-2.0*m)))*((equis**3/3.0)*(sinh(t)
- 8.0*sinh(t/2.0) + 3.0*t) + m)*((sinh(t) - 2.0*sinh(t/2.0))/(cosh(t)-4.0*cosh(t/2.0)+3.0))
#print(unoportres)
return (dmdx, dtdx)
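# Note: dm_dt integrates the dimensionless Oppenheimer-Volkoff structure equations for a
# fully degenerate ideal Fermi gas, with t parametrizing the Fermi momentum
# (density ~ sinh(t) - t, pressure ~ sinh(t) - 8*sinh(t/2) + 3*t).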
####################################################################
####### Relativistic part ##########################################
####################################################################
te_0b = linspace(0.1,10.5,1200)
equis = linspace(1.0e-6, 15.0, 1000)
x1s = []
m1s = []
for t in te_0b:
m0_t0 = [0.0, t]
sol = odeint(dm_dt, m0_t0, equis)
    if len(where(sol[:,0] < 0)[0]) != 0:
        pos = (where(sol[:,0] < 0)[0][0])-1  # the solution has some NaNs, which is why it breaks down
    elif len(where(isnan(sol[:,0])==True)[0]) != 0:
        pos = where(isnan(sol[:,0])==True)[0][0]-1  # this one is for the solution of t
    dte = dm_dt([sol[pos,0], sol[pos,1]], equis[pos])[1]  # the value of dt/dx at pos
    # here we can only do a linear extrapolation (a straight line), since we do not have the second derivative
    x1 = equis[pos] - sol[pos,1] / dte
    x1s.append(x1)
    dme = dm_dt([sol[pos,0], sol[pos,1]], equis[pos])[0]  # the value of dm/dx at pos
    m1 = dme * (x1 - equis[pos]) + sol[pos,0]  # this is m(x1)
m1s.append(m1)
densidad = 5.725e17*(sinh(te_0b) - te_0b) # kg/m^3
radio = 13.683*array(x1s) # km
masa = 9.2648*array(m1s) # in solar masses
def dphialt(phi, eta, iyo):
return(phi[1], -2.0*phi[1]/eta-(abs(phi[0]**2 - iyo**2))**(1.5))
def dphi(phi, eta, iyo):
return(phi[1], -2.0*phi[1]/eta-(phi[0]**2 - iyo**2 )**(1.5))
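# Note: dphialt/dphi encode the Newtonian (Chandrasekhar-type) structure equation
# phi'' + (2/eta)*phi' + (phi**2 - iyo**2)**(3/2) = 0 as a first-order system; dphialt
# uses abs() inside the power to avoid NaNs when phi**2 - iyo**2 dips slightly below zero.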
####################################################################
####### Newtonian part #############################################
####################################################################
phi0 = [1.0, 0.0] # same initial conditions as for Lane-Emden
eta = linspace(1.0e-30, 40.0, 1000)
eta1s = []
eta2Tpx1s = []
rhoNS = []
radioNS = []
masaNS = []
yceros = linspace(0.0, 0.9997, 1000)
for y in yceros:
sol = odeint(dphialt, phi0, eta, args=(y,))
pos = (where(sol[:,0] - y < 0)[0][0])-1
    ddphi = dphi([sol[pos,0], sol[pos,1]], eta[pos], y)[1] # second derivative at the last position
#print(ddphi)
Deta0 = roots([0.5*ddphi,sol[pos,1],sol[pos,0]-y])
Deta = sort(Deta0[where(Deta0>0)])[0]
eta1 = eta[pos]+Deta
eta1s.append(eta1)
eta2Tpx1 = -eta1**2.0*(sol[pos,1] + Deta*ddphi)
eta2Tpx1s.append(eta2Tpx1)
radioNS = 4.18945*array(eta1s)*yceros
masaNS = 2.83673*array(eta2Tpx1s)
rhoNS = 6.10656e18 * (yceros**(-2.0) - 1.0)**(3.0/2)
erreNS = linspace(1.0e-30, 30, 10000)
emeNS = 3.4518*(10.0/erreNS)**3 # in solar masses
# Ultra-relativistic case, high density.
Mch = 5.7252
colores = ['blue','brown','red', 'purple']
dasheses = [[],[5,2],[5,2,2,2],[2,2]]
masita = linspace(0,6,10000)
radio_s = 2.953699653*masita # Schwarzschild radius
fig, axes = subplots(figsize=(8,6))
axes.plot(densidad, masa, colores[3], dashes=dasheses[3], linewidth=1.50, label='RG')
axes.plot(rhoNS, masaNS, colores[2], dashes=dasheses[1], linewidth=1.50, label='Newton')
axes.legend(loc='best')
axes.set_xlabel('Densidad central $[kg/m^3]$',fontsize=15)
axes.set_ylabel('Masa $[M/M_{\odot}]$',fontsize=15)
axes.set_xlim([1e15,1e21])
axes.set_ylim([0,1])
axes.semilogx()
axes.grid(linestyle='dotted')
fig.savefig('../fig/fig-fermi-rel-masa-densidad.pdf')
#fig.show()
| gpl-3.0 |
luigivieira/fsdk | fsdk/utilities/show_bank.py | 1 | 2291 | #!/usr/bin/env python
#
# This file is part of the Fun SDK (fsdk) project. The complete source code is
# available at https://github.com/luigivieira/fsdk.
#
# Copyright (c) 2016-2017, Luiz Carlos Vieira (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import matplotlib.pyplot as plt
if __name__ == '__main__':
import sys
sys.path.append('../../')
import fsdk.filters.gabor as gb
#---------------------------------------------
def main(argv):
"""
Main entry point of this utility application.
This is simply a function called by the checking of namespace __main__, at
the end of this script (in order to execute only when this script is ran
directly).
Parameters
------
argv: list of str
Arguments received from the command line.
"""
# Create the bank of Gabor kernels
bank = gb.GaborBank()
# Plot the gabor bank
fig = bank.createPlotFigure()
# Show the plot on a maximized window
mng = plt.get_current_fig_manager()
mng.window.state('zoomed')
plt.show()
#---------------------------------------------
# namespace verification for invoking main
#---------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:]) | mit |
maheshakya/scikit-learn | sklearn/tests/test_metaestimators.py | 2 | 4686 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
]
def test_metaestimator_delegation():
"""Ensures specified metaestimators have methods iff subestimator does"""
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
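    # `hides` turns a method into a property whose getter raises AttributeError whenever the
    # instance's `hidden_method` names it, so hasattr() returns False and the metaestimator
    # sees a sub-estimator that genuinely lacks that method.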
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
KarlTDebiec/myplotspec_sim | moldynplot/PDistFigureManager.py | 2 | 15841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.PDistFigureManager.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Generates probability distribution figures to specifications
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("moldynplot")
import moldynplot
from .myplotspec.FigureManager import FigureManager
from .myplotspec.manage_defaults_presets import manage_defaults_presets
from .myplotspec.manage_kwargs import manage_kwargs
################################### CLASSES ###################################
class PDistFigureManager(FigureManager):
"""
Manages the generation of probability distribution figures.
"""
defaults = """
draw_figure:
subplot_kw:
autoscale_on: False
multi_tick_params:
left: on
right: off
bottom: on
top: off
shared_legend: True
shared_legend_kw:
spines: False
handle_kw:
ls: none
marker: s
mec: black
legend_kw:
borderaxespad: 0
frameon: False
handletextpad: 0
loc: 9
numpoints: 1
draw_subplot:
title_kw:
verticalalignment: bottom
ylabel: "Probability Distribution"
yticklabels: []
tick_params:
direction: out
left: on
right: off
bottom: on
top: off
grid: True
grid_kw:
b: True
color: [0.7,0.7,0.7]
linestyle: '-'
linewidth: 0.5
label_kw:
zorder: 10
horizontalalignment: left
verticalalignment: top
draw_dataset:
plot_kw:
zorder: 10
fill_between_kw:
color: [0.7, 0.7, 0.7]
lw: 0
ylb: 0
yub: 1
zorder: 1
handle_kw:
ls: none
marker: s
mec: black
mean_kw:
ls: none
marker: o
mec: black
zorder: 11
"""
available_presets = """
pmf:
class: content
help: Plot potential of mean force (PMF)
draw_figure:
multi_xticklabels: [2,3,4,5,6,7,8]
multi_yticklabels: [-3.0,-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5]
draw_subplot:
xlabel: Minimum N-O distance
xticks: [2,3,4,5,6,7,8]
ybound: [-3.2,0.8]
ylabel: "Potential of Mean Force\\n(kcal/mol)"
yticks: [-3.0,-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5]
draw_dataset:
column: pmf
dataset_kw:
cls: moldynplot.dataset.H5Dataset
default_address: /kde/pmf
default_key: pmf
draw_zero_line: True
radgyr:
class: content
help: Radius of Gyration (Rg)
draw_figure:
multi_xticklabels: [0,5,10,15,20,25,30]
draw_subplot:
xlabel: $R_g$ (Å)
xticks: [0,5,10,15,20,25,30]
draw_dataset:
column: rg
dataset_kw:
cls: moldynplot.dataset.TimeSeriesDataset.TimeSeriesDataset
calc_pdist: True
pdist_kw:
bandwidth: 0.1
grid: !!python/object/apply:numpy.linspace [0,30,1000]
read_csv_kw:
delim_whitespace: True
header: 0
names: [frame, rg, rgmax]
rmsd:
class: content
help: Root Mean Standard Deviation (RMSD)
draw_figure:
multi_xticklabels: [0,1,2,3,4,5]
draw_subplot:
xlabel: RMSD (Å)
xticks: [0,1,2,3,4,5]
draw_dataset:
column: rmsd
dataset_kw:
cls: moldynplot.dataset.TimeSeriesDataset.TimeSeriesDataset
calc_pdist: True
pdist_kw:
bandwidth: 0.1
grid: !!python/object/apply:numpy.linspace [0,5,1000]
read_csv_kw:
delim_whitespace: True
header: 0
names: [frame, rmsd]
r1:
class: content
help: Format subplot for R1 relaxation
draw_subplot:
xlabel: "$R_1$"
xticks: [0.0,0.5,1.0,1.5,2.0,2.5,3.0]
draw_dataset:
dataset_kw:
pdist_kw:
bandwidth: 0.02
column: r1
r2:
class: content
help: Format subplot for R2 relaxation
draw_subplot:
xlabel: "$R_2$"
xticks: [0,2,4,6,8,10,12,14,16,18,20]
draw_dataset:
dataset_kw:
pdist_kw:
bandwidth: 0.3
column: r2
r2/r1:
class: content
help: Format subplot for R2/R1 relaxation
draw_subplot:
xlabel: "$R_2$/$R_1$"
xticks: [3,4,5,6,7,8,9,10,11]
draw_dataset:
dataset_kw:
pdist_kw:
bandwidth:
r2/r1: 0.1
column: r2/r1
hetnoe:
class: content
help: Format subplot for Heteronuclear NOE relaxation
draw_subplot:
xlabel: "Heteronuclear NOE"
xticks: [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
draw_dataset:
column: noe
dataset_kw:
pdist_kw:
bandwidth: 0.03
rotdif:
class: content
help: Format subplot for rotational diffusion
draw_subplot:
xlabel: "$τ_c$ (ns)"
xticks: [5,6,7,8,9,10,11,12,13,14]
draw_dataset:
column: rotdif
dataset_kw:
pdist_kw:
bandwidth: 0.2
relaxation_3:
class: content
help: Three stacked plots including R1, R2, and HetNOE
draw_figure:
nrows: 3
shared_ylabel: "Probability Distribution"
subplots:
0:
preset: r1
ylabel: null
1:
preset: r2
ylabel: null
2:
preset: hetnoe
ylabel: null
relaxation_4:
class: content
help: Four stacked plots including R1, R2, R2/R1, and HetNOE
draw_figure:
nrows: 4
shared_ylabel: "Probability Distribution"
subplots:
0:
preset: r1
ylabel: null
1:
preset: r2
ylabel: null
2:
preset: r2/r1
ylabel: null
3:
preset: hetnoe
ylabel: null
rotdif_2:
class: content
help: Two stacked plots including R2/R1 rotdif
draw_figure:
nrows: 2
shared_ylabel: "Probability Distribution"
subplots:
0:
preset: r2/r1
ylabel: null
1:
preset: rotdif
ylabel: null
rotdif_4:
class: content
help: Two stacked plots including R2/R1 rotdif
draw_figure:
nrows: 2
ncols: 2
shared_ylabel: "Probability Distribution"
subplots:
0:
preset: r2/r1
ylabel: null
1:
preset: r2/r1
ylabel: null
2:
preset: rotdif
ylabel: null
3:
preset: rotdif
ylabel: null
manuscript:
class: target
inherits: manuscript
draw_figure:
bottom: 0.55
hspace: 0.10
left: 0.30
right: 0.10
sub_height: 1.00
sub_width: 2.65
top: 0.10
wspace: 0.10
shared_legend_kw:
left: 0.30
sub_width: 2.65
bottom: 0.00
sub_height: 0.20
handle_kw:
mew: 0.5
ms: 5
legend_kw:
labelspacing: 0.5
ncol: 6
shared_xlabel_kw:
bottom: -0.24
title_kw:
top: -0.1
draw_subplot:
xlabel_kw:
labelpad: 3
ylabel_kw:
labelpad: 6
y2ticks: []
y2label_kw:
rotation: 270
verticalalignment: bottom
grid_kw:
linewidth: 0.5
draw_label: True
label_kw:
border_lw: 1
xabs: 0.020
yabs: -0.025
draw_dataset:
mean_kw:
mew: 0.5
ms: 2
handle_kw:
mew: 0.5
ms: 5
presentation_wide:
class: target
inherits: presentation_wide
draw_figure:
bottom: 1.80
hspace: 0.20
left: 0.80
right: 0.80
sub_height: 2.00
sub_width: 4.00
top: 0.60
wspace: 0.20
shared_legend_kw:
left: 0.80
sub_width: 16.60
bottom: 0.00
sub_height: 0.60
handle_kw:
mew: 2.0
ms: 20
legend_kw:
labelspacing: 0.5
ncol: 6
shared_ylabel_kw:
left: -0.5
shared_xlabel_kw:
bottom: -0.9
draw_subplot:
y2ticks: []
y2label_kw:
labelpad: 10
rotation: 270
verticalalignment: bottom
draw_dataset:
mean_kw:
mew: 2.0
ms: 8
handle_kw:
mew: 2.0
ms: 20
"""
@manage_defaults_presets()
@manage_kwargs()
def draw_dataset(self, subplot, column=None,
draw_pdist=True, draw_fill_between=False, draw_mean=False,
draw_plot=False, draw_zero_line=False, **kwargs):
"""
Loads a dataset and draws it on a subplot.
Loaded dataset should have attribute `pdist_df`.
Arguments:
subplot (Axes): :class:`Axes<matplotlib.axes.Axes>` on
which to draw
dataset_kw (dict): Keyword arguments passed to
:meth:`load_dataset
<myplotspec.FigureManager.FigureManager.load_dataset>`
plot_kw (dict): Keyword arguments passed to methods of
:class:`Axes<matplotlib.axes.Axes>`
column (str): Column within `pdist_df` to use
draw_fill_between (bool): Fill between specified region
fill_between_kw (dict): Keyword arguments used to configure
call to
:meth:`fill_between<matplotlib.axes.Axes.fill_between>`
fill_between_kw[x] (list, ndarray): x values passed to
:meth:`fill_between<matplotlib.axes.Axes.fill_between>`
fill_between_kw[ylb] (list, ndarray): y lower bound values
passed to
:meth:`fill_between<matplotlib.axes.Axes.fill_between>`
fill_between_kw[yub] (list, ndarray): y upper bound values
passed to
:meth:`fill_between<matplotlib.axes.Axes.fill_between>`
draw_pdist (bool): Draw probability distribution
          pdist_kw (dict): Keyword arguments used to configure call to
:meth:`plot<matplotlib.axes.Axes.plot>`
draw_mean (bool): Draw point at mean value
mean_kw (dict): Keyword arguments used to configure call to
:meth:`plot<matplotlib.axes.Axes.plot>`
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
"""
from warnings import warn
import pandas as pd
import numpy as np
from .myplotspec import get_colors, multi_get_copy
# Process arguments
verbose = kwargs.get("verbose", 1)
dataset_kw = multi_get_copy("dataset_kw", kwargs, {})
if "infile" in kwargs:
dataset_kw["infile"] = kwargs["infile"]
dataset = self.load_dataset(verbose=verbose, **dataset_kw)
if dataset is not None and hasattr(dataset, "pdist_df"):
pdist_df = dataset.pdist_df
elif dataset is not None and hasattr(dataset, "datasets"):
try:
pdist_df = dataset.pdist_df = pd.DataFrame(
dataset.datasets["pmf"]["pmf"],
index=dataset.datasets["pmf"]["x"],
columns = ["pmf"])
except:
pdist_df = dataset.pdist_df = pd.DataFrame(
dataset.datasets["pmf"]["pmf"],
index=dataset.datasets["pmf"]["center"],
columns = ["pmf"])
dataset.pdist_df.index.name = "x"
else:
pdist_df = None
# Configure plot settings
plot_kw = multi_get_copy("plot_kw", kwargs, {})
get_colors(plot_kw, kwargs)
# Draw fill_between
if draw_fill_between:
fill_between_kw = multi_get_copy("fill_between_kw", kwargs, {})
get_colors(fill_between_kw, plot_kw)
if "x" in fill_between_kw:
fb_x = fill_between_kw.pop("x")
if "ylb" in fill_between_kw:
fb_ylb = fill_between_kw.pop("ylb")
if "yub" in fill_between_kw:
fb_yub = fill_between_kw.pop("yub")
subplot.fill_between(fb_x, fb_ylb, fb_yub, **fill_between_kw)
# Draw pdist
if draw_pdist:
if not hasattr(dataset, "pdist_df"):
warn("'draw_pdist' is enabled but dataset does not have the "
"necessary attribute 'pdist_df', skipping.")
else:
pdist = pdist_df[column]
pdist_kw = plot_kw.copy()
pdist_kw.update(kwargs.get("pdist_kw", {}))
pd_x = pdist.index.values
pd_y = np.squeeze(pdist.values)
subplot.plot(pd_x, pd_y, **pdist_kw)
pdist_rescale = True
if pdist_rescale:
pdist_max = pd_y.max()
y_max = subplot.get_ybound()[1]
if (pdist_max > y_max / 1.25
or not hasattr(subplot, "_mps_rescaled")):
# print("\nPIDST MAX: {0}\n".format(pdist_max))
subplot.set_ybound(0, pdist_max*1.25)
yticks = [0, pdist_max*0.25, pdist_max*0.50,
pdist_max*0.75, pdist_max, pdist_max*1.25]
subplot.set_yticks(yticks)
subplot._mps_rescaled = True
if draw_mean:
mean_kw = plot_kw.copy()
mean_kw.update(kwargs.get("mean_kw", {}))
mean = np.sum(np.array(pd_x, np.float64)
*np.array(pd_y, np.float64))
if verbose >= 1:
print("mean: {0:6.3f}".format(mean))
subplot.plot(mean, pd_y[np.abs(pd_x - mean).argmin()],
**mean_kw)
if draw_plot:
if "x" in kwargs:
x = kwargs.get("x")
subplot.plot([x, x], [0,1], **plot_kw)
if draw_zero_line:
subplot.plot([0, 10], [0,0], linewidth=0.5, color="black")
#################################### MAIN #####################################
if __name__ == "__main__":
PDistFigureManager().main()
| bsd-3-clause |
nkhuyu/seizure-prediction | seizure_prediction/transforms.py | 3 | 26810 | import numpy as np
from scipy.signal import hilbert
from sklearn import preprocessing
import scipy.stats
import pandas as pd
from data import to_np_array
# optional modules for trying out different transforms
try:
import pywt
except ImportError, e:
pass
try:
from scikits.talkbox.features import mfcc
except ImportError, e:
pass
# for auto regressive model
try:
import spectrum
except ImportError, e:
pass
# NOTE(mike): Some transforms operate on the raw data in the shape (NUM_CHANNELS, NUM_FEATURES).
# Others operate on windowed data in the shape (NUM_WINDOWS, NUM_CHANNELS, NUM_FEATURES).
# I've been a bit lazy and just made the ApplyManyTransform base class helper... so if you intend
# a transform to work on pre-windowed data, just write a plain transform with an apply method; if
# you intend it to work on windowed data, derive from ApplyManyTransform and implement apply_one.
# Really this is just a problem of number of axes, and np.apply_along_axis could probably be used to
# clean up this mess. :) I haven't bothered updating it as things are working as they are.
class ApplyManyTransform(object):
def apply(self, datas, meta):
if datas.ndim >= 3:
out = []
for d in datas:
out.append(self.apply_one(d, meta))
return to_np_array(out)
else:
return self.apply_one(datas, meta)
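# Illustrative sketch only (not part of the original transform set): a minimal
# ApplyManyTransform subclass following the convention described in the NOTE above --
# implement apply_one() for a single (NUM_CHANNELS, NUM_FEATURES) array and let
# apply() deal with windowed input.
class ExamplePeakToPeak(ApplyManyTransform):
    def get_name(self):
        return 'example-ptp'
    def apply_one(self, data, meta=None):
        # per-channel peak-to-peak amplitude along the last axis
        return np.max(data, axis=data.ndim-1) - np.min(data, axis=data.ndim-1)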
class FFT:
"""
Apply Fast Fourier Transform to the last axis.
"""
def get_name(self):
return "fft"
def apply(self, data, meta=None):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class Slice:
"""
Take a slice of the data on the last axis.
e.g. Slice(1, 48) works like a normal python slice, that is 1-47 will be taken
"""
def __init__(self, start, end=None):
self.start = start
self.end = end
def get_name(self):
return "slice%d%s" % (self.start, '-%d' % self.end if self.end is not None else '')
def apply(self, data, meta=None):
s = [slice(None),] * data.ndim
s[-1] = slice(self.start, self.end)
return data[s]
class MFCC:
"""
Mel-frequency cepstrum coefficients
"""
def get_name(self):
return "mfcc"
def apply(self, data, meta=None):
all_ceps = []
for ch in data:
ceps, mspec, spec = mfcc(ch)
all_ceps.append(ceps.ravel())
return to_np_array(all_ceps)
class Magnitude:
"""
Take magnitudes of Complex data
"""
def get_name(self):
return "mag"
def apply(self, data, meta=None):
return np.abs(data)
class Log:
"""
Apply LogE
"""
def get_name(self):
return "log"
def apply(self, data, meta=None):
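        # Non-positive values would break the log: temporarily set them to the array max so
        # that min() on the next line picks up the smallest positive entry, then replace them
        # with a tenth of that value before taking the logarithm.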
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = (np.min(data) * 0.1)
return np.log(data)
class Log2:
"""
Apply Log2
"""
def get_name(self):
return "log2"
def apply(self, data, meta=None):
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = (np.min(data) * 0.1)
return np.log2(data)
class Log10:
"""
Apply Log10
"""
def get_name(self):
return "log10"
def apply(self, data, meta=None):
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = (np.min(data) * 0.1)
return np.log10(data)
class Stats(ApplyManyTransform):
"""
Subtract the mean, then take (min, max, standard_deviation) for each channel.
"""
def get_name(self):
return "stats"
def apply_one(self, data, meta=None):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(len(data)):
ch_data = data[i]
ch_data -= np.mean(ch_data)
outi = out[i]
outi[0] = np.std(ch_data)
outi[1] = np.min(ch_data)
outi[2] = np.max(ch_data)
return out
class MomentPerChannel(ApplyManyTransform):
"""
Calculate the Nth moment per channel.
"""
def __init__(self, n):
self.n = n
def get_name(self):
return "moment%d" % self.n
def apply_one(self, data, meta=None):
return scipy.stats.moment(data, moment=self.n, axis=data.ndim-1)
class UnitScale:
"""
Scale across the last axis.
"""
def get_name(self):
return 'unit-scale'
def apply(self, data, meta=None):
return preprocessing.scale(data, axis=data.ndim-1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def get_name(self):
return 'unit-scale-feat'
def apply(self, data, meta=None):
return preprocessing.scale(data.astype(np.float64), axis=0)
class CorrelationMatrix(ApplyManyTransform):
"""
Calculate correlation coefficients matrix across all EEG channels.
"""
def get_name(self):
return 'corr-mat'
def apply_one(self, data, meta=None):
return np.corrcoef(data)
class Eigenvalues(ApplyManyTransform):
"""
Take eigenvalues of a matrix, and sort them by magnitude in order to
make them useful as features (as they have no inherent order).
"""
def get_name(self):
return 'eigen'
def apply_one(self, data, meta=None):
w, v = np.linalg.eig(data)
w = np.absolute(w)
w.sort()
return w
# Take the upper right triangle of a matrix
def upper_right_triangle(matrix):
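    # e.g. for a 3x3 matrix this yields [m[0,1], m[0,2], m[1,2]]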
accum = []
for i in range(matrix.shape[0]):
for j in range(i+1, matrix.shape[1]):
accum.append(matrix[i, j])
return to_np_array(accum)
class UpperRightTriangle(ApplyManyTransform):
"""
Take the upper right triangle of a matrix.
"""
def get_name(self):
return 'urt'
def apply_one(self, data, meta=None):
assert data.ndim == 2 and data.shape[0] == data.shape[1]
return upper_right_triangle(data)
class FreqCorrelation(ApplyManyTransform):
"""
Correlation in the frequency domain. First take FFT with (start, end) slice options,
then calculate correlation co-efficients on the FFT output, followed by calculating
eigenvalues on the correlation co-efficients matrix.
The output features are (fft, upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, start_hz, end_hz, option, use_phase=False, with_fft=False, with_corr=True, with_eigen=True):
self.start_hz = start_hz
self.end_hz = end_hz
self.option = option
self.with_fft = with_fft
self.with_corr = with_corr
self.with_eigen = with_eigen
self.use_phase = use_phase
assert option in ('us', 'usf', 'none', 'fft_in')
assert with_corr or with_eigen
def get_name(self):
selections = []
if self.option in ('us', 'usf', 'fft_in'):
selections.append(self.option)
if self.with_fft:
selections.append('fft')
if not self.with_corr:
selections.append('nocorr')
if not self.with_eigen:
selections.append('noeig')
if len(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'freq-corr%s-%s-%s%s' % ('-phase' if self.use_phase else '', self.start_hz, self.end_hz, selection_str)
def apply_one(self, data, meta=None):
num_time_samples = data.shape[-1] if self.option != 'fft_in' else (data.shape[-1] - 1) * 2 # revert FFT shape change
if self.start_hz == 1 and self.end_hz is None:
freq_slice = Slice(self.start_hz, self.end_hz)
else:
# FFT range is from 0Hz to 101Hz
def calc_index(f):
return int((f / (meta.sampling_frequency/2.0)) * num_time_samples) if f is not None else num_time_samples
freq_slice = Slice(calc_index(self.start_hz), calc_index(self.end_hz))
# print data.shape, freq_slice.start, freq_slice.end
# import sys
# sys.exit(0)
data1 = data
if self.option != 'fft_in':
data1 = FFT().apply(data1)
data1 = freq_slice.apply(data1)
if self.use_phase:
data1 = np.angle(data1)
else:
data1 = Magnitude().apply(data1)
data1 = Log10().apply(data1)
data2 = data1
if self.option == 'usf':
data2 = UnitScaleFeat().apply(data2)
elif self.option == 'us':
data2 = UnitScale().apply(data2)
data2 = CorrelationMatrix().apply_one(data2)
if self.with_eigen:
w = Eigenvalues().apply_one(data2)
out = []
if self.with_corr:
data2 = upper_right_triangle(data2)
out.append(data2)
if self.with_eigen:
out.append(w)
if self.with_fft:
data1 = data1.ravel()
out.append(data1)
for d in out:
assert d.ndim == 1
return np.concatenate(out, axis=0)
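# Illustrative usage sketch (hypothetical parameters, not taken from any pipeline definition):
#   FreqCorrelation(1, 48, 'usf', with_fft=True).apply(windowed_data, meta)
# takes the log10 FFT magnitude over the requested frequency band, unit-scales each feature
# across channels, and returns per window the upper triangle of the channel correlation
# matrix, its eigenvalues, and (because with_fft=True) the flattened FFT features.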
class Correlation(ApplyManyTransform):
"""
Correlation in the time domain. Calculate correlation co-efficients
followed by calculating eigenvalues on the correlation co-efficients matrix.
The output features are (upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, scale_option, with_corr=True, with_eigen=True):
self.scale_option = scale_option
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ('us', 'usf', 'none')
assert with_corr or with_eigen
def get_name(self):
selections = []
if self.scale_option != 'none':
selections.append(self.scale_option)
if not self.with_corr:
selections.append('nocorr')
if not self.with_eigen:
selections.append('noeig')
if len(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'corr%s' % (selection_str)
def apply_one(self, data, meta=None):
data1 = data
if self.scale_option == 'usf':
data1 = UnitScaleFeat().apply(data1)
elif self.scale_option == 'us':
data1 = UnitScale().apply(data1)
data1 = CorrelationMatrix().apply_one(data1)
# patch nans
data1[np.where(np.isnan(data1))] = -2
if self.with_eigen:
w = Eigenvalues().apply_one(data1)
out = []
if self.with_corr:
data1 = upper_right_triangle(data1)
out.append(data1)
if self.with_eigen:
out.append(w)
for d in out:
assert d.ndim == 1
return np.concatenate(out, axis=0)
class FlattenChannels(object):
"""
Reshapes the data from (..., N_CHANNELS, N_FEATURES) to (..., N_CHANNELS * N_FEATURES)
"""
def get_name(self):
return 'fch'
def apply(self, data, meta=None):
if data.ndim == 2:
return data.ravel()
elif data.ndim == 3:
s = data.shape
return data.reshape((s[0], np.product(s[1:])))
else:
raise NotImplementedError()
class Windower:
"""
Breaks the time-series data into N second segments, for example 60s windows
will create 10 windows given a 600s segment. The output is the reshaped data
e.g. (600, 120000) -> (600, 10, 12000)
"""
def __init__(self, window_secs=None):
self.window_secs = window_secs
self.name = 'w-%ds' % window_secs if window_secs is not None else 'w-whole'
def get_name(self):
return self.name
def apply(self, X, meta=None):
if self.window_secs is None:
return X.reshape([1] + list(X.shape))
num_windows = meta.data_length_sec / self.window_secs
samples_per_window = self.window_secs * int(meta.sampling_frequency)
samples_used = num_windows * samples_per_window
samples_dropped = X.shape[-1] - samples_used
X = Slice(samples_dropped).apply(X)
out = np.split(X, num_windows, axis=X.ndim-1)
out = to_np_array(out)
return out
class PreictalWindowGenerator:
"""
Experimental windower that generates overlapping windows for preictal segments only.
The window_secs parameter describes how long each window is, and gen_factor describes
how many extra windows you want as a multiplier.
For example given a 600s segment, a window size of 60s will give you 10 windows,
this number is then multiplied by gen_factor, e.g. 20 windows if gen_factor is 2.
The window size is fixed and the starting point for each window will be evenly-spaced.
It's been a while since I've used this, not even sure if it works properly...
"""
def __init__(self, window_secs, gen_factor):
self.window_secs = window_secs
self.gen_factor = gen_factor
self.name = 'wg-%ds-%d' % (window_secs, gen_factor)
self.windower = Windower(window_secs)
def get_name(self):
return self.name
def apply(self, X, meta):
if meta.data_type == 'preictal':
num_windows = (meta.data_length_sec / self.window_secs) * self.gen_factor
samples_per_window = self.window_secs * int(meta.sampling_frequency) / self.gen_factor
samples_used = num_windows * samples_per_window
samples_dropped = X.shape[-1] - samples_used
X = Slice(samples_dropped).apply(X)
pieces = np.split(X, num_windows, axis=X.ndim-1)
pieces_per_window = self.gen_factor
gen = [np.concatenate(pieces[i:i+pieces_per_window], axis=pieces[0].ndim - 1) for i in range(0, num_windows - self.gen_factor + 1)]
gen = to_np_array(gen)
return gen
else:
return self.windower.apply(X, meta)
class Hurst:
"""
Hurst exponent per-channel, see http://en.wikipedia.org/wiki/Hurst_exponent
Another description can be found here: http://www.ijetch.org/papers/698-W10024.pdf
Kavya Devarajan, S. Bagyaraj, Vinitha Balasampath, Jyostna. E. and Jayasri. K.,
"EEG-Based Epilepsy Detection and Prediction," International Journal of Engineering
and Technology vol. 6, no. 3, pp. 212-216, 2014.
"""
def get_name(self):
return 'hurst'
def apply(self, X, meta):
def apply_one(x):
x -= x.mean()
z = np.cumsum(x)
r = (np.maximum.accumulate(z) - np.minimum.accumulate(z))[1:]
s = pd.expanding_std(x)[1:]
# prevent division by 0
s[np.where(s == 0)] = 1e-12
r += 1e-12
y_axis = np.log(r / s)
x_axis = np.log(np.arange(1, len(y_axis) + 1))
x_axis = np.vstack([x_axis, np.ones(len(x_axis))]).T
m, b = np.linalg.lstsq(x_axis, y_axis)[0]
return m
return np.apply_along_axis(apply_one, -1, X)
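# Rough sanity check (illustrative only, not part of the original module): for uncorrelated
# white noise the rescaled-range slope should come out near 0.5, e.g.
#   Hurst().apply(np.random.randn(16, 2000), meta=None)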
class PFD(ApplyManyTransform):
"""
Petrosian fractal dimension per-channel
Implementation derived from reading:
http://arxiv.org/pdf/0804.3361.pdf
    F.S. Bao, D.Y. Lie, Y. Zhang, "A new approach to automated epileptic diagnosis using EEG
    and probabilistic neural network", ICTAI'08, pp. 482-486, 2008.
"""
def get_name(self):
return 'pfd'
def pfd_for_ch(self, ch):
diff = np.diff(ch, n=1, axis=0)
asign = np.sign(diff)
sign_changes = ((np.roll(asign, 1) - asign) != 0).astype(int)
N_delta = np.count_nonzero(sign_changes)
n = len(ch)
log10n = np.log10(n)
return log10n / (log10n + np.log10(n / (n + 0.4 * N_delta)))
def apply_one(self, X, meta=None):
return to_np_array([self.pfd_for_ch(ch) for ch in X])
def hfd(X, kmax):
N = len(X)
Nm1 = float(N - 1)
L = np.empty((kmax,))
L[0] = np.sum(abs(np.diff(X, n=1))) # shortcut :)
for k in xrange(2, kmax + 1):
Lmks = np.empty((k,))
for m in xrange(1, k + 1):
i_end = (N - m) / k # int
Lmk_sum = np.sum(abs(np.diff(X[np.arange(m - 1, m + (i_end + 1) * k - 1, k)], n=1)))
Lmk = Lmk_sum * Nm1 / (i_end * k)
Lmks[m-1] = Lmk
L[k - 1] = np.mean(Lmks)
a = np.empty((kmax, 2))
a[:, 0] = np.log(1.0 / np.arange(1.0, kmax + 1.0))
a[:, 1] = 1.0
b = np.log(L)
# find x by solving for ax = b
x, residues, rank, s = np.linalg.lstsq(a, b)
return x[0]
class HFD(ApplyManyTransform):
"""
Higuchi fractal dimension per-channel
Implementation derived from reading:
http://arxiv.org/pdf/0804.3361.pdf
    F.S. Bao, D.Y. Lie, Y. Zhang, "A new approach to automated epileptic diagnosis using EEG
    and probabilistic neural network", ICTAI'08, pp. 482-486, 2008.
"""
def __init__(self, kmax):
self.kmax = kmax
def get_name(self):
return 'hfd-%d' % self.kmax
def apply_one(self, data, meta=None):
return to_np_array([hfd(ch, self.kmax) for ch in data])
class Diff(ApplyManyTransform):
"""
Wrapper for np.diff
"""
def __init__(self, order):
self.order = order
def get_name(self):
return 'diff-%d' % self.order
def apply_one(self, data, meta=None):
return np.diff(data, n=self.order, axis=data.ndim-1)
class SpectralEntropy(ApplyManyTransform):
"""
Calculates Shannon entropy between the given frequency ranges.
e.g. The probability density function of FFT magnitude is calculated, then
given range [1,2,3], Shannon entropy is calculated between 1hz and 2hz, 2hz and 3hz
in this case giving 2 values per channel.
NOTE(mike): Input for this transform must be from (FFT(), Magnitude())
"""
def __init__(self, freq_ranges, flatten=True):
self.freq_ranges = freq_ranges
self.flatten = flatten
def get_name(self):
return 'spec-ent-%s%s' % ('-'.join([str(f) for f in self.freq_ranges]), '-nf' if not self.flatten else '')
def apply_one(self, fft_mag, meta):
num_time_samples = (fft_mag.shape[-1] - 1) * 2 # revert FFT shape change
X = fft_mag ** 2
for ch in X:
ch /= np.sum(ch + 1e-12)
psd = X # pdf
out = []
#[0,1,2] -> [[0,1], [1,2]]
for start_freq, end_freq in zip(self.freq_ranges[:-1], self.freq_ranges[1:]):
start_index = np.floor((start_freq / meta.sampling_frequency) * num_time_samples)
end_index = np.floor((end_freq / meta.sampling_frequency) * num_time_samples)
selected = psd[:, start_index:end_index]
entropies = - np.sum(selected * np.log2(selected + 1e-12), axis=selected.ndim-1) / np.log2(end_index - start_index)
if self.flatten:
out.append(entropies.ravel())
else:
out.append(entropies)
if self.flatten:
return np.concatenate(out)
else:
return to_np_array(out)
class PIBSpectralEntropy(ApplyManyTransform):
"""
Similar to the calculations in SpectralEntropy transform, but instead power-in-band
    is calculated over the given freq_ranges, and Shannon entropy is then calculated on that.
The output is a single entropy value per-channel.
NOTE(mike): Input for this transform must be from (FFT(), Magnitude())
"""
def __init__(self, freq_ranges):
self.freq_ranges = freq_ranges
def get_name(self):
return 'pib-spec-ent-%s' % '-'.join([str(f) for f in self.freq_ranges])
def apply_one(self, data, meta=None):
num_channels = data.shape[0]
num_time_samples = float((data.shape[-1] - 1) * 2) # revert FFT shape change
def norm(X):
for ch in X:
ch /= np.sum(ch + 1e-12)
return X
psd = data ** 2
psd = norm(psd)
# group into bins
def binned_psd(psd, out):
prev = freq_ranges[0]
for i, cur in enumerate(freq_ranges[1:]):
prev_index = np.floor((prev / meta.sampling_frequency) * num_time_samples)
cur_index = np.floor((cur / meta.sampling_frequency) * num_time_samples)
out[i] = np.sum(psd[prev_index:cur_index])
prev = cur
freq_ranges = self.freq_ranges
out = np.empty((num_channels, len(freq_ranges) - 1,))
for ch in range(num_channels):
binned_psd(psd[ch], out[ch])
psd_per_bin = norm(out)
def entropy_per_channel(psd):
entropy_components = psd * np.log2(psd + 1e-12)
entropy = -np.sum(entropy_components) / np.log2(psd.shape[-1])
return entropy
out = np.empty((num_channels,))
for i, ch in enumerate(psd_per_bin):
out[i] = entropy_per_channel(ch)
return out
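# Illustrative usage sketch (hypothetical band edges, not taken from any pipeline definition):
#   PIBSpectralEntropy([0.25, 2, 3.5, 6, 15, 24, 40]).apply(fft_mag, meta)
# where fft_mag comes from (FFT(), Magnitude()); each channel's power is summed into the
# listed bands, normalised to a distribution, and reduced to a single Shannon entropy value.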
class FreqBinning(ApplyManyTransform):
"""
Given spectral magnitude data, select a range of bins, and then choose a consolidation function
to use to calculate each bin. The sum can be used, or the mean, or the standard deviation.
NOTE(mike): Input for this transform must be from (FFT(), Magnitude())
"""
def __init__(self, freq_ranges, func=None):
self.freq_ranges = freq_ranges
assert func is None or func in ('sum', 'mean', 'std')
self.func = func
def get_name(self):
return 'fbin%s%s' % ('' if self.func is None else '-' + self.func, '-' + '-'.join([str(f) for f in self.freq_ranges]))
def apply_one(self, X, meta):
num_channels = X.shape[0]
num_time_samples = (X.shape[-1] - 1) * 2 # revert FFT shape change
if self.func == 'mean':
func = np.mean
elif self.func == 'std':
func = np.std
else:
func = np.sum
# group into bins
def binned_freq(data, out):
prev = freq_ranges[0]
for i, cur in enumerate(freq_ranges[1:]):
prev_index = np.floor((prev / meta.sampling_frequency) * num_time_samples)
cur_index = np.floor((cur / meta.sampling_frequency) * num_time_samples)
out[i] = func(data[prev_index:cur_index])
prev = cur
freq_ranges = self.freq_ranges
out = np.empty((num_channels, len(freq_ranges) - 1,))
for ch in range(num_channels):
binned_freq(X[ch], out[ch])
return out
class AR(ApplyManyTransform):
"""
Auto-regressive model as suggested by:
http://hdl.handle.net/1807/33224
https://tspace.library.utoronto.ca/bitstream/1807/33224/1/Green_Adrian_CA_201211_MASc_thesis.pdf
It is suggested to use a model order of 8.
"""
def __init__(self, order):
self.order = order
def get_name(self):
return 'ar%d' % self.order
def calc_for_ch(self, ch):
ar_coeffs, dnr, reflection_coeffs = spectrum.aryule(ch, self.order)
return np.abs(ar_coeffs)
def apply_one(self, X, meta):
return np.concatenate([self.calc_for_ch(ch) for ch in X], axis=0)
class SubMean:
"""
For each feature, subtract from each channel the mean across all channels.
This is to perform average reference montage.
"""
def get_name(self):
return 'subm'
def apply(self, X, meta):
assert X.ndim == 2
X -= X.mean(axis=0)
return X
def index_for_hz(X, hz, sampling_frequency):
return int((float(hz) / sampling_frequency) * X.shape[-1])
class Preprocess:
"""
Data preprocessing stage to normalize the data across all patients.
Data that has not had average reference montage applied needs it applied.
"""
def get_name(self):
return 'pp'
def apply(self, X, meta):
# NOTE(mike): Patient 1 and 2 have not subtracted the average reference from their raw data
# whereas Dogs 1 to 5 have. So bring these two patients into line to normalize the preprocessing
# across ALL patients.
if meta.target in ('Patient_1', 'Patient_2'):
X = SubMean().apply(X, meta)
return X
class PhaseSynchrony(ApplyManyTransform):
"""
Calculate phase synchrony between channels using Hilbert transform and Shannon entropy.
Method described in:
http://www.researchgate.net/publication/222567264_Comparison_of_Hilbert_transform_and_wavelet_methods_for_the_analysis_of_neuronal_synchrony/links/0deec52baa808a3812000000
Le Van Quyen M, Foucher J, Lachaux J-P, Rodriguez E, Lutz A, Martinerie JM, Varela FJ (2001)
Comparison of Hilbert transform and wavelet methods for the analysis of neural synchrony.
J Neurosci Methods 111:83-98
    NOTE(mike): This seemed to work well in cross-validation, but I never got an increase
on the leaderboard.
"""
def __init__(self, with_eigen=False, with_raw=True):
assert with_eigen or with_raw
self.with_raw = with_raw
self.with_eigen = with_eigen
def get_name(self):
return 'phase-synchrony-%s%s' % ('-eigen' if self.with_eigen else '', '-noraw' if not self.with_raw else '')
def apply_one(self, X, meta):
h = X + (1j * hilbert(X))
phase = np.angle(h)
num_bins = int(np.exp(0.626 + 0.4 * np.log(X.shape[-1] - 1)))
Hmax = np.log(num_bins)
num_channels = X.shape[0]
if self.with_eigen:
M = np.ones((num_channels, num_channels), dtype=np.float64)
out = np.empty((num_channels * (num_channels - 1) / 2,), dtype=np.float64)
count = 0
for i in range(num_channels):
for j in range(i + 1, num_channels):
ch1_phase = phase[i]
ch2_phase = phase[j]
phase_diff = np.mod(np.abs(ch1_phase - ch2_phase), np.pi * 2.0)
# convert phase_diff into a pdf of num_bins
hist = np.histogram(phase_diff, bins=num_bins)[0]
pdf = hist.astype(np.float64) / np.sum(hist)
H = np.sum(pdf * np.log(pdf + 1e-12))
p = (H + Hmax) / Hmax
if self.with_eigen:
M[i][j] = p
M[j][i] = p
out[count] = p
count += 1
if self.with_eigen:
eigen = Eigenvalues().apply_one(M)
if self.with_eigen and self.with_raw:
return np.concatenate((out, eigen))
if self.with_eigen:
return eigen
else:
return out
| mit |
jyt109/sim-shootout | shootout.py | 5 | 15515 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <[email protected]>
"""
USAGE: %(program)s INPUT_DIRECTORY
Compare speed and accuracy of several similarity retrieval methods, using the corpus prepared by prepare_shootout.py.
Example: ./shootout.py ~/data/wiki/shootout
"""
import os
import sys
import time
import logging
import itertools
from functools import wraps
import numpy
import gensim
MAX_DOCS = 10000000 # clip the dataset at this many docs, if larger (=use a wiki subset)
TOP_N = 10 # how many similars to ask for
ACC = 'exact' # what accuracy are we aiming for
NUM_QUERIES = 100 # query with this many different documents, as a single experiment
REPEATS = 3 # run all queries this many times, take the best timing
FLANN_995 = {
'log_level': 'info',
'target_precision': 0.995,
'algorithm': 'autotuned',
}
FLANN_99 = { # autotuned on wiki corpus with target_precision=0.99
'iterations': 5,
'multi_probe_level_': 2L,
'cb_index': 0.20000000298023224,
'centers_init': 'default',
'log_level': 'info',
'build_weight': 0.009999999776482582,
'leaf_max_size': 4,
'memory_weight': 0.0,
'sample_fraction': 0.10000000149011612,
'checks': 12288,
'max_neighbors': -1,
'random_seed': 215924497,
'trees': 1,
'target_precision': 0.99,
'table_number_': 12L,
'sorted': 1,
'branching': 32,
'algorithm': 'kmeans',
'key_size_': 20L,
'eps': 0.0,
'cores': 0
}
FLANN_98 = {
'iterations': 5,
'multi_probe_level_': 2L,
'cb_index': 0.20000000298023224,
'centers_init': 'default',
'log_level': 'info',
'build_weight': 0.009999999776482582,
'leaf_max_size': 4,
'memory_weight': 0.0,
'sample_fraction': 0.10000000149011612,
'checks': 5072,
'max_neighbors': -1,
'random_seed': 638707089,
'trees': 1,
'target_precision': 0.98,
'table_number_': 12L,
'sorted': 1,
'branching': 16,
'algorithm': 'kmeans',
'key_size_': 20L,
'eps': 0.0,
'cores': 0,
}
FLANN_95 = {
'iterations': 5,
'multi_probe_level_': 2L,
'cb_index': 0.20000000298023224,
'centers_init': 'default',
'log_level': 'info',
'build_weight': 0.009999999776482582,
'leaf_max_size': 4,
'memory_weight': 0.0,
'sample_fraction': 0.10000000149011612,
'checks': 3072,
'max_neighbors': -1,
'random_seed': 638707089,
'trees': 1,
'target_precision': 0.949999988079071,
'table_number_': 12L,
'sorted': 1,
'branching': 16,
'algorithm': 'kmeans',
'key_size_': 20L,
'eps': 0.0,
'cores': 0,
}
FLANN_9 = {
'algorithm': 'kmeans',
'branching': 32,
'build_weight': 0.009999999776482582,
'cb_index': 0.20000000298023224,
'centers_init': 'default',
'checks': 2688,
'cores': 0,
'eps': 0.0,
'iterations': 1,
'key_size_': 20L,
'leaf_max_size': 4,
'log_level': 'info',
'max_neighbors': -1,
'memory_weight': 0.0,
'multi_probe_level_': 2L,
'random_seed': 354901449,
'sample_fraction': 0.10000000149011612,
'sorted': 1,
'table_number_': 12L,
'target_precision': 0.9,
'trees': 1
}
FLANN_7 = {
'iterations': 5,
'multi_probe_level_': 2L,
'cb_index': 0.0,
'centers_init': 'default',
'log_level': 'info',
'build_weight': 0.009999999776482582,
'leaf_max_size': 4,
'memory_weight': 0.0,
'sample_fraction': 0.10000000149011612,
'checks': 684,
'max_neighbors': -1,
'random_seed': 746157721,
'trees': 4,
'target_precision': 0.699999988079071,
'table_number_': 12L,
'sorted': 1,
'branching': 32,
'algorithm': 'default',
'key_size_': 20L,
'eps': 0.0,
'cores': 0
}
ACC_SETTINGS = {
'flann': {'7': FLANN_7, '9': FLANN_9, '95': FLANN_95, '99': FLANN_99, '995': FLANN_995},
'annoy': {'1': 1, '10': 10, '50': 50, '100': 100, '500': 500},
'lsh': {'low': {'k': 10, 'l': 10, 'w': float('inf')}, 'high': {'k': 10, 'l': 10, 'w': float('inf')}},
}
logger = logging.getLogger('shootout')
def profile(fn):
@wraps(fn)
def with_profiling(*args, **kwargs):
times = []
logger.info("benchmarking %s at k=%s acc=%s" % (fn.__name__, TOP_N, ACC))
for _ in xrange(REPEATS): # try running it three times, report the best time
start = time.time()
ret = fn(*args, **kwargs)
times.append(time.time() - start)
logger.info("%s took %.3fms/query" % (fn.__name__, 1000.0 * min(times) / NUM_QUERIES))
logger.info("%s raw timings: %s" % (fn.__name__, times))
return ret
return with_profiling
@profile
def gensim_1by1(index, queries):
for query in queries:
_ = index[query]
@profile
def gensim_at_once(index, queries):
_ = index[queries]
@profile
def flann_1by1(index, queries):
for query in queries:
_ = index.nn_index(query, TOP_N)
@profile
def flann_at_once(index, queries):
_ = index.nn_index(queries, TOP_N)
@profile
def sklearn_1by1(index, queries):
for query in queries:
_ = index.kneighbors(query, n_neighbors=TOP_N)
@profile
def sklearn_at_once(index, queries):
_ = index.kneighbors(queries, n_neighbors=TOP_N)
@profile
def kgraph_1by1(index, queries, dataset):
for query in queries:
_ = index.search(dataset, query[None, :], K=TOP_N, threads=1)
@profile
def kgraph_at_once(index, queries, dataset):
_ = index.search(dataset, queries, K=TOP_N, threads=1)
@profile
def annoy_1by1(index, queries):
for query in queries:
_ = index.get_nns_by_vector(list(query.astype(float)), TOP_N)
@profile
def lsh_1by1(index, queries):
for query in queries:
_ = index.Find(query[:, None])[:TOP_N]
def flann_predictions(index, queries):
if TOP_N == 1:
# flann returns differently shaped arrays when asked for only 1 nearest neighbour
return [index.nn_index(query, TOP_N)[0] for query in queries]
else:
return [index.nn_index(query, TOP_N)[0][0] for query in queries]
def sklearn_predictions(index, queries):
return [list(index.kneighbors(query, TOP_N)[1].ravel()) for query in queries]
def annoy_predictions(index, queries):
return [index.get_nns_by_vector(list(query.astype(float)), TOP_N) for query in queries]
def lsh_predictions(index, queries):
    return [[pos for pos, _ in index.Find(query[:, None])[:TOP_N]] for query in queries]
def gensim_predictions(index, queries):
return [[pos for pos, _ in index[query]] for query in queries]
def kgraph_predictions(index, queries):
global dataset
return index.search(dataset, queries, K=TOP_N, threads=1)
def get_accuracy(predicted_ids, queries, gensim_index, expecteds=None):
"""Return precision (=percentage of overlapping ids) and average similarity difference."""
logger.info("computing ground truth")
correct, diffs = 0.0, []
for query_no, (predicted, query) in enumerate(zip(predicted_ids, queries)):
expected_ids, expected_sims = zip(*gensim_index[query]) if expecteds is None else expecteds[query_no]
correct += len(set(expected_ids).intersection(predicted))
predicted_sims = [numpy.dot(gensim_index.vector_by_id(id1), query) for id1 in predicted]
# if we got less than TOP_N results, assume zero similarity for the missing ids (LSH)
predicted_sims.extend([0.0] * (TOP_N - len(predicted_sims)))
diffs.extend(-numpy.array(predicted_sims) + expected_sims)
return correct / (TOP_N * len(queries)), numpy.mean(diffs), numpy.std(diffs), max(diffs)
def log_precision(method, index, queries, gensim_index, expecteds=None):
logger.info("computing accuracy of %s over %s queries at k=%s, acc=%s" % (method.__name__, NUM_QUERIES, TOP_N, ACC))
acc, avg_diff, std_diff, max_diff = get_accuracy(method(index, queries), queries, gensim_index, expecteds)
logger.info("%s precision=%.3f, avg diff=%.3f, std diff=%.5f, max diff=%.3f" % (method.__name__, acc, avg_diff, std_diff, max_diff))
def print_similar(title, index_gensim, id2title, title2id):
"""Print out the most similar Wikipedia articles, given an article title=query"""
pos = title2id[title.lower()] # throws if title not found
for pos2, sim in index_gensim[index_gensim.vector_by_id(pos)]:
print pos2, `id2title[pos2]`, sim
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
# check and process input arguments
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print globals()['__doc__'] % locals()
sys.exit(1)
indir = sys.argv[1]
if len(sys.argv) > 2:
TOP_N = int(sys.argv[2])
if len(sys.argv) > 3:
ACC = sys.argv[3]
lsi_vectors = os.path.join(indir, 'lsi_vectors.mm.gz')
logger.info("testing k=%s and acc=%s" % (TOP_N, ACC))
mm = gensim.corpora.MmCorpus(gensim.utils.smart_open(lsi_vectors))
num_features, num_docs = mm.num_terms, min(mm.num_docs, MAX_DOCS)
sim_prefix = os.path.join(indir, 'index%s' % num_docs)
# some libs (flann, sklearn) expect the entire input as a full matrix, all at once (no streaming)
if os.path.exists(sim_prefix + "_clipped.npy"):
logger.info("loading dense corpus (need for flann, scikit-learn)")
clipped = numpy.load(sim_prefix + "_clipped.npy", mmap_mode='r')
else:
logger.info("creating dense corpus of %i documents under %s" % (num_docs, sim_prefix + "_clipped.npy"))
clipped = numpy.empty((num_docs, num_features), dtype=numpy.float32)
for docno, doc in enumerate(itertools.islice(mm, num_docs)):
if docno % 100000 == 0:
logger.info("at document #%i/%i" % (docno + 1, num_docs))
clipped[docno] = gensim.matutils.sparse2full(doc, num_features)
numpy.save(sim_prefix + "_clipped.npy", clipped)
clipped_corpus = gensim.matutils.Dense2Corpus(clipped, documents_columns=False) # same as islice(mm, num_docs)
logger.info("selecting %s documents, to act as top-%s queries" % (NUM_QUERIES, TOP_N))
queries = clipped[:NUM_QUERIES]
if os.path.exists(sim_prefix + "_gensim"):
logger.info("loading gensim index")
index_gensim = gensim.similarities.Similarity.load(sim_prefix + "_gensim")
index_gensim.output_prefix = sim_prefix
index_gensim.check_moved() # update shard locations in case the files were copied somewhere else
else:
logger.info("building gensim index")
index_gensim = gensim.similarities.Similarity(sim_prefix, clipped_corpus, num_best=TOP_N, num_features=num_features, shardsize=100000)
index_gensim.save(sim_prefix + "_gensim")
index_gensim.num_best = TOP_N
logger.info("finished gensim index %s" % index_gensim)
logger.info("loading mapping between article titles and ids")
id2title = gensim.utils.unpickle(os.path.join(indir, 'id2title'))
title2id = dict((title.lower(), pos) for pos, title in enumerate(id2title))
# print_similar('Anarchism', index_gensim, id2title, title2id)
if 'gensim' in program:
# log_precision(gensim_predictions, index_gensim, queries, index_gensim)
gensim_at_once(index_gensim, queries)
gensim_1by1(index_gensim, queries)
if 'flann' in program:
import pyflann
pyflann.set_distance_type('euclidean')
index_flann = pyflann.FLANN()
flann_fname = sim_prefix + "_flann_%s" % ACC
if os.path.exists(flann_fname):
logger.info("loading flann index")
index_flann.load_index(flann_fname, clipped)
else:
logger.info("building FLANN index")
# flann expects index vectors as a 2d numpy array, features = columns
params = index_flann.build_index(clipped, **ACC_SETTINGS['flann'][ACC])
logger.info("built flann index with %s" % params)
index_flann.save_index(flann_fname)
logger.info("finished FLANN index")
log_precision(flann_predictions, index_flann, queries, index_gensim)
flann_1by1(index_flann, queries)
flann_at_once(index_flann, queries)
if 'annoy' in program:
import annoy
index_annoy = annoy.AnnoyIndex(num_features, metric='angular')
annoy_fname = sim_prefix + "_annoy_%s" % ACC
if os.path.exists(annoy_fname):
logger.info("loading annoy index")
index_annoy.load(annoy_fname)
else:
logger.info("building annoy index")
# annoy expects index vectors as lists of Python floats
for i, vec in enumerate(clipped_corpus):
index_annoy.add_item(i, list(gensim.matutils.sparse2full(vec, num_features).astype(float)))
index_annoy.build(ACC_SETTINGS['annoy'][ACC])
index_annoy.save(annoy_fname)
logger.info("built annoy index")
log_precision(annoy_predictions, index_annoy, queries, index_gensim)
annoy_1by1(index_annoy, queries)
if 'lsh' in program:
import lsh
if os.path.exists(sim_prefix + "_lsh"):
logger.info("loading lsh index")
index_lsh = gensim.utils.unpickle(sim_prefix + "_lsh")
else:
logger.info("building lsh index")
index_lsh = lsh.index(**ACC_SETTINGS['lsh'][ACC])
# lsh expects input as D x 1 numpy arrays
for vecno, vec in enumerate(clipped_corpus):
index_lsh.InsertIntoTable(vecno, gensim.matutils.sparse2full(vec)[:, None])
gensim.utils.pickle(index_lsh, sim_prefix + '_lsh')
logger.info("finished lsh index")
log_precision(lsh_predictions, index_lsh, queries, index_gensim)
lsh_1by1(index_lsh, queries)
if 'sklearn' in program:
from sklearn.neighbors import NearestNeighbors
if os.path.exists(sim_prefix + "_sklearn"):
logger.info("loading sklearn index")
index_sklearn = gensim.utils.unpickle(sim_prefix + "_sklearn")
else:
logger.info("building sklearn index")
index_sklearn = NearestNeighbors(n_neighbors=TOP_N, algorithm='auto').fit(clipped)
logger.info("built sklearn index %s" % index_sklearn._fit_method)
gensim.utils.pickle(index_sklearn, sim_prefix + '_sklearn') # 32GB RAM not enough to store the scikit-learn model...
logger.info("finished sklearn index")
log_precision(sklearn_predictions, index_sklearn, queries, index_gensim)
sklearn_1by1(index_sklearn, queries)
sklearn_at_once(index_sklearn, queries)
if 'kgraph' in program:
import pykgraph
index_kgraph = pykgraph.KGraph()
if os.path.exists(sim_prefix + "_kgraph"):
logger.info("loading kgraph index")
index_kgraph.load(sim_prefix + "_kgraph")
else:
logger.info("building kgraph index")
index_kgraph.build(clipped)
logger.info("built kgraph index")
index_kgraph.save(sim_prefix + "_kgraph")
logger.info("finished kgraph index")
global dataset
dataset = clipped
log_precision(kgraph_predictions, index_kgraph, queries, index_gensim)
kgraph_1by1(index_kgraph, queries, clipped)
kgraph_at_once(index_kgraph, queries, clipped)
logger.info("finished running %s" % program)
| mit |
shicai/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
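# Added note (an interpretation of ColorNoiseMakerThread.run above, not part of
# the original source): each of the `num_noise` rows it produces is
#     noise_i = (n_i * pca_stdevs.T) . pca_vecs.T,   with n_i ~ N(0, I_3)
# i.e. Gaussian noise along the RGB principal components -- the "fancy PCA"
# colour augmentation popularised by Krizhevsky et al. (2012).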
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
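        # (Added note, an interpretation of the two steps below: their net effect is
        #     data_out = (data_in + coeff * noise) / (1 + coeff)
        #  so the overall magnitude stays close to that of the un-noised test data.)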
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
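# Added illustrative sketch (not part of the original file): how the array
# returned by get_plottable_data is typically handed to pylab. `dp` is assumed
# to be an ImageDataProvider and `data` the first matrix returned by its
# get_next_batch().
def _show_first_case(dp, data):
    from matplotlib import pylab as pl
    imgs = dp.get_plottable_data(data)  # (numCases, inner_size, inner_size, 3), values in [0, 1]
    pl.imshow(imgs[0], interpolation='nearest')
    pl.show()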
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
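# Added note (an interpretation of __trim_borders above, not part of the original
# source): with multiview testing the 9 views are crops whose origins form the
# 3x3 grid {0, border_size, 2*border_size} x {0, border_size, 2*border_size};
# during training one origin per case is drawn uniformly from [0, 2*border_size]
# and the crop is mirrored horizontally with probability 1/2.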
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |
kchodorow/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 56 | 1959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
global_step = tf.contrib.framework.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
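# Added note (what the schedule above works out to, assuming the default
# staircase=False of tf.train.exponential_decay):
#     lr(step) = 0.1 * 0.001 ** (step / 100.0)
# e.g. lr(0) = 0.1, lr(100) = 1e-4, lr(200) = 1e-7, so Adagrad starts aggressively
# and cools off quickly.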
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer_exp_decay)
classifier.fit(x_train, y_train, steps=800)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
tensorflow/tensorflow | tensorflow/python/kernel_tests/constant_op_eager_test.py | 6 | 22324 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import compat
# TODO(josh11b): add tests with lists/tuples, Shape.
# TODO(ashankar): Collapse with tests in constant_op_test.py and use something
# like the test_util.run_in_graph_and_eager_modes decorator to confirm
# equivalence between graph and eager execution.
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with context.device("/device:CPU:0"):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
device = test_util.gpu_device_name()
if device:
np_ans = np.array(x)
with context.device(device):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
orig = [-1.0, 2.0, 0.0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints
orig = [-1.5, 2, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints that don't fit in int32
orig = [1, 2**42, 0.5]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig, dtypes_lib.float64)
self.assertEqual(dtypes_lib.float64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# This integer is not exactly representable as a double, gets rounded.
tf_ans = constant_op.constant(2**54 + 1, dtypes_lib.float64)
self.assertEqual(2**54, tf_ans.numpy())
# This integer is larger than all non-infinite numbers representable
# by a double, raises an exception.
with self.assertRaisesRegex(ValueError, "out-of-range integer"):
constant_op.constant(10**310, dtypes_lib.float64)
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
self._testAll([-1, 2])
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
# Should detect out of range for int32 and use int64 instead.
orig = [2, 2**48, -2**48]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.int64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Out of range for an int64
with self.assertRaisesRegex(ValueError, "out-of-range integer"):
constant_op.constant([2**72])
def testComplex64(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testComplex128(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5
]).astype(np.complex128))
self._testAll(
np.complex(1, 2) * np.random.normal(size=30).reshape(
[2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
@test_util.disable_tfrt("support creating string tensors from empty "
"numpy arrays.")
def testString(self):
val = [compat.as_bytes(str(x)) for x in np.arange(-15, 15)]
self._testCpu(np.array(val).reshape([2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testStringWithNulls(self):
val = ops.convert_to_tensor(b"\0\0\0\0").numpy()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
val = ops.convert_to_tensor(b"xx\0xx").numpy()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
val = ops.convert_to_tensor(nested).numpy()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testStringConstantOp(self):
s = constant_op.constant("uiuc")
self.assertEqual(s.numpy().decode("utf-8"), "uiuc")
s_array = constant_op.constant(["mit", "stanford"])
self.assertAllEqual(s_array.numpy(), ["mit", "stanford"])
with ops.device("/cpu:0"):
s = constant_op.constant("cmu")
self.assertEqual(s.numpy().decode("utf-8"), "cmu")
s_array = constant_op.constant(["berkeley", "ucla"])
self.assertAllEqual(s_array.numpy(), ["berkeley", "ucla"])
def testExplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
def testImplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeFill(self):
c = constant_op.constant(12, shape=[7])
self.assertEqual(c.get_shape(), [7])
self.assertAllEqual([12, 12, 12, 12, 12, 12, 12], c.numpy())
def testExplicitShapeReshape(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[5, 2, 3])
self.assertEqual(c.get_shape(), [5, 2, 3])
def testImplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeTooBig(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
def testShapeTooSmall(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testShapeWrong(self):
with self.assertRaisesRegex(TypeError, None):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testShape(self):
self._testAll(constant_op.constant([1]).get_shape())
def testDimension(self):
x = constant_op.constant([1]).shape[0]
self._testAll(x)
def testDimensionList(self):
x = [constant_op.constant([1]).shape[0]]
self._testAll(x)
# Mixing with regular integers is fine too
self._testAll([1] + x)
self._testAll(x + [1])
def testDimensionTuple(self):
x = constant_op.constant([1]).shape[0]
self._testAll((x,))
self._testAll((1, x))
self._testAll((x, 1))
def testInvalidLength(self):
class BadList(list):
def __init__(self):
super(BadList, self).__init__([1, 2, 3]) # pylint: disable=invalid-length-returned
def __len__(self): # pylint: disable=invalid-length-returned
return -1
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant([BadList()])
with self.assertRaisesRegex(ValueError, "mixed types"):
constant_op.constant([1, 2, BadList()])
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant(BadList())
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant([[BadList(), 2], 3])
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant([BadList(), [1, 2, 3]])
with self.assertRaisesRegex(ValueError, "should return >= 0"):
constant_op.constant([BadList(), []])
# TODO(allenl, josh11b): These cases should return exceptions rather than
# working (currently shape checking only checks the first element of each
# sequence recursively). Maybe the first one is fine, but the second one
# silently truncating is rather bad.
# with self.assertRaisesRegex(ValueError, "should return >= 0"):
# constant_op.constant([[3, 2, 1], BadList()])
# with self.assertRaisesRegex(ValueError, "should return >= 0"):
# constant_op.constant([[], BadList()])
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegex(ValueError, "non-rectangular Python sequence"):
constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegex(ValueError, None):
constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegex(ValueError, None):
constant_op.constant([[1, 2], [3], [4, 5]])
# TODO(ashankar): This test fails with graph construction since
# tensor_util.make_tensor_proto (invoked from constant_op.constant)
# does not handle iterables (it relies on numpy conversion).
# For consistency, should graph construction handle Python objects
# that implement the sequence protocol (but not numpy conversion),
# or should eager execution fail on such sequences?
def testCustomSequence(self):
# This is inspired by how many objects in pandas are implemented:
# - They implement the Python sequence protocol
# - But may raise a KeyError on __getitem__(self, 0)
# See https://github.com/tensorflow/tensorflow/issues/20347
class MySeq(object):
def __getitem__(self, key):
if key != 1 and key != 3:
raise KeyError(key)
return key
def __len__(self):
return 2
def __iter__(self):
l = list([1, 3])
return l.__iter__()
self.assertAllEqual([1, 3], self.evaluate(constant_op.constant(MySeq())))
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.EagerTensor))
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
    # np.object (and can't be changed without breaking a lot of things), which
# causes a TypeError in constant_op.constant below. Here we catch the
# special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.string_
else:
numpy_dtype = dtype.as_numpy_dtype
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
z_value = z_var.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
@test_util.disable_tfrt("b/169112823: unsupported dtype for Op:ZerosLike.")
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=False)
@test_util.disable_tfrt("b/169112823: unsupported dtype for Op:ZerosLike.")
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.bool, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=True)
@test_util.disable_tfrt("b/169112823: unsupported dtype for Op:ZerosLike.")
def testZerosLikeDtype(self):
# Make sure zeros_like works even for dtypes that cannot be cast between
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).numpy()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
class OnesTest(test.TestCase):
def _Ones(self, shape):
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
numpy_dtype = dtype.as_numpy_dtype
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.numpy()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
ctx = context.context()
device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
with ops.device(device):
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.numpy()
self.assertAllClose(np_ans, out)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillComplex128(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").numpy()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill(shape, 7)
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([3, 2], [1.0, 2.0])
if __name__ == "__main__":
test.main()
| apache-2.0 |
pedrovma/pysal | doc/conf.py | 4 | 9048 | # -*- coding: utf-8 -*-
#
# pysal documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 6 15:54:22 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys, os
import sphinx_bootstrap_theme
###############################################################################################################
# Snippet to avoid this issue with pip: https://github.com/pysal/segregation/issues/58#issuecomment-502983859 #
###############################################################################################################
# Inspired in https://docs.readthedocs.io/en/stable/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['rtree']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
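# Added note: any other compiled dependency that trips up autodoc imports can be
# stubbed the same way; the extra names below are hypothetical examples only.
#
#   MOCK_MODULES = ['rtree', 'some_c_extension', 'another_binary_dep']
#   sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)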
sys.path.insert(0, os.path.abspath("../"))
# import your package to obtain the version info to display on the docs website
import pysal
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ #'sphinx_gallery.gen_gallery',
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinxcontrib.bibtex",
"sphinx.ext.mathjax",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"numpydoc",
#'sphinx.ext.napoleon',
"matplotlib.sphinxext.plot_directive",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pysal" # string of your project name, for example, 'giddy'
copyright = "2018-, pysal developers"
author = "pysal developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version.
version = pysal.__version__ # should replace it with your pysal
release = pysal.__version__ # should replace it with your pysal
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "tests/*"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "bootstrap"
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_title = "%s v%s Manual" % (project, version)
# (Optional) Logo of your package. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
# html_logo = "_static/images/package_logo.jpg"
# (Optional) PySAL favicon
html_favicon = "_static/images/pysal_favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
"navbar_title": "pysal", # string of your project name, for example, 'giddy'
# Render the next and previous page links in navbar. (Default: true)
"navbar_sidebarrel": False,
# Render the current pages TOC in the navbar. (Default: true)
#'navbar_pagenav': True,
#'navbar_pagenav': False,
# No sidebar
"nosidebar": True,
# Tab name for the current pages TOC. (Default: "Page")
#'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
"globaltoc_depth": 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
"globaltoc_includehidden": "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
#'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
"navbar_fixed_top": "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
"source_link_position": "footer",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo", "yeti", "flatly".
"bootswatch_theme": "yeti",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
"bootstrap_version": "3",
# Navigation bar menu
"navbar_links": [
("Installation", "installation"),
("API", "api"),
("References", "references"),
],
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pysal" + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "pysal.tex", u"pysal Documentation", u"pysal developers", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pysal", u"pysal Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pysal",
u"pysal Documentation",
author,
"pysal",
"One line description of project.",
"Miscellaneous",
)
]
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = True
class_members_toctree = True
numpydoc_show_inherited_class_members = True
numpydoc_use_plots = True
# display the source code for Plot directive
plot_include_source = True
def setup(app):
app.add_stylesheet("pysal-styles.css")
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/3.6/": None}
| bsd-3-clause |
Elizaveta239/PyDev.Debugger | pydev_ipython/qt_for_kernel.py | 6 | 3580 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api, QT_API_PYQT5)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
# #PyDev-779: In pysrc/pydev_ipython/qt_for_kernel.py, matplotlib_options should be replaced with latest from ipython
# (i.e.: properly check backend to decide upon qt4/qt5).
backend = mpl.rcParams.get('backend', None)
if backend == 'Qt4Agg':
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt4v2':
return [QT_API_PYQT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
elif backend == 'Qt5Agg':
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
mpqt)
# Fallback without checking backend (previous code)
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for qt backend from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask mpl, then use either
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYQT5]
#ETS variable present. Will fallback to external.qt
return None
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
| epl-1.0 |
MechCoder/scikit-learn | sklearn/tests/test_metaestimators.py | 52 | 4990 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
aselle/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 28 | 5024 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
from tensorflow.python.util.deprecation import deprecated
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
@deprecated(None, 'Please use tf.estimator.inputs.pandas_input_fn')
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
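# Added illustrative sketch (not part of the original module): a minimal call,
# assuming pandas is importable; the column and label values are arbitrary.
#
#   x = pd.DataFrame({'feature': [0.1, 0.2, 0.3]})
#   y = pd.Series([0, 1, 0])
#   input_fn = pandas_input_fn(x, y, batch_size=2, num_epochs=1, shuffle=False)
#   # input_fn can then be handed to a tf.contrib.learn Estimator's fit()/evaluate().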
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
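# Added example sketch (not part of the original module):
#
#   df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5]})
#   extract_pandas_data(df)
#   # -> array([[1. , 0.5],
#   #           [2. , 1.5]])
#   extract_pandas_data(pd.DataFrame({'s': ['x', 'y']}))  # raises ValueError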
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 |
icdishb/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic (sigmoid) curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
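# Added note (an interpretation of the figure produced above, not part of the
# original example): the blue curve is
#     P(y=1 | x) = 1 / (1 + exp(-(clf.coef_ * x + clf.intercept_)))
# and the predicted class flips where it crosses the 0.5 line drawn by axhline,
# i.e. at x = -clf.intercept_ / clf.coef_; the straight OLS fit has no such
# probabilistic reading.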
| bsd-3-clause |
steinnymir/BeamProfiler | utilities/camera.py | 1 | 5615 | # -*- coding: utf-8 -*-
"""
@author: Steinn Ymir Agustsson
Copyright (C) 2018 Steinn Ymir Agustsson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import time
import cv2
import matplotlib.pyplot as plt  # needed by Camera.snapshot() outside __main__
import numpy as np
from utilities import misc
def prop_id(string=None):
names = ['CV_CAP_PROP_POS_MSEC', 'CV_CAP_PROP_POS_FRAMES', 'CV_CAP_PROP_POS_AVI_RATIO', 'CV_CAP_PROP_FRAME_WIDTH',
'CV_CAP_PROP_FRAME_HEIGHT', 'CV_CAP_PROP_FPS', 'CV_CAP_PROP_FOURCC', 'CV_CAP_PROP_FRAME_COUNT',
'CV_CAP_PROP_FORMAT', 'CV_CAP_PROP_MODE', 'CV_CAP_PROP_BRIGHTNESS', 'CV_CAP_PROP_CONTRAST',
'CV_CAP_PROP_SATURATION', 'CV_CAP_PROP_HUE', 'CV_CAP_PROP_GAIN', 'CV_CAP_PROP_EXPOSURE',
'CV_CAP_PROP_CONVERT_RGB', 'CV_CAP_PROP_WHITE_BALANCE', 'CV_CAP_PROP_RECTIFICATION']
    if string is None:
        # No name given: list the known property ids and return, instead of
        # falling through to string.upper(), which would raise on None.
        for i, x in enumerate(names):
            print(i, x)
        return None
for i, x in enumerate(names):
if string.upper() in x:
return i
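# Added illustrative sketch (not part of the original file):
#
#   prop_id()          # no argument: prints every known capture property
#   prop_id('width')   # -> 3, i.e. CV_CAP_PROP_FRAME_WIDTH
#   cam = cv2.VideoCapture(0)
#   cam.get(prop_id('width'))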
def show_webcam(cam):
while True:
r_, img = cam.read()
cv2.imshow('rgb', img)
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
class SimCam(object):
def __init__(self, shape=(600, 800), small_changes=True, force_roundish=True):
self.lenx, self.leny = shape
self.x = np.linspace(0, self.lenx - 1, self.lenx)
self.y = np.linspace(0, self.leny - 1, self.leny)
self.xx, self.yy = np.meshgrid(self.x, self.y)
self.frame = None
self.moments = None
self.SMALL_CHANGES = small_changes
self.force_roundish = force_roundish
@property
def shape(self):
return (self.leny, self.lenx, 3)
@shape.setter
def shape(self, yx):
assert isinstance(yx, tuple), 'must be tuple'
self.leny, self.lenx = yx
self.x = np.linspace(0, self.lenx - 1, self.lenx)
self.y = np.linspace(0, self.leny - 1, self.leny)
self.xx, self.yy = np.meshgrid(self.x, self.y)
def read(self):
if self.SMALL_CHANGES:
img, self.moments = misc.simulate_peaks(moments=self.moments)
if self.force_roundish:
img, self.moments = misc.simulate_peaks(moments=self.moments, )
else:
img, self.moments = misc.simulate_peaks()
return True, img
def reset_moments(self):
self.moments = None
def get(self, prop):
if prop.upper() in 'CV_CAP_PROP_FRAME_WIDTH':
return self.lenx
elif prop.upper() in 'CV_CAP_PROP_FRAME_HEIGHT':
return self.leny
else:
pass
    def set(self, prop, val):
        # mirror the cv2.VideoCapture.set API by going through the shape setter
        if prop.upper() in 'CV_CAP_PROP_FRAME_WIDTH':
            self.shape = (self.leny, int(val))
        elif prop.upper() in 'CV_CAP_PROP_FRAME_HEIGHT':
            self.shape = (int(val), self.lenx)
        else:
            pass
def release(self):
print('correctly terminated simulated cam')
class Camera(object):
def __init__(self, address=None, revert_colors=True):
self.address = address
if self.address is None:
self.cam = SimCam()
else:
self.cam = cv2.VideoCapture(self.address)
self.frames = []
self.revert_colors = revert_colors
    def read_multiple(self, n, timeout=10):
        """Read up to n frames and return their average as a float array."""
        assert isinstance(n, int), 'number of frames must be integer'
        frames = []
        i = 0
        while len(frames) < n:
            r_, frame = self.read()
            if r_:
                frames.append(frame)
            else:
                i += 1
                time.sleep(.1)
                if i > timeout:
                    break
        if not frames:
            raise RuntimeError('no frames could be read from the camera')
        # accumulate in float to avoid integer overflow, then average
        avgframe = np.zeros_like(frames[0], dtype=float)
        for frame in frames:
            avgframe += frame
        return avgframe / len(frames)
def read(self):
r_, frame = self.cam.read()
if r_ and self.revert_colors and len(frame.shape) == 3:
frame = frame[..., ::-1]
return r_, frame
def grab(self):
return self.cam.grab()
def isOpened(self):
return self.cam.isOpened()
def release(self):
return self.cam.release()
def retrieve(self):
return self.cam.retrieve()
def set(self, prop, val):
if type(prop) == str:
prop = prop_id(prop)
return (self.cam.set(prop, val))
def get(self, prop):
if type(prop) == str:
prop = prop_id(prop)
return (self.cam.get(prop))
def __del__(self):
self.cam.release()
    def snapshot(self):
        import matplotlib.pyplot as plt  # local import: matplotlib is only needed here
        r_, frame = self.read()
        if r_:
            plt.figure('snapshot')
            plt.imshow(frame)
            plt.show()
def maximise_resolution(self):
self.set('width', 1920)
self.set('height', 1080)
def get_resolution(self):
x = self.get(3)
y = self.get(4)
return (x, y)
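# Usage sketch (illustrative): with no address Camera falls back to the simulated
# SimCam, so the module can be exercised without hardware, e.g.
#   cam = Camera()
#   ok, frame = cam.read()
#   avg = cam.read_multiple(5)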
def main():
pass
if __name__ == '__main__':
import matplotlib.pyplot as plt
cam = Camera()
r_, img = cam.read()
plt.imshow(img)
plt.show()
| gpl-3.0 |
zzcclp/spark | python/pyspark/pandas/tests/test_indexops_spark.py | 15 | 2952 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pyspark.sql.utils import AnalysisException
from pyspark.sql import functions as F
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SparkIndexOpsMethodsTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
@property
def psser(self):
return ps.from_pandas(self.pser)
def test_series_transform_negative(self):
with self.assertRaisesRegex(
ValueError, "The output of the function.* pyspark.sql.Column.*int"
):
self.psser.spark.transform(lambda scol: 1)
with self.assertRaisesRegex(AnalysisException, "cannot resolve.*non-existent.*"):
self.psser.spark.transform(lambda scol: F.col("non-existent"))
def test_multiindex_transform_negative(self):
with self.assertRaisesRegex(
NotImplementedError, "MultiIndex does not support spark.transform yet"
):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 1, 1, 1, 2, 1, 2, 2]],
)
s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
s.index.spark.transform(lambda scol: scol)
def test_series_apply_negative(self):
with self.assertRaisesRegex(
ValueError, "The output of the function.* pyspark.sql.Column.*int"
):
self.psser.spark.apply(lambda scol: 1)
with self.assertRaisesRegex(AnalysisException, "cannot resolve.*non-existent.*"):
self.psser.spark.transform(lambda scol: F.col("non-existent"))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_indexops_spark import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
jreback/pandas | pandas/tests/frame/methods/test_infer_objects.py | 6 | 1241 | from datetime import datetime
from pandas import DataFrame
import pandas._testing as tm
class TestInferObjects:
def test_infer_objects(self):
# GH#11221
df = DataFrame(
{
"a": ["a", 1, 2, 3],
"b": ["b", 2.0, 3.0, 4.1],
"c": [
"c",
datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3),
],
"d": [1, 2, 3, "d"],
},
columns=["a", "b", "c", "d"],
)
df = df.iloc[1:].infer_objects()
assert df["a"].dtype == "int64"
assert df["b"].dtype == "float64"
assert df["c"].dtype == "M8[ns]"
assert df["d"].dtype == "object"
expected = DataFrame(
{
"a": [1, 2, 3],
"b": [2.0, 3.0, 4.1],
"c": [datetime(2016, 1, 1), datetime(2016, 1, 2), datetime(2016, 1, 3)],
"d": [2, 3, "d"],
},
columns=["a", "b", "c", "d"],
)
# reconstruct frame to verify inference is same
result = df.reset_index(drop=True)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
krasch/wowohnen | vbb/pairs.py | 1 | 1149 | from itertools import permutations, combinations
import random
from time import sleep
import pandas as pd
import vbb
stops = pd.read_csv("../vbb_raw/stops_berlin.csv", sep=",")
stops = stops[["stop_id","stop_name"]]
stops = [tuple(stop) for stop in stops.values]
pairs = list(permutations(stops, 2))
# for now, only keep the ones that end at Alexanderplatz
alex = 9100003
pairs = [pair for pair in pairs if pair[1][0]==alex]
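# each pair has the form ((origin_id, origin_name), (destination_id, destination_name))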
# query vbb api for average trip duration
random.shuffle(pairs)
durations = []
for i, ((origin_id, origin_name), (destination_id, destination_name)) in enumerate(pairs):
print (i)
try:
duration = vbb.trip_duration(origin_id, destination_id)
durations.append((origin_id, origin_name, destination_id, destination_name, duration))
except:
print (origin_id, origin_name, destination_id, destination_name)
sleep(random.randint(10, 800) / 1000.0)
durations = pd.DataFrame(durations,
columns=["origin_id", "origin_name", "destination_id", "destination_name", "duration"])
durations = durations.sort_values("duration")
durations.to_csv("durations.csv", index=False)
| mit |
vascotenner/holoviews | tests/testoptions.py | 1 | 18128 | import os
import pickle
import numpy as np
from holoviews import Store, StoreOptions, Histogram, Image
from holoviews.core.options import OptionError, Cycle, Options, OptionTree
from holoviews.element.comparison import ComparisonTestCase
from holoviews import plotting # noqa Register backends
from unittest import SkipTest
Options.skip_invalid = False
class TestOptions(ComparisonTestCase):
def test_options_init(self):
Options('test')
def test_options_valid_keywords1(self):
Options('test', allowed_keywords=['kw1'], kw1='value')
def test_options_valid_keywords2(self):
Options('test', allowed_keywords=['kw1', 'kw2'], kw1='value')
def test_options_valid_keywords3(self):
Options('test', allowed_keywords=['kw1', 'kw2'], kw1='value', kw2='value')
def test_options_any_keywords3(self):
Options('test', kw1='value', kw2='value')
def test_options_invalid_keywords1(self):
try:
Options('test', allowed_keywords=['kw1'], kw='value')
except OptionError as e:
self.assertEqual(str(e), "Invalid option 'kw', valid options are: ['kw1']")
def test_options_invalid_keywords2(self):
try:
Options('test', allowed_keywords=['kw2'], kw2='value', kw3='value')
except OptionError as e:
self.assertEqual(str(e), "Invalid option 'kw3', valid options are: ['kw2']")
def test_options_get_options(self):
opts = Options('test', allowed_keywords=['kw2', 'kw3'],
kw2='value', kw3='value').options
self.assertEqual(opts, dict(kw2='value', kw3='value'))
def test_options_get_options_cyclic1(self):
opts = Options('test', allowed_keywords=['kw2', 'kw3'],
kw2='value', kw3='value')
for i in range(16):
self.assertEqual(opts[i], dict(kw2='value', kw3='value'))
def test_options_keys(self):
opts = Options('test', allowed_keywords=['kw3', 'kw2'],
kw2='value', kw3='value')
self.assertEqual(opts.keys(), ['kw2', 'kw3'])
def test_options_inherit(self):
original_kws = dict(kw2='value', kw3='value')
opts = Options('test', **original_kws)
new_kws = dict(kw4='val4', kw5='val5')
new_opts = opts(**new_kws)
self.assertEqual(new_opts.options, dict(original_kws, **new_kws))
def test_options_inherit_invalid_keywords(self):
original_kws = dict(kw2='value', kw3='value')
opts = Options('test', allowed_keywords=['kw3', 'kw2'], **original_kws)
new_kws = dict(kw4='val4', kw5='val5')
try:
opts(**new_kws)
except OptionError as e:
self.assertEqual(str(e), "Invalid option 'kw4', valid options are: ['kw2', 'kw3']")
class TestCycle(ComparisonTestCase):
def test_cycle_init(self):
Cycle(values=['a', 'b', 'c'])
Cycle(values=[1, 2, 3])
def test_cycle_expansion(self):
cycle1 = Cycle(values=['a', 'b', 'c'])
cycle2 = Cycle(values=[1, 2, 3])
opts = Options('test', one=cycle1, two=cycle2)
self.assertEqual(opts[0], {'one': 'a', 'two': 1})
self.assertEqual(opts[1], {'one': 'b', 'two': 2})
self.assertEqual(opts[2], {'one': 'c', 'two': 3})
self.assertEqual(opts[3], {'one': 'a', 'two': 1})
self.assertEqual(opts[4], {'one': 'b', 'two': 2})
self.assertEqual(opts[5], {'one': 'c', 'two': 3})
def test_cycle_slice(self):
cycle1 = Cycle(values=['a', 'b', 'c'])[2]
cycle2 = Cycle(values=[1, 2, 3])
opts = Options('test', one=cycle1, two=cycle2)
self.assertEqual(opts[0], {'one': 'a', 'two': 1})
self.assertEqual(opts[1], {'one': 'b', 'two': 2})
self.assertEqual(opts[2], {'one': 'a', 'two': 1})
self.assertEqual(opts[3], {'one': 'b', 'two': 2})
def test_options_property_disabled(self):
cycle1 = Cycle(values=['a', 'b', 'c'])
opts = Options('test', one=cycle1)
try:
opts.options
except Exception as e:
self.assertEqual(str(e), "The options property may only be used with non-cyclic Options.")
class TestOptionTree(ComparisonTestCase):
def test_optiontree_init_1(self):
OptionTree(groups=['group1', 'group2'])
def test_optiontree_init_2(self):
OptionTree(groups=['group1', 'group2'])
def test_optiontree_setter_getter(self):
options = OptionTree(groups=['group1', 'group2'])
opts = Options('group1', kw1='value')
options.MyType = opts
self.assertEqual(options.MyType['group1'], opts)
self.assertEqual(options.MyType['group1'].options, {'kw1':'value'})
def test_optiontree_dict_setter_getter(self):
options = OptionTree(groups=['group1', 'group2'])
opts1 = Options(kw1='value1')
opts2 = Options(kw2='value2')
options.MyType = {'group1':opts1, 'group2':opts2}
self.assertEqual(options.MyType['group1'], opts1)
self.assertEqual(options.MyType['group1'].options, {'kw1':'value1'})
self.assertEqual(options.MyType['group2'], opts2)
self.assertEqual(options.MyType['group2'].options, {'kw2':'value2'})
def test_optiontree_inheritance(self):
options = OptionTree(groups=['group1', 'group2'])
opts1 = Options(kw1='value1')
opts2 = Options(kw2='value2')
options.MyType = {'group1':opts1, 'group2':opts2}
opts3 = Options(kw3='value3')
opts4 = Options(kw4='value4')
options.MyType.Child = {'group1':opts3, 'group2':opts4}
self.assertEqual(options.MyType.Child.options('group1').kwargs,
{'kw1':'value1', 'kw3':'value3'})
self.assertEqual(options.MyType.Child.options('group2').kwargs,
{'kw2':'value2', 'kw4':'value4'})
def test_optiontree_inheritance_flipped(self):
"""
Tests for ordering problems manifested in issue #93
"""
options = OptionTree(groups=['group1', 'group2'])
opts3 = Options(kw3='value3')
opts4 = Options(kw4='value4')
options.MyType.Child = {'group1':opts3, 'group2':opts4}
opts1 = Options(kw1='value1')
opts2 = Options(kw2='value2')
options.MyType = {'group1':opts1, 'group2':opts2}
self.assertEqual(options.MyType.Child.options('group1').kwargs,
{'kw1':'value1', 'kw3':'value3'})
self.assertEqual(options.MyType.Child.options('group2').kwargs,
{'kw2':'value2', 'kw4':'value4'})
class TestStoreInheritance(ComparisonTestCase):
"""
Tests to prevent regression after fix in 71c1f3a that resolves
issue #43
"""
def setUp(self):
self.store_copy = OptionTree(sorted(Store.options().items()),
groups=['style', 'plot', 'norm'])
self.backend = 'matplotlib'
Store.current_backend = self.backend
Store.options(val=OptionTree(groups=['plot', 'style']))
options = Store.options()
self.default_plot = dict(plot1='plot1', plot2='plot2')
options.Histogram = Options('plot', **self.default_plot)
self.default_style = dict(style1='style1', style2='style2')
options.Histogram = Options('style', **self.default_style)
data = [np.random.normal() for i in range(10000)]
frequencies, edges = np.histogram(data, 20)
self.hist = Histogram(frequencies, edges)
super(TestStoreInheritance, self).setUp()
def lookup_options(self, obj, group):
return Store.lookup_options(self.backend, obj, group)
def tearDown(self):
Store.options(val=self.store_copy)
super(TestStoreInheritance, self).tearDown()
def test_original_style_options(self):
self.assertEqual(self.lookup_options(self.hist, 'style').options,
self.default_style)
def test_original_plot_options(self):
self.assertEqual(self.lookup_options(self.hist, 'plot').options,
self.default_plot)
def test_plot_inheritance_addition(self):
"Adding an element"
hist2 = self.hist(plot={'plot3':'plot3'})
self.assertEqual(self.lookup_options(hist2, 'plot').options,
dict(plot1='plot1', plot2='plot2', plot3='plot3'))
# Check style works as expected
self.assertEqual(self.lookup_options(hist2, 'style').options, self.default_style)
def test_plot_inheritance_override(self):
"Overriding an element"
hist2 = self.hist(plot={'plot1':'plot_child'})
self.assertEqual(self.lookup_options(hist2, 'plot').options,
dict(plot1='plot_child', plot2='plot2'))
# Check style works as expected
self.assertEqual(self.lookup_options(hist2, 'style').options, self.default_style)
def test_style_inheritance_addition(self):
"Adding an element"
hist2 = self.hist(style={'style3':'style3'})
self.assertEqual(self.lookup_options(hist2, 'style').options,
dict(style1='style1', style2='style2', style3='style3'))
# Check plot options works as expected
self.assertEqual(self.lookup_options(hist2, 'plot').options, self.default_plot)
def test_style_inheritance_override(self):
"Overriding an element"
hist2 = self.hist(style={'style1':'style_child'})
self.assertEqual(self.lookup_options(hist2, 'style').options,
dict(style1='style_child', style2='style2'))
# Check plot options works as expected
self.assertEqual(self.lookup_options(hist2, 'plot').options, self.default_plot)
class TestOptionTreeFind(ComparisonTestCase):
def setUp(self):
options = OptionTree(groups=['group'])
self.opts1 = Options('group', kw1='value1')
self.opts2 = Options('group', kw2='value2')
self.opts3 = Options('group', kw3='value3')
self.opts4 = Options('group', kw4='value4')
self.opts5 = Options('group', kw5='value5')
self.opts6 = Options('group', kw6='value6')
options.MyType = self.opts1
options.XType = self.opts2
options.MyType.Foo = self.opts3
options.MyType.Bar = self.opts4
options.XType.Foo = self.opts5
options.XType.Bar = self.opts6
self.options = options
self.original_options = Store.options()
Store.options(val = OptionTree(groups=['group']))
def tearDown(self):
Store.options(val=self.original_options)
def test_optiontree_find1(self):
self.assertEqual(self.options.find('MyType').options('group').options,
dict(kw1='value1'))
def test_optiontree_find2(self):
self.assertEqual(self.options.find('XType').options('group').options,
dict(kw2='value2'))
def test_optiontree_find3(self):
self.assertEqual(self.options.find('MyType.Foo').options('group').options,
dict(kw1='value1', kw3='value3'))
def test_optiontree_find4(self):
self.assertEqual(self.options.find('MyType.Bar').options('group').options,
dict(kw1='value1', kw4='value4'))
def test_optiontree_find5(self):
self.assertEqual(self.options.find('XType.Foo').options('group').options,
dict(kw2='value2', kw5='value5'))
def test_optiontree_find6(self):
self.assertEqual(self.options.find('XType.Bar').options('group').options,
dict(kw2='value2', kw6='value6'))
def test_optiontree_find_mismatch1(self):
self.assertEqual(self.options.find('MyType.Baz').options('group').options,
dict(kw1='value1'))
def test_optiontree_find_mismatch2(self):
self.assertEqual(self.options.find('XType.Baz').options('group').options,
dict(kw2='value2'))
def test_optiontree_find_mismatch3(self):
self.assertEqual(self.options.find('Baz').options('group').options, dict())
def test_optiontree_find_mismatch4(self):
self.assertEqual(self.options.find('Baz.Baz').options('group').options, dict())
class TestCrossBackendOptions(ComparisonTestCase):
"""
Test the style system can style a single object across backends.
"""
def setUp(self):
if 'bokeh' not in Store.renderers:
            raise SkipTest("Cross-backend tests assume bokeh is available.")
self.store_mpl = OptionTree(sorted(Store.options(backend='matplotlib').items()),
groups=['style', 'plot', 'norm'])
self.store_bokeh = OptionTree(sorted(Store.options(backend='bokeh').items()),
groups=['style', 'plot', 'norm'])
self.clear_options()
super(TestCrossBackendOptions, self).setUp()
def clear_options(self):
# Clear global options..
Store.options(val=OptionTree(groups=['plot', 'style']), backend='matplotlib')
Store.options(val=OptionTree(groups=['plot', 'style']), backend='bokeh')
# ... and custom options
Store.custom_options({}, backend='matplotlib')
Store.custom_options({}, backend='bokeh')
def tearDown(self):
Store.options(val=self.store_mpl, backend='matplotlib')
Store.options(val=self.store_bokeh, backend='bokeh')
Store.current_backend = 'matplotlib'
super(TestCrossBackendOptions, self).tearDown()
def test_mpl_bokeh_mpl(self):
img = Image(np.random.rand(10,10))
# Use blue in matplotlib
Store.current_backend = 'matplotlib'
StoreOptions.set_options(img, style={'Image':{'cmap':'Blues'}})
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# Use purple in bokeh
Store.current_backend = 'bokeh'
StoreOptions.set_options(img, style={'Image':{'cmap':'Purple'}})
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
# Check it is still blue in matplotlib...
Store.current_backend = 'matplotlib'
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# And purple in bokeh..
Store.current_backend = 'bokeh'
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
return img
def test_mpl_bokeh_offset_mpl(self):
img = Image(np.random.rand(10,10))
# Use blue in matplotlib
Store.current_backend = 'matplotlib'
StoreOptions.set_options(img, style={'Image':{'cmap':'Blues'}})
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# Switch to bokeh and style a random object...
Store.current_backend = 'bokeh'
img2 = Image(np.random.rand(10,10))
StoreOptions.set_options(img2, style={'Image':{'cmap':'Reds'}})
img2_opts = Store.lookup_options('bokeh', img2, 'style').options
self.assertEqual(img2_opts, {'cmap':'Reds'})
# Use purple in bokeh on the object...
StoreOptions.set_options(img, style={'Image':{'cmap':'Purple'}})
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
# Check it is still blue in matplotlib...
Store.current_backend = 'matplotlib'
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# And purple in bokeh..
Store.current_backend = 'bokeh'
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
return img
class TestCrossBackendOptionPickling(TestCrossBackendOptions):
cleanup = ['test_raw_pickle.pkl', 'test_pickle_mpl_bokeh.pkl']
def tearDown(self):
super(TestCrossBackendOptionPickling, self).tearDown()
for f in self.cleanup:
try:
os.remove(f)
except:
pass
def test_raw_pickle(self):
"""
Test usual pickle saving and loading (no style information preserved)
"""
fname= 'test_raw_pickle.pkl'
raw = super(TestCrossBackendOptionPickling, self).test_mpl_bokeh_mpl()
pickle.dump(raw, open(fname,'wb'))
self.clear_options()
img = pickle.load(open(fname,'rb'))
# Data should match
self.assertEqual(raw, img)
# But the styles will be lost without using Store.load/Store.dump
        Store.current_backend = 'matplotlib'
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {})
# ... across all backends
Store.current_backend = 'bokeh'
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {})
def test_pickle_mpl_bokeh(self):
"""
Test pickle saving and loading with Store (style information preserved)
"""
fname = 'test_pickle_mpl_bokeh.pkl'
raw = super(TestCrossBackendOptionPickling, self).test_mpl_bokeh_mpl()
Store.dump(raw, open(fname,'wb'))
self.clear_options()
img = Store.load(open(fname,'rb'))
# Data should match
self.assertEqual(raw, img)
# Check it is still blue in matplotlib...
Store.current_backend = 'matplotlib'
mpl_opts = Store.lookup_options('matplotlib', img, 'style').options
self.assertEqual(mpl_opts, {'cmap':'Blues'})
# And purple in bokeh..
Store.current_backend = 'bokeh'
bokeh_opts = Store.lookup_options('bokeh', img, 'style').options
self.assertEqual(bokeh_opts, {'cmap':'Purple'})
| bsd-3-clause |
etkirsch/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
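# Note (illustrative): with the 3 iris classes the One-vs-Rest scheme used by
# LinearSVC fits 3 binary classifiers, while the One-vs-One scheme used by SVC
# fits n_classes * (n_classes - 1) / 2 = 3 pairwise classifiers.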
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
Lawrence-Liu/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
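# By construction features 0-1 share the latent l1 and features 2-3 share l2, so
# the printed Corr(X) and Corr(Y) matrices should each show two strongly
# correlated 2x2 blocks (illustrative reading, not part of the original output).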
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
e-koch/VLA_Lband | 17B-162/HI/imaging/feather_comparisons.py | 1 | 7057 |
'''
Compare the data where they overlap in the uv plane.
No offset correction is needed.
'''
from spectral_cube import SpectralCube
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import os
import scipy.ndimage as nd
from uvcombine.scale_factor import find_scale_factor
from cube_analysis.feather_cubes import feather_compare_cube
from paths import (seventeenB_HI_data_02kms_path,
seventeenB_HI_data_1kms_path,
data_path, allfigs_path)
from constants import hi_freq
from plotting_styles import onecolumn_figure
# Compare with the 1 km/s cube. Higher S/N
# vla_cube = SpectralCube.read(seventeenB_HI_data_02kms_path("M33_14B_17B_HI_contsub_width_02kms.image.pbcor.fits"))
vla_cube = SpectralCube.read(seventeenB_HI_data_1kms_path("M33_14B_17B_HI_contsub_width_1kms.image.pbcor.fits"))
# pb_cube = SpectralCube.read(seventeenB_HI_data_02kms_path("M33_14B_17B_HI_contsub_width_02kms.pb.fits"))
pb_cube = SpectralCube.read(seventeenB_HI_data_1kms_path("M33_14B_17B_HI_contsub_width_1kms.pb.fits"))
# PB minimally changes over the frequency range. So just grab one plane
pb_plane = pb_cube[0]
# We need to define a tapered weighting function to ignore emission outside
# of the VLA mosaic
def taper_weights(mask, sigma, nsig_cut=3):
dist = nd.distance_transform_edt(mask)
gauss_dists = np.where(np.logical_and(dist < nsig_cut * sigma, dist > 0.))
flat_dists = np.where(dist >= nsig_cut * sigma)
weight_arr = np.zeros_like(mask, dtype=float)
weight_arr[gauss_dists] = \
np.exp(- (dist[gauss_dists] - nsig_cut * sigma)**2 / (2 * sigma**2))
weight_arr[flat_dists] = 1.
return weight_arr
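# Illustrative note (not in the original source): for a boolean mask that is True
# inside the mosaic, taper_weights() returns 0 outside the mask, ramps up with a
# Gaussian profile within nsig_cut * sigma pixels of the edge, and equals 1
# farther inside.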
weight = taper_weights(np.isfinite(pb_plane), 30, nsig_cut=5)
gbt_path = os.path.join(data_path, "GBT")
# gbt_cube = SpectralCube.read(os.path.join(gbt_path, "17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_02kms.fits"))
gbt_cube = SpectralCube.read(os.path.join(gbt_path, "17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_1kms.fits"))
beam_fwhm = lambda diam: ((1.18 * hi_freq.to(u.cm, u.spectral())) / diam.to(u.cm)) * u.rad
# Already determined from the 14B HI analysis. Lowered spatial resolution
# due to lack of overlap in the GBT fields centered at M33. So the data were
# gridded with a Gaussian kernel, rather than a jinc function
gbt_eff_beam = beam_fwhm(87.5 * u.m)
# The shortest baseline in the 14B-088 data is ~44 m.
las = (hi_freq.to(u.cm, u.spectral()) / (44 * u.m)).to(u.arcsec, u.dimensionless_angles())
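# Illustrative check (not from the original source): lambda_HI ~ 21.1 cm, so
# lambda / D = 0.211 m / 44 m ~ 4.8e-3 rad ~ 16.5 arcmin for the largest angular
# scale recovered by the shortest baseline.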
radii, ratios, high_pts, low_pts, chan_out = \
feather_compare_cube(vla_cube, gbt_cube, las,
num_cores=1,
lowresfwhm=gbt_eff_beam,
chunk=50,
verbose=False,
weights=weight,
relax_spectral_check=False,
# NOTE: there is an offset of ~0.4 km/s between the cubes
# The big GBT beam means this really doesn't matter (I
# manually checked). The difference is 0.36 times the
# channel size. I have no idea where this shift is coming
# from since the freq axis used in `gbt_regrid.py` matches
# the frequency in the individual channel MSs used in
# imaging. It's not even a half-channel offset like I
# would expect if the MS frequency was the channel edge...
spec_check_kwargs={'rtol': 0.4})
onecolumn_figure()
sc_factor, sc_err = find_scale_factor(np.hstack(low_pts), np.hstack(high_pts),
method='distrib',
verbose=True)
plt.grid(True)
plt.xlabel(r"ln I$_{\rm int}$ / I$_{\rm SD}$")
plt.tight_layout()
plt.savefig(allfigs_path("Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.png"))
plt.savefig(allfigs_path("Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"))
print("Factor: {0}+/-{1}".format(sc_factor, sc_err))
# Factor: 1.125046+/-0.00394768
# This isn't a fantastic fit, so this error was significantly underestimated
plt.close()
# Compare properties per-channel
sc_factor_chans = []
sc_err_chans = []
for low, high in zip(low_pts, high_pts):
sc_f, sc_e = \
find_scale_factor(low, high,
method='distrib',
verbose=False)
sc_factor_chans.append(sc_f)
sc_err_chans.append(sc_e)
sc_factor_chans_linfit = []
sc_err_chans_linfit = []
for low, high in zip(low_pts, high_pts):
sc_f, sc_e = \
find_scale_factor(low, high,
method='linfit',
verbose=False)
sc_factor_chans_linfit.append(sc_f)
sc_err_chans_linfit.append(sc_e)
sc_factor_chans_linfit = np.array(sc_factor_chans_linfit)
sc_err_chans_linfit = np.array(sc_err_chans_linfit)
chans = np.arange(len(low_pts))
onecolumn_figure()
plt.errorbar(chans, sc_factor_chans,
yerr=sc_err_chans,
alpha=0.5, label='Distrib Fit')
plt.errorbar(chans, sc_factor_chans_linfit,
yerr=[sc_factor_chans_linfit - sc_err_chans_linfit[:, 0],
sc_err_chans_linfit[:, 1] - sc_factor_chans_linfit],
alpha=0.5, label='Linear fit')
# plt.plot(chans, slope_lowess_85)
plt.axhline(1, linestyle='--')
plt.legend(frameon=True)
plt.ylabel(r"Scale Factor")
plt.xlabel("Channels")
plt.grid(True)
plt.tight_layout()
plt.savefig(allfigs_path("Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.png"))
plt.savefig(allfigs_path("Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"))
plt.close()
# Now refit with the channels near the systemic velocity, where most of the HI
# structure falls within the mosaic PB
chan_range = slice(80, 160)
onecolumn_figure()
sc_factor_chrange, sc_err_chrange = \
find_scale_factor(np.hstack(low_pts[chan_range]),
np.hstack(high_pts[chan_range]),
method='distrib',
verbose=True)
plt.grid(True)
plt.xlabel(r"ln I$_{\rm int}$ / I$_{\rm SD}$")
plt.tight_layout()
plt.savefig(allfigs_path(f"Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.png"))
plt.savefig(allfigs_path(f"Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.pdf"))
print("Factor: {0}+/-{1}".format(sc_factor_chrange, sc_err_chrange))
# Factor: 1.105133+/-0.00463
# Error still underestimated
# The >1 factor is due to some emission in the GBT data being cut-off by the
# PB limit of the VLA mosaic. The factor increases far from the systemic
# velocity, where bright HI gets cut-off (compared to the larger 14B data).
# So, despite the != 1 factor, no factor will be applied to the SD data.
# Besides, the 14B mosaic comparison gives a 1.0 factor with the GBT data.
# The tests here were for consistency and that's what we find.
plt.close()
| mit |
joshua-cogliati-inl/moose | scripts/git_commit_history.py | 5 | 10712 | #!/usr/bin/env python
import sys
import subprocess
import datetime
import re
import numpy
import matplotlib.pyplot as plt
import multiprocessing
import argparse
import itertools
import os
# A helper function for running git commands
def run(*args, **kwargs):
options = kwargs.pop("options", None)
if options:
loc = os.getenv('MOOSE_DIR', os.path.join(os.getenv('HOME'), 'projects', 'moose'))
if options.framework:
loc = os.path.join(loc, 'framework')
elif options.modules:
loc = os.path.join(loc, 'modules')
args += ('--', loc)
output, _ = subprocess.Popen(args, stdout = subprocess.PIPE).communicate()
if kwargs.pop('split',True):
return filter(None, output.split('\n'))
else:
return output
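# Usage sketch (illustrative): run('git', 'log', '--format=%ad', '--date=short',
# options=options) returns the commit dates as a list of 'YYYY-MM-DD' strings.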
# Return the list of contributors, sorted by the number of contributions
def getContributors(options, **kwargs):
# Get the number of authors
num_authors = kwargs.pop('authors', options.authors)
# Extract the authors and total number of commits
log = run('git', 'shortlog', '-s', '--no-merges', options=options)
authors = []
commits = []
for row in log:
r = row.split('\t')
commits.append(int(r[0]))
authors.append(r[1])
# Return the authors sorted by commit count
contributors = [x for (y,x) in sorted(zip(commits, authors), reverse=True)]
# Limit to the supplied number of authors
n = len(contributors)
if num_authors == 'moose':
contributors = ['Derek Gaston', 'Cody Permann', 'David Andrs', 'John W. Peterson', 'Andrew E. Slaughter', 'Brain Alger', 'Fande Kong']
contributors += ['Other (' + str(n-len(contributors)) + ')']
elif num_authors:
num_authors = int(num_authors)
contributors = contributors[0:num_authors]
contributors += ['Other (' + str(n-num_authors) + ')']
return contributors
# Return the date and contribution date
def getData(options):
# Build a list of contributors
contributors = getContributors(options)
# Flag for lumping into two categories MOOSE developers and non-moose developers
dev = options.moose_dev
if dev:
moose_developers = contributors[0:-1]
contributors = ['MOOSE developers (' + str(len(contributors)-1) + ')', contributors[-1]]
# Build a list of unique dates
all_dates = sorted(set(run('git', 'log', '--reverse', '--format=%ad', '--date=short', options=options)))
d1 = datetime.datetime.strptime(all_dates[0], '%Y-%m-%d')
d2 = datetime.datetime.strptime(all_dates[-1], '%Y-%m-%d')
dates = [d1 + datetime.timedelta(days=x) for x in range(0, (d2-d1).days, options.days)]
# Build the data arrays, filled with zeros
N = numpy.zeros((len(contributors), len(dates)), dtype=int)
data = {'commits' : numpy.zeros((len(contributors), len(dates)), dtype=int),
'in' : numpy.zeros((len(contributors), len(dates)), dtype=int),
'out' : numpy.zeros((len(contributors), len(dates)), dtype=int)}
contrib = numpy.zeros(len(dates), dtype=int)
all_contributors = getContributors(options, authors=None)
unique_contributors = []
# Get the additions/deletions
commits = run('git', 'log', '--format=%H\n%ad\n%aN', '--date=short', '--no-merges', '--reverse', '--shortstat', split=False, options=options)
commits = filter(None, re.split(r'[0-9a-z]{40}', commits))
# Loop over commits
for commit in commits:
c = filter(None, commit.split('\n'))
date = datetime.datetime.strptime(c[0], '%Y-%m-%d')
author = c[1]
if dev and author in moose_developers:
author = contributors[0]
elif author not in contributors:
author = contributors[-1]
i = contributors.index(author) # author index
d = filter(lambda x: x > date, dates)
if d:
j = dates.index(d[0])
else:
j = dates.index(dates[-1])
data['commits'][i,j] += 1
if options.additions and len(c) == 3:
a = c[2].split()
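            # c[2] looks like '3 files changed, 10 insertions(+), 2 deletions(-)'
            # (7 tokens); when only insertions or only deletions are present git
            # prints 5 tokens, which the branch below distinguishes.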
n = len(a)
files = int(a[0])
if n == 5:
if a[4].startswith('insertion'):
plus = int(a[3])
minus = 0
else:
minus = int(a[3])
plus = 0
else:
plus = int(a[3])
minus = int(a[5])
data['in'][i,j] += plus
data['out'][i,j] += minus
# Count unique contributions
unique_author_index = all_contributors.index(c[1])
unique_author = all_contributors[unique_author_index]
if unique_author not in unique_contributors:
unique_contributors.append(unique_author)
contrib[j] += 1
# Perform cumulative summations
data['commits'] = numpy.cumsum(data['commits'], axis=1)
contrib = numpy.cumsum(contrib)
# Return the data
return dates, data, contrib, contributors
# MAIN
if __name__ == '__main__':
# Command-line options
parser = argparse.ArgumentParser(description="Tool for building commit history of a git repository")
parser.add_argument('--additions', action='store_true', help='Show additions/deletions graph')
parser.add_argument('--days', type=int, default=1, help='The number of days to lump data (e.g., use 7 for weekly data)')
parser.add_argument('--disable-legend', action='store_true', help='Disable display of legend')
parser.add_argument('--stack', '-s', action='store_true', help='Show graph as stacked area instead of line plot')
parser.add_argument('--unique', '-u', action='store_true', help='Show unique contributor on secondary axis')
parser.add_argument('--unique-label', default='legend', choices=['none', 'arrow', 'legend'], help='Control how the unique contributor line is labeled.')
parser.add_argument('--open-source', '-r', action='store_true', help='Show shaded region for open sourcing of MOOSE')
parser.add_argument('--pdf', '--file', '-f', action='store_true', help='Write the plot to a pdf file (see --output)')
    parser.add_argument('--output', '-o', type=str, default='commit_history.pdf', help='The filename for writing the plot to a file')
parser.add_argument('--authors', default=None, help='Limit the graph to the given number of entries authors, or use "moose" to limit to MOOSE developers')
parser.add_argument('--moose-dev', action='store_true', help='Create two categories: MOOSE developers and other (this overrides --authors)')
parser.add_argument('--framework', action='store_true', help='Limit the analysis to framework directory')
parser.add_argument('--modules', action='store_true', help='Limit the analysis to modules directory')
parser.add_argument('--font', default=12, help='The font-size, in points')
parser.parse_args('-surf'.split())
options = parser.parse_args()
# Markers/colors
marker = itertools.cycle(('o', 'v', 's', 'd'))
color = itertools.cycle(('g', 'r', 'b', 'c', 'm', 'y', 'k'))
# Setup authors defaults for various cases
if options.moose_dev and options.authors:
raise Exception("Can not specify both --authors and --moose-dev");
elif options.moose_dev:
options.authors = 'moose'
# Error if both --framework and --modules are given
if options.framework and options.modules:
raise Exception("Can not specify both --framework and --modules")
# Extract the data
dates, data, contrib, contributors = getData(options)
# Create the figure
fig, ax1 = plt.subplots()
fig.set_facecolor([1,1,1])
for tick in ax1.yaxis.get_ticklabels():
tick.set_fontsize(options.font)
for tick in ax1.xaxis.get_ticklabels():
tick.set_fontsize(options.font)
# Show unique contributors
if options.unique:
ax2 = ax1.twinx()
ax2.plot(dates, contrib, linewidth=4, linestyle='-', color='k', label='Unique Contributors')
ax2.set_ylabel('Unique Contributors', color='k', fontsize=options.font)
for tick in ax2.yaxis.get_ticklabels():
tick.set_fontsize(options.font)
for tick in ax2.xaxis.get_ticklabels():
tick.set_fontsize(options.font)
if options.unique_label == 'arrow':
arrow = dict(arrowstyle="-|>", connectionstyle="arc3,rad=0.3", fc="w")
i = int(len(dates)*0.75)
c = int(contrib[-1]*0.75)
ax2.annotate('Unique Contributors', xy=(dates[i], contrib[i]), xytext=(datetime.date(2014,1,1), c), ha='right', size=options.font, arrowprops=arrow)
# labels
y_label = 'Commits'
# Plot the data
if options.stack: # stack plot
handles = plt.stackplot(dates, data['commits'])
for i in range(len(handles)):
handles[i].set_label(contributors[i])
elif options.additions: #additions/deletions plot
factor = 1000.
y_label = 'Additions / Deletions (x1000)'
n = len(contributors)
for i in range(n):
x = numpy.array(dates)
y = numpy.array(data['in'][i,:])/factor
if n == 1:
label = 'Additions'
else:
label = contributors[i] + '(Additions)'
clr = color.next()
ax1.fill_between(x, 0, y, label=label, linewidth=2, edgecolor=clr, facecolor=clr, alpha=1.0)
y = -numpy.array(data['out'][i,:])/factor
if n == 1:
label = 'Deletions'
else:
label = contributors[i] + '(Deletions)'
clr = color.next()
ax1.fill_between(x, 0, y, label=label, linewidth=2, edgecolor=clr, facecolor=clr, alpha=1.0)
if not options.disable_legend:
handles, labels = ax1.get_legend_handles_labels()
if options.unique and options.unique_label== 'legend':
h, l = ax2.get_legend_handles_labels()
handles.append(h[0])
labels.append(l[0])
lgnd = plt.legend(handles, labels, loc='upper left', fontsize=options.font)
lgnd.draw_frame(False)
else: # line plot
handles = []
for i in range(len(contributors)):
x = numpy.array(dates)
y = data['commits'][i,:]
idx = y>0
h = ax1.plot(x[idx], y[idx], label=contributors[i], linewidth=2, markevery=60, marker=marker.next(), color=color.next())
handles.append(h[0])
if not options.disable_legend:
if options.unique and options.unique_label== 'legend':
h, l = ax2.get_legend_handles_labels()
handles.append(h[0])
contributors.append(l[0])
lgnd = plt.legend(handles, contributors, loc='upper left', fontsize=options.font)
lgnd.draw_frame(False)
# Add labels
ax1.set_ylabel(y_label, fontsize=options.font)
ax1.set_xlabel('Date', fontsize=options.font)
# Show open-source region
if options.open_source:
os = datetime.date(2014,3,10)
x_lim = ax2.get_xlim()
y_lim = ax1.get_ylim()
delta = x_lim[1] - os.toordinal()
plt.gca().add_patch(plt.Rectangle((os.toordinal(), y_lim[0]), delta, y_lim[1]-y_lim[0], facecolor='green', alpha=0.2))
ax1.annotate('Open-source ', xy=(x_lim[1] - (delta/2.), y_lim[0]), ha='center', va='bottom', size=options.font)
# Write to a file
if options.pdf:
fig.savefig(options.output)
plt.tight_layout()
plt.show()
| lgpl-2.1 |
andresailer/DIRAC | Core/Utilities/Graphs/CurveGraph.py | 4 | 4787 | ########################################################################
# $HeadURL$
########################################################################
""" CurveGraph represents simple line graphs with markers.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import darkenColor, to_timestamp, PrettyDateLocator, \
PrettyDateFormatter, PrettyScalarFormatter
from matplotlib.lines import Line2D
from matplotlib.dates import date2num
import datetime
class CurveGraph( PlotBase ):
"""
The CurveGraph class is a straightforward line graph with markers
"""
def __init__(self,data,ax,prefs,*args,**kw):
PlotBase.__init__(self,data,ax,prefs,*args,**kw)
def draw( self ):
PlotBase.draw(self)
self.x_formatter_cb(self.ax)
if self.gdata.isEmpty():
return None
start_plot = 0
end_plot = 0
if "starttime" in self.prefs and "endtime" in self.prefs:
start_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['starttime'])))
end_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['endtime'])))
labels = self.gdata.getLabels()
labels.reverse()
# If it is a simple plot, no labels are used
# Evaluate the most appropriate color in this case
if self.gdata.isSimplePlot():
labels = [('SimplePlot',0.)]
color = self.prefs.get('plot_color','Default')
if color.find('#') != -1:
self.palette.setColor('SimplePlot',color)
else:
labels = [(color,0.)]
tmp_max_y = []
tmp_min_y = []
tmp_x = []
for label,num in labels:
xdata = []
ydata = []
xerror = []
yerror = []
color = self.palette.getColor(label)
plot_data = self.gdata.getPlotNumData(label)
for key, value, error in plot_data:
if value is None:
continue
tmp_x.append( key )
tmp_max_y.append( value + error )
tmp_min_y.append( value - error )
xdata.append( key )
ydata.append( value )
xerror.append( 0. )
yerror.append( error )
linestyle = self.prefs.get( 'linestyle', '-' )
marker = self.prefs.get( 'marker', 'o' )
markersize = self.prefs.get( 'markersize', 8. )
markeredgewidth = self.prefs.get( 'markeredgewidth', 1. )
if not self.prefs.get( 'error_bars', False ):
line = Line2D( xdata, ydata, color=color, linewidth=1., marker=marker, linestyle=linestyle,
markersize=markersize, markeredgewidth=markeredgewidth,
markeredgecolor = darkenColor( color ) )
self.ax.add_line( line )
else:
self.ax.errorbar( xdata, ydata, color=color, linewidth=2., marker=marker, linestyle=linestyle,
markersize=markersize, markeredgewidth=markeredgewidth,
markeredgecolor = darkenColor( color ), xerr = xerror, yerr = yerror,
ecolor=color )
ymax = max( tmp_max_y )
ymax *= 1.1
    ymin = min( min( tmp_min_y ), 0. )
ymin *= 1.1
if self.prefs.has_key('log_yaxis'):
ymin = 0.001
xmax=max(tmp_x)*1.1
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ymin = self.prefs.get( 'ymin', ymin )
ymax = self.prefs.get( 'ymax', ymax )
xmin = self.prefs.get( 'xmin', xmin )
xmax = self.prefs.get( 'xmax', xmax )
self.ax.set_xlim( xmin=xmin, xmax=xmax )
self.ax.set_ylim( ymin=ymin, ymax=ymax )
if self.gdata.key_type == 'time':
if start_plot and end_plot:
self.ax.set_xlim( xmin=start_plot, xmax=end_plot)
else:
self.ax.set_xlim( xmin=min(tmp_x), xmax=max(tmp_x))
def x_formatter_cb( self, ax ):
if self.gdata.key_type == "string":
smap = self.gdata.getStringMap()
reverse_smap = {}
for key, val in smap.items():
reverse_smap[val] = key
ticks = smap.values()
ticks.sort()
ax.set_xticks( [i+.5 for i in ticks] )
ax.set_xticklabels( [reverse_smap[i] for i in ticks] )
labels = ax.get_xticklabels()
ax.grid( False )
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ax.set_xlim( xmin=xmin,xmax=len(ticks) )
elif self.gdata.key_type == "time":
dl = PrettyDateLocator()
df = PrettyDateFormatter( dl )
ax.xaxis.set_major_locator( dl )
ax.xaxis.set_major_formatter( df )
ax.xaxis.set_clip_on(False)
sf = PrettyScalarFormatter( )
ax.yaxis.set_major_formatter( sf )
else:
return None
| gpl-3.0 |
kmkolasinski/Quantulaba | plots/plot_SpinPolarizations.py | 2 | 1469 | #!/usr/bin/python
"""
Created on Thu Mar 5 14:16:21 2015
@author: Krzysztof Kolasinski
"""
import numpy as np
import matplotlib.pyplot as plt
import csv
import matplotlib.colors as colors
import matplotlib.cm as cmx
file = "polarizations.dat"
f = open(file)
lines = f.readlines()
plt.cla()
cmap = plt.cm.jet
scalarMap = cmx.ScalarMappable(cmap=cmap)
for i in range(np.size(lines)):
data = [float(x) for x in lines[i].split()]
no_modes = int(data[0])
Ef = data[1] # Fermi energy
kvecs = data[2:no_modes+2]# take wave vectors
pXYZ = data[no_modes+2:] # take polarizations
p = np.reshape(pXYZ,[3,no_modes])
print "m=",no_modes,"Ef=",Ef
#print p
for m in range(no_modes):
        p[:,m] /= np.sqrt(np.sum(p[:,m]**2))
colors = cmx.jet((p[2,:]+1)/2)
plt.scatter(kvecs,[Ef]*no_modes,c=colors,zorder=2)
for m in range(no_modes):
#print "m=",m," p=", sqrt(sum(p[:,m]**2))
c = cmx.jet((p[2,m]+1)/2)
p[0,m] /= 25.0
p[:,m] /= 2.0
plt.arrow(kvecs[m], Ef, p[0,m], p[1,m], head_width=0.001, head_length=0.01,color=c)
file = "bands.dat"
data = np.loadtxt(file)
no_lines = np.size(data[0,:])
E0= 27211.384523
x = data[:,0]
for i in range(no_lines-1):
plt.plot(x,data[:,i+1]*E0,c='k',ls='-',zorder=0)
plt.xlim([-0.4,0.4])
plt.ylim([-0.1,6])
plt.xlabel("k [1/unit size]")
plt.ylabel("Energy [meV]") | mit |
zorojean/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
bjornaa/roppy | examples/plot_current25.py | 1 | 2273 | # ------------------------------------------------------
# current.py
#
# Plot a current field at fixed depth
# Modified from the spermplot example
#
# Bjørn Ådlandsvik <[email protected]>
# 2020-03-27
# ------------------------------------------------------
# -------------
# Imports
# -------------
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from roppy import SGrid
from roppy.mpl_util import landmask
from roppy.trajectories import curly_vectors
# -------------------------
# User settings
# ------------------------
ncfile = "data/ocean_avg_example.nc"
timeframe = 3 # Fourth time frame
# subgrid = (1,-1,1,-1) # whole grid except boundary cells
subgrid = (110, 170, 35, 90)
# Depth level [m]
z = 25
# Distance between vectors
stride = 2
# Speed level (isotachs)
speedlevels = np.linspace(0, 0.5, 6) # 0.0, 0.1, ...., 0.5
# Colormap for speed
speedcolors = "YlOrRd"
# --------------------
# Read the data
# --------------------
f = Dataset(ncfile)
grid = SGrid(f, subgrid=subgrid)
# Read 3D current for the subgrid
U0 = f.variables["u"][timeframe, :, grid.Ju, grid.Iu]
V0 = f.variables["v"][timeframe, :, grid.Jv, grid.Iv]
Mu = f.variables["mask_u"][grid.Ju, grid.Iu]
Mv = f.variables["mask_v"][grid.Jv, grid.Iv]
# f.close()
# ----------------------
# Handle the data
# ----------------------
# Interpolate to rho-points
U1 = 0.5 * (U0[:, :, :-1] + U0[:, :, 1:])
V1 = 0.5 * (V0[:, :-1, :] + V0[:, 1:, :])
# Interpolate to correct depth level
U = grid.zslice(U1, z)
V = grid.zslice(V1, z)
# Remove velocity at land and below bottom
U[grid.h < z] = np.nan
V[grid.h < z] = np.nan
# Compute the current speed
Speed = np.sqrt(U * U + V * V)
# Impose the stride
X = grid.X[::stride]
Y = grid.Y[::stride]
U = U[::stride, ::stride]
V = V[::stride, ::stride]
# --------------------
# Make the plot
# --------------------
# Contour plot of current speed
plt.contourf(grid.X, grid.Y, Speed, levels=speedlevels, cmap=speedcolors)
plt.colorbar()
# Make the vector plot
plt.quiver(X, Y, U, V, width=0.003)
# Plot green land mask
landmask(grid, "LightGreen")
# Set correct aspect ratio and axis limits
plt.axis("image")
plt.axis((grid.i0 + 0.5, grid.i1 - 1.5, grid.j0 + 0.5, grid.j1 - 1.5))
# Display the plot
plt.show()
| mit |
deepesch/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed.
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10  # integer division so make_regression gets an int count
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10  # integer division so make_regression gets an int count
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
chtaal/imgtosnd | example.py | 1 | 1335 | ## imports
import imgtosnd as i2s
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
## short-time fourier parameters
sample_rate = 16000 # sample rate in Hz
frame_size = i2s.next_pow2(sample_rate*25/1000) # frame size in samples
hop_size = int(frame_size/2) # hop size (samples)
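# a hop of half a frame gives 50 % overlap between successive analysis frames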
## read image and get complex spectrogram with random phase
im = i2s.read_image_as_spec("data/me.jpg", frame_size, sample_rate, hop_size, num_seconds=20)
im **= 2
im = i2s.add_random_phase(im)
## convert image to audio via inverse short-time fft
x = i2s.st_irfft(im, frame_size, hop_size, win_sqrt=False)
## apply some normalization to the audio signal and write to disk
x /= np.max(np.abs(x))
wavfile.write('data/me.wav', sample_rate, x)  # same forward-slash path style as the input image
## get back spectrogram of synthesized waveform
xf = i2s.st_rfft(x, frame_size, hop_size)
xf = np.abs(xf)
xf /= np.max(xf)
## plot stuff
ax = plt.subplot(121)
t = np.arange(np.size(x))/sample_rate
plt.plot(t, x)
plt.grid()
plt.xlabel('Time (s)')
plt.subplot(122, sharex=ax)
t = np.arange(np.shape(im)[1])/(sample_rate/hop_size)
f = np.fft.rfftfreq(frame_size)*sample_rate/1000
plt.pcolormesh(t, f, 20*np.log10(xf), cmap='plasma')
plt.colorbar(label='level(dB)')
plt.clim([-30, 0])
plt.colormaps()
plt.xlabel('Time (s)')
plt.ylabel('Frequency (kHz)')
plt.gcf().autofmt_xdate() | gpl-3.0 |
TaikiGoto/master | ch06/optimizer_compare_mnist.py | 4 | 1942 | # coding: utf-8
import os
import sys
sys.path.append(os.pardir)  # allow importing files from the parent directory
import numpy as np  # make the numpy dependency explicit (np is used for batching and plotting below)
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.util import smooth_curve
from common.multi_layer_net import MultiLayerNet
from common.optimizer import *
# 0: Load the MNIST data ==========
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
train_size = x_train.shape[0]
batch_size = 128
max_iterations = 2000
# 1: Set up the experiment ==========
optimizers = {}
optimizers['SGD'] = SGD()
optimizers['Momentum'] = Momentum()
optimizers['AdaGrad'] = AdaGrad()
optimizers['Adam'] = Adam()
#optimizers['RMSprop'] = RMSprop()
networks = {}
train_loss = {}
for key in optimizers.keys():
networks[key] = MultiLayerNet(
input_size=784, hidden_size_list=[100, 100, 100, 100],
output_size=10)
train_loss[key] = []
# 2: Start training ==========
for i in range(max_iterations):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
for key in optimizers.keys():
grads = networks[key].gradient(x_batch, t_batch)
optimizers[key].update(networks[key].params, grads)
loss = networks[key].loss(x_batch, t_batch)
train_loss[key].append(loss)
if i % 100 == 0:
print( "===========" + "iteration:" + str(i) + "===========")
for key in optimizers.keys():
loss = networks[key].loss(x_batch, t_batch)
print(key + ":" + str(loss))
# 3: Draw the graphs ==========
markers = {"SGD": "o", "Momentum": "x", "AdaGrad": "s", "Adam": "D"}
x = np.arange(max_iterations)
for key in optimizers.keys():
plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)
plt.xlabel("iterations")
plt.ylabel("loss")
plt.ylim(0, 1)
plt.legend()
plt.show()
| mit |
imaculate/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
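# The Hilbert matrix is a classic example of severe ill-conditioning
# (its condition number is roughly 1.6e13 for n=10)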
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
sarahgrogan/scikit-learn | sklearn/neighbors/base.py | 71 | 31147 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
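    Examples
    ========
    A small illustrative call of the ``'distance'`` mode (inverse distances);
    the shape mirrors the ``dist`` array returned by ``kneighbors``::
        >>> import numpy as np
        >>> dist = np.array([[1., 2., 4.]])
        >>> w = _get_weights(dist, 'distance')  # -> array([[1., 0.5, 0.25]])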
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
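            # (sqrt is monotonic, so the neighbor ranking computed from squared
            # distances is unchanged; np.sqrt is applied further down only to
            # the distances that are actually returned)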
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: when the number of duplicates is larger
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
matthewwardrop/python-mplkit | mplkit/cmap.py | 2 | 4459 | import matplotlib
from matplotlib.colors import Colormap, LinearSegmentedColormap, ListedColormap
import numpy as np
class WrappedColormap(Colormap):
"""
`WrappedColormap` wraps around an instance of `matplotlib.colors.Colormap`,
provides the `luma` method, but otherwise does nothing to the colormap.
"""
def __init__(self, cmap, *args, **kwargs):
assert(isinstance(cmap, Colormap))
self.cmap = cmap
self.init(*args,**kwargs)
def init(*args,**kwargs):
pass
def __call__(self, X, alpha=None, bytes=False):
return self.cmap(X, alpha=alpha, bytes=bytes)
def set_bad(self, color='k', alpha=None):
self.cmap.set_bad(color=color, alpha=alpha)
def set_under(self, color='k', alpha=None):
self.cmap.set_under(color=color, alpha=alpha)
def set_over(self, color='k', alpha=None):
self.cmap.set_over(color=color, alpha=alpha)
def _set_extremes(self):
self.cmap._set_extremes()
def _init(self):
"""Generate the lookup table, self._lut"""
return self.cmap._init()
def is_gray(self):
return self.cmap.is_gray()
def __getattr__(self, key):
return getattr(self.cmap,key)
def luma(self, X, alpha=None, bytes=False):
color = self(X, alpha=alpha, bytes=bytes)
def get_luma(c):
return np.average(c,axis=-1,weights=[0.299,0.587,0.114,0])
return get_luma(color)
class ReversedColormap(WrappedColormap):
'Reverses the color map.'
def __call__(self, X, alpha=None, bytes=False):
return self.cmap(1-X, alpha=alpha, bytes=bytes)
class InvertedColormap(WrappedColormap):
'Inverts the color map according to (R,G,B,A) - > (1-R,1-G,1-B,A).'
def __call__(self, X, alpha=None, bytes=False):
color = self.cmap(X, alpha=alpha, bytes=bytes)
def invert(c):
c = map(lambda x: 1-x, c)
c[-1] = 1
return tuple(c)
if isinstance(X, (np.ndarray,) ):
color = 1 - color
color[...,-1] = 1
return color
else:
return invert(color)
class DesaturatedColormap(WrappedColormap):
'Constructs a new colormap that preserves only the luma; or "brightess".'
def __call__(self, X, alpha=None, bytes=False):
color = self.cmap(X, alpha=alpha, bytes=bytes)
def get_luma(c):
return np.average(c,axis=-1,weights=[0.299,0.587,0.114,0])
r = np.kron(get_luma(color),[1,1,1,1]).reshape(np.shape(X)+(4,))
r[...,-1] = 1
return r
class ConcatenatedColormap(WrappedColormap):
"""
	`ConcatenatedColormap` wraps around instances of `matplotlib.colors.Colormap`,
	and when used as a colour map returns the result of concatenating the maps
	linearly. It should be initialised as:
ConcatenatedColormap(<cmap1>,0.2,<cmap2>,...)
Where the numbers indicate where the cmaps are joined. The 0 at the beginning and the 1
at the end are inferred.
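	A minimal usage sketch (illustrative only, assuming two of matplotlib's
	built-in colormaps and an arbitrary split point at 0.5)::
		import matplotlib.cm as cm
		cmap = ConcatenatedColormap(cm.get_cmap('autumn'), 0.5, cm.get_cmap('winter'))
		cmap(0.25)  # below the split: rescaled and looked up in 'autumn'
		cmap(0.75)  # above the split: rescaled and looked up in 'winter'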
"""
def init(self,*args):
self.cmaps = [self.cmap]
self.cmap_joins = []
assert(len(args) % 2 == 0)
for i in xrange(0,len(args),2):
self.cmap_joins.append(float(args[i]))
assert(args[i] < 1 and args[i] > 0 and (i==0 or args[i] > self.cmap_joins[-2]))
assert(isinstance(args[i+1],Colormap))
self.cmaps.append(args[i+1])
def __call__(self, X, alpha=None, bytes=False):
def get_color(v):
min = 0
max = 1
cmap_index = np.sum(np.array(self.cmap_joins) < v)
assert (cmap_index <= len(self.cmaps))
if (cmap_index > 0):
min = self.cmap_joins[cmap_index-1]
if (cmap_index < len(self.cmaps)-1):
max = self.cmap_joins[cmap_index]
scaled_v = (v-min)/(max-min)
return tuple(self.cmaps[cmap_index](scaled_v,alpha=alpha,bytes=bytes))
if isinstance(X,np.ndarray):
vfunc = np.vectorize(get_color,otypes=["float","float","float", "float"])
return np.rollaxis(np.array(vfunc(X)),0,len(X.shape)+1)
else:
return get_color(X)
def set_bad(self, color='k', alpha=None):
pass#self.cmap.set_bad(color=color, alpha=alpha)
def set_under(self, color='k', alpha=None):
pass#self.cmap.set_under(color=color, alpha=alpha)
def set_over(self, color='k', alpha=None):
pass#self.cmap.set_over(color=color, alpha=alpha)
def _set_extremes(self):
pass#self.cmap._set_extremes()
def _init(self):
"""Generate the lookup table, self._lut"""
for cm in self.cmaps:
cm._init()
def is_gray(self):
return np.all([cm.is_gray() for cm in self.cmaps])
def __getattr__(self, key):
return getattr(self.cmap,key)
def luma(self, X, alpha=None, bytes=False):
color = self(X, alpha=alpha, bytes=bytes)
def get_luma(c):
return np.average(c,axis=-1,weights=[0.299,0.587,0.114,0])
return get_luma(color)
| mit |
ahaldane/numpy | numpy/doc/creation.py | 24 | 5496 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to NumPy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
 >>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple, lists, and types
 >>> x
 array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic NumPy Array Creation
==============================
NumPy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: h5py
FITS: Astropy
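For instance, an HDF5 dataset can be pulled straight into an array with h5py
(the file and dataset names here are only illustrative, and h5py must be
installed): ::
 >>> import h5py
 >>> with h5py.File('measurements.h5', 'r') as f:
 ...     arr = f['data'][...] # slicing an h5py dataset returns a numpy array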
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
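As a small sketch (the file name is only illustrative), a numeric CSV file with
a single header row can also be read with numpy itself: ::
 >>> data = np.loadtxt('table.csv', delimiter=',', skiprows=1)
 >>> named = np.genfromtxt('table.csv', delimiter=',', names=True)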
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
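A minimal round trip (with an illustrative file name; note that tofile/fromfile
store no dtype or shape information, so the reader must supply them): ::
 >>> a = np.arange(10, dtype=np.float32)
 >>> a.tofile('raw.dat')
 >>> b = np.fromfile('raw.dat', dtype=np.float32)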
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
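For example (the values are random, so only the shapes matter here): ::
 >>> r = np.random.random((2, 3)) # uniform samples in [0, 1), shape (2, 3)
 >>> d = np.diag([1, 2, 3]) # 3x3 array with 1, 2, 3 on the diagonal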
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
glouppe/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | 104 | 2878 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
jjs0sbw/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/rcsetup.py | 2 | 25707 | """
The rcsetup module contains the default values and the validation code for
customization using matplotlib's rc settings.
Each rc setting is assigned a default value and a function used to validate any
attempted changes to that setting. The default values and validation functions
are defined in the rcsetup module, and are used to construct the rcParams global
object which stores the settings and is referenced throughout matplotlib.
These default values should be consistent with the default matplotlibrc file
that actually reflects the values given here. Any additions or deletions to the
parameter set listed here should also be visited to the
:file:`matplotlibrc.template` in matplotlib's root source directory.
"""
import os
import warnings
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib.colors import is_color_like
#interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'fltkagg', 'qtagg', 'qt4agg',
# 'tkagg', 'wx', 'wxagg', 'cocoaagg']
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'FltkAgg', 'MacOSX',
'QtAgg', 'Qt4Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg']
non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk',
'pdf', 'ps', 'svg', 'template']
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
def __init__(self, key, valid, ignorecase=False):
'valid is a list of legal strings'
self.key = key
self.ignorecase = ignorecase
def func(s):
if ignorecase: return s.lower()
else: return s
self.valid = dict([(func(k),k) for k in valid])
def __call__(self, s):
if self.ignorecase: s = s.lower()
if s in self.valid: return self.valid[s]
raise ValueError('Unrecognized %s string "%s": valid strings are %s'
% (self.key, s, self.valid.values()))
def validate_path_exists(s):
'If s is a path, return s, else False'
if os.path.exists(s): return s
else:
raise RuntimeError('"%s" should be a path but it does not exist'%s)
def validate_bool(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_bool_maybe_none(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b=='none': return None
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_float(s):
'convert s to float or raise'
try: return float(s)
except ValueError:
raise ValueError('Could not convert "%s" to float' % s)
def validate_int(s):
'convert s to int or raise'
try: return int(s)
except ValueError:
raise ValueError('Could not convert "%s" to int' % s)
def validate_fonttype(s):
'confirm that this is a Postscript of PDF font type that we know how to convert to'
fonttypes = { 'type3': 3,
'truetype': 42 }
try:
fonttype = validate_int(s)
except ValueError:
if s.lower() in fonttypes.keys():
return fonttypes[s.lower()]
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.keys())
else:
if fonttype not in fonttypes.values():
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.values())
return fonttype
#validate_backend = ValidateInStrings('backend', all_backends, ignorecase=True)
_validate_standard_backends = ValidateInStrings('backend', all_backends, ignorecase=True)
def validate_backend(s):
if s.startswith('module://'): return s
else: return _validate_standard_backends(s)
validate_qt4 = ValidateInStrings('backend.qt4', ['PyQt4', 'PySide'])
validate_toolbar = ValidateInStrings('toolbar',[
'None','classic','toolbar2',
], ignorecase=True)
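# For example (illustrative only): validate_toolbar('TOOLBAR2') returns the
# canonical string 'toolbar2' because the validator was built with
# ignorecase=True, while an unrecognized value raises ValueError listing the
# accepted strings.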
def validate_autolayout(v):
if v:
warnings.warn("figure.autolayout is not currently supported")
def validate_maskedarray(v):
# 2008/12/12: start warning; later, remove all traces of maskedarray
try:
if v == 'obsolete':
return v
except ValueError:
pass
warnings.warn('rcParams key "maskedarray" is obsolete and has no effect;\n'
' please delete it from your matplotlibrc file')
class validate_nseq_float:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n floats or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [float(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to floats')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [float(val) for val in s]
class validate_nseq_int:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n ints or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [int(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to ints')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [int(val) for val in s]
def validate_color(s):
'return a valid color arg'
try:
if s.lower() == 'none':
return 'None'
except AttributeError:
pass
if is_color_like(s):
return s
stmp = '#' + s
if is_color_like(stmp):
return stmp
# If it is still valid, it must be a tuple.
colorarg = s
msg = ''
if s.find(',')>=0:
# get rid of grouping symbols
stmp = ''.join([ c for c in s if c.isdigit() or c=='.' or c==','])
vals = stmp.split(',')
if len(vals)!=3:
msg = '\nColor tuples must be length 3'
else:
try:
colorarg = [float(val) for val in vals]
except ValueError:
msg = '\nCould not convert all entries to floats'
if not msg and is_color_like(colorarg):
return colorarg
raise ValueError('%s does not look like a color arg%s'%(s, msg))
def validate_colorlist(s):
'return a list of colorspecs'
if type(s) is str:
return [validate_color(c.strip()) for c in s.split(',')]
else:
assert type(s) in [list, tuple]
return [validate_color(c) for c in s]
def validate_stringlist(s):
'return a list'
if type(s) is str:
return [ v.strip() for v in s.split(',') ]
else:
assert type(s) in [list,tuple]
return [ str(v) for v in s ]
validate_orientation = ValidateInStrings('orientation',[
'landscape', 'portrait',
])
def validate_aspect(s):
if s in ('auto', 'equal'):
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid aspect specification')
def validate_fontsize(s):
if type(s) is str:
s = s.lower()
if s in ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large',
'xx-large', 'smaller', 'larger']:
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid font size')
def validate_font_properties(s):
parse_fontconfig_pattern(s)
return s
validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'stixsans', 'custom'])
validate_mathtext_default = ValidateInStrings(
'default', "rm cal it tt sf bf default bb frak circled scr regular".split())
validate_verbose = ValidateInStrings('verbose',[
'silent', 'helpful', 'debug', 'debug-annoying',
])
validate_cairo_format = ValidateInStrings('cairo_format',
['png', 'ps', 'pdf', 'svg'],
ignorecase=True)
validate_ps_papersize = ValidateInStrings('ps_papersize',[
'auto', 'letter', 'legal', 'ledger',
'a0', 'a1', 'a2','a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
], ignorecase=True)
def validate_ps_distiller(s):
if type(s) is str:
s = s.lower()
if s in ('none',None):
return None
elif s in ('false', False):
return False
elif s in ('ghostscript', 'xpdf'):
return s
else:
raise ValueError('matplotlibrc ps.usedistiller must either be none, ghostscript or xpdf')
validate_joinstyle = ValidateInStrings('joinstyle',['miter', 'round', 'bevel'], ignorecase=True)
validate_capstyle = ValidateInStrings('capstyle',['butt', 'round', 'projecting'], ignorecase=True)
validate_negative_linestyle = ValidateInStrings('negative_linestyle',['solid', 'dashed'], ignorecase=True)
def validate_negative_linestyle_legacy(s):
try:
res = validate_negative_linestyle(s)
return res
except ValueError:
dashes = validate_nseq_float(2)(s)
warnings.warn("Deprecated negative_linestyle specification; use 'solid' or 'dashed'")
return (0, dashes) # (offset, (solid, blank))
def validate_tkpythoninspect(s):
# Introduced 2010/07/05
warnings.warn("tk.pythoninspect is obsolete, and has no effect")
return validate_bool(s)
validate_legend_loc = ValidateInStrings('legend_loc',[
'best',
'upper right',
'upper left',
'lower left',
'lower right',
'right',
'center left',
'center right',
'lower center',
'upper center',
'center',
], ignorecase=True)
def deprecate_svg_embed_char_paths(value):
warnings.warn("svg.embed_char_paths is deprecated. Use svg.fonttype instead.")
validate_svg_fonttype = ValidateInStrings('fonttype', ['none', 'path', 'svgfont'])
class ValidateInterval:
"""
Value must be in interval
"""
def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
self.vmin = vmin
self.vmax = vmax
self.cmin = closedmin
self.cmax = closedmax
def __call__(self, s):
try: s = float(s)
except: raise RuntimeError('Value must be a float; found "%s"'%s)
if self.cmin and s<self.vmin:
raise RuntimeError('Value must be >= %f; found "%f"'%(self.vmin, s))
elif not self.cmin and s<=self.vmin:
raise RuntimeError('Value must be > %f; found "%f"'%(self.vmin, s))
if self.cmax and s>self.vmax:
raise RuntimeError('Value must be <= %f; found "%f"'%(self.vmax, s))
elif not self.cmax and s>=self.vmax:
raise RuntimeError('Value must be < %f; found "%f"'%(self.vmax, s))
return s
# a map from key -> value, converter
defaultParams = {
'backend' : ['Agg', validate_backend], # agg is certainly present
'backend_fallback' : [True, validate_bool], # agg is certainly present
'backend.qt4' : ['PyQt4', validate_qt4],
'toolbar' : ['toolbar2', validate_toolbar],
'datapath' : [None, validate_path_exists], # handled by _get_data_path_cached
'interactive' : [False, validate_bool],
'timezone' : ['UTC', str],
# the verbosity setting
'verbose.level' : ['silent', validate_verbose],
'verbose.fileo' : ['sys.stdout', str],
# line props
'lines.linewidth' : [1.0, validate_float], # line width in points
'lines.linestyle' : ['-', str], # solid line
'lines.color' : ['b', validate_color], # blue
'lines.marker' : ['None', str], # black
'lines.markeredgewidth' : [0.5, validate_float],
'lines.markersize' : [6, validate_float], # markersize, in points
'lines.antialiased' : [True, validate_bool], # antialised (no jaggies)
'lines.dash_joinstyle' : ['round', validate_joinstyle],
'lines.solid_joinstyle' : ['round', validate_joinstyle],
'lines.dash_capstyle' : ['butt', validate_capstyle],
'lines.solid_capstyle' : ['projecting', validate_capstyle],
# patch props
'patch.linewidth' : [1.0, validate_float], # line width in points
'patch.edgecolor' : ['k', validate_color], # black
'patch.facecolor' : ['b', validate_color], # blue
'patch.antialiased' : [True, validate_bool], # antialised (no jaggies)
# font props
'font.family' : ['sans-serif', str], # used by text object
'font.style' : ['normal', str], #
'font.variant' : ['normal', str], #
'font.stretch' : ['normal', str], #
'font.weight' : ['normal', str], #
'font.size' : [12, validate_float], # Base font size in points
'font.serif' : [['Bitstream Vera Serif', 'DejaVu Serif',
'New Century Schoolbook', 'Century Schoolbook L',
'Utopia', 'ITC Bookman', 'Bookman',
'Nimbus Roman No9 L','Times New Roman',
'Times','Palatino','Charter','serif'],
validate_stringlist],
'font.sans-serif' : [['Bitstream Vera Sans', 'DejaVu Sans',
'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
validate_stringlist],
'font.cursive' : [['Apple Chancery','Textile','Zapf Chancery',
'Sand','cursive'], validate_stringlist],
    'font.fantasy'    : [['Comic Sans MS','Chicago','Charcoal','Impact',
                          'Western','fantasy'], validate_stringlist],
'font.monospace' : [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono',
'Andale Mono', 'Nimbus Mono L', 'Courier New',
'Courier','Fixed', 'Terminal','monospace'],
validate_stringlist],
# text props
'text.color' : ['k', validate_color], # black
'text.usetex' : [False, validate_bool],
'text.latex.unicode' : [False, validate_bool],
'text.latex.preamble' : [[''], validate_stringlist],
'text.latex.preview' : [False, validate_bool],
'text.dvipnghack' : [None, validate_bool_maybe_none],
'text.hinting' : [True, validate_bool],
'text.antialiased' : [True, validate_bool],
# The following are deprecated and replaced by, e.g., 'font.style'
#'text.fontstyle' : ['normal', str],
#'text.fontangle' : ['normal', str],
#'text.fontvariant' : ['normal', str],
#'text.fontweight' : ['normal', str],
#'text.fontsize' : ['medium', validate_fontsize],
'mathtext.cal' : ['cursive', validate_font_properties],
'mathtext.rm' : ['serif', validate_font_properties],
'mathtext.tt' : ['monospace', validate_font_properties],
'mathtext.it' : ['serif:italic', validate_font_properties],
'mathtext.bf' : ['serif:bold', validate_font_properties],
'mathtext.sf' : ['sans\-serif', validate_font_properties],
'mathtext.fontset' : ['cm', validate_fontset],
'mathtext.default' : ['it', validate_mathtext_default],
'mathtext.fallback_to_cm' : [True, validate_bool],
'image.aspect' : ['equal', validate_aspect], # equal, auto, a number
'image.interpolation' : ['bilinear', str],
'image.cmap' : ['jet', str], # one of gray, jet, etc
'image.lut' : [256, validate_int], # lookup table
    'image.origin' : ['upper', str], # image origin: 'upper' or 'lower'
'image.resample' : [False, validate_bool],
'contour.negative_linestyle' : ['dashed', validate_negative_linestyle_legacy],
# axes props
'axes.axisbelow' : [False, validate_bool],
'axes.hold' : [True, validate_bool],
'axes.facecolor' : ['w', validate_color], # background color; white
'axes.edgecolor' : ['k', validate_color], # edge color; black
'axes.linewidth' : [1.0, validate_float], # edge linewidth
'axes.titlesize' : ['large', validate_fontsize], # fontsize of the axes title
'axes.grid' : [False, validate_bool], # display grid or not
    'axes.labelsize' : ['medium', validate_fontsize], # fontsize of the x and y labels
    'axes.labelweight' : ['normal', str], # weight of the x and y labels
'axes.labelcolor' : ['k', validate_color], # color of axis label
'axes.formatter.limits' : [[-7, 7], validate_nseq_int(2)],
# use scientific notation if log10
# of the axis range is smaller than the
# first or larger than the second
'axes.formatter.use_locale' : [False, validate_bool], # Use the current locale to format ticks
'axes.unicode_minus' : [True, validate_bool],
'axes.color_cycle' : [['b','g','r','c','m','y','k'],
validate_colorlist], # cycle of plot
# line colors
'polaraxes.grid' : [True, validate_bool], # display polar grid or not
'axes3d.grid' : [True, validate_bool], # display 3d grid
#legend properties
'legend.fancybox' : [False,validate_bool],
'legend.loc' : ['upper right',validate_legend_loc], # at some point, this should be changed to 'best'
'legend.isaxes' : [True,validate_bool], # this option is internally ignored - it never served any useful purpose
'legend.numpoints' : [2, validate_int], # the number of points in the legend line
'legend.fontsize' : ['large', validate_fontsize],
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
'legend.shadow' : [False, validate_bool],
'legend.frameon' : [True, validate_bool], # whether or not to draw a frame around legend
# the following dimensions are in fraction of the font size
'legend.borderpad' : [0.4, validate_float], # units are fontsize
'legend.labelspacing' : [0.5, validate_float], # the vertical space between the legend entries
'legend.handlelength' : [2., validate_float], # the length of the legend lines
'legend.handleheight' : [0.7, validate_float], # the length of the legend lines
'legend.handletextpad' : [.8, validate_float], # the space between the legend line and legend text
'legend.borderaxespad' : [0.5, validate_float], # the border between the axes and legend edge
    'legend.columnspacing' : [2., validate_float], # the spacing between legend columns
# tick properties
'xtick.major.size' : [4, validate_float], # major xtick size in points
'xtick.minor.size' : [2, validate_float], # minor xtick size in points
'xtick.major.width': [0.5, validate_float], # major xtick width in points
'xtick.minor.width': [0.5, validate_float], # minor xtick width in points
'xtick.major.pad' : [4, validate_float], # distance to label in points
'xtick.minor.pad' : [4, validate_float], # distance to label in points
'xtick.color' : ['k', validate_color], # color of the xtick labels
'xtick.labelsize' : ['medium', validate_fontsize], # fontsize of the xtick labels
'xtick.direction' : ['in', str], # direction of xticks
'ytick.major.size' : [4, validate_float], # major ytick size in points
'ytick.minor.size' : [2, validate_float], # minor ytick size in points
'ytick.major.width': [0.5, validate_float], # major ytick width in points
'ytick.minor.width': [0.5, validate_float], # minor ytick width in points
'ytick.major.pad' : [4, validate_float], # distance to label in points
'ytick.minor.pad' : [4, validate_float], # distance to label in points
'ytick.color' : ['k', validate_color], # color of the ytick labels
'ytick.labelsize' : ['medium', validate_fontsize], # fontsize of the ytick labels
'ytick.direction' : ['in', str], # direction of yticks
'grid.color' : ['k', validate_color], # grid color
'grid.linestyle' : [':', str], # dotted
'grid.linewidth' : [0.5, validate_float], # in points
# figure props
# figure size in inches: width by height
'figure.figsize' : [ [8.0,6.0], validate_nseq_float(2)],
'figure.dpi' : [ 80, validate_float], # DPI
'figure.facecolor' : [ '0.75', validate_color], # facecolor; scalar gray
'figure.edgecolor' : [ 'w', validate_color], # edgecolor; white
'figure.autolayout' : [ False, validate_autolayout],
'figure.subplot.left' : [0.125, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.right' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.bottom' : [0.1, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.top' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.wspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'figure.subplot.hspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'savefig.dpi' : [100, validate_float], # DPI
'savefig.facecolor' : ['w', validate_color], # facecolor; white
'savefig.edgecolor' : ['w', validate_color], # edgecolor; white
    'savefig.orientation' : ['portrait', validate_orientation], # orientation of the saved figure
'savefig.extension' : ['auto', str], # what to add to extensionless filenames
'cairo.format' : ['png', validate_cairo_format],
'tk.window_focus' : [False, validate_bool], # Maintain shell focus for TkAgg
'tk.pythoninspect' : [False, validate_tkpythoninspect], # obsolete
'ps.papersize' : ['letter', validate_ps_papersize], # Set the papersize/type
    'ps.useafm' : [False, validate_bool], # use of afm fonts, results in small files
'ps.usedistiller' : [False, validate_ps_distiller], # use ghostscript or xpdf to distill ps output
'ps.distiller.res' : [6000, validate_int], # dpi
'ps.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'pdf.compression' : [6, validate_int], # compression level from 0 to 9; 0 to disable
'pdf.inheritcolor' : [False, validate_bool], # ignore any color-setting commands from the frontend
'pdf.use14corefonts' : [False, validate_bool], # use only the 14 PDF core fonts
# embedded in every PDF viewing application
'pdf.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'svg.image_inline' : [True, validate_bool], # write raster image data directly into the svg file
'svg.image_noscale' : [False, validate_bool], # suppress scaling of raster data embedded in SVG
'svg.embed_char_paths' : [True, deprecate_svg_embed_char_paths], # True to save all characters as paths in the SVG
'svg.fonttype' : ['path', validate_svg_fonttype],
'docstring.hardcopy' : [False, validate_bool], # set this when you want to generate hardcopy docstring
    'plugins.directory' : ['.matplotlib_plugins', str], # where the plugin directory is located
'path.simplify' : [True, validate_bool],
'path.simplify_threshold' : [1.0 / 9.0, ValidateInterval(0.0, 1.0)],
'path.snap' : [True, validate_bool],
'agg.path.chunksize' : [0, validate_int], # 0 to disable chunking;
# recommend about 20000 to
# enable. Experimental.
# key-mappings
'keymap.fullscreen' : ['f', validate_stringlist],
'keymap.home' : [['h', 'r', 'home'], validate_stringlist],
'keymap.back' : [['left', 'c', 'backspace'], validate_stringlist],
'keymap.forward' : [['right', 'v'], validate_stringlist],
'keymap.pan' : ['p', validate_stringlist],
'keymap.zoom' : ['o', validate_stringlist],
'keymap.save' : ['s', validate_stringlist],
'keymap.grid' : ['g', validate_stringlist],
'keymap.yscale' : ['l', validate_stringlist],
'keymap.xscale' : [['k', 'L'], validate_stringlist],
'keymap.all_axes' : ['a', validate_stringlist],
# sample data
'examples.download' : [True, validate_bool],
'examples.directory' : ['', str],
}
if __name__ == '__main__':
rc = defaultParams
rc['datapath'][0] = '/'
for key in rc:
if not rc[key][1](rc[key][0]) == rc[key][0]:
print "%s: %s != %s"%(key, rc[key][1](rc[key][0]), rc[key][0])
| gpl-3.0 |
IBT-FMI/SAMRI | samri/pipelines/diagnostics.py | 1 | 9355 | from os import path, listdir, getcwd, remove
import inspect
import json
import re
import shutil
from copy import deepcopy
from itertools import product
import argh
import nipype.interfaces.ants as ants
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
import nipype.pipeline.engine as pe
import pandas as pd
from nipype.interfaces import fsl, nipy, bru2nii, spm
from samri.pipelines.extra_functions import force_dummy_scans, get_tr
from samri.pipelines.utils import out_path, container
from samri.utilities import N_PROCS
#set all outputs to compressed NIfTI
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
# Python2 compatibility
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
@argh.arg('-e','--exclude', type=json.loads)
@argh.arg('-i','--include', type=json.loads)
def diagnose(bids_base,
components=None,
debug=False,
exclude={},
include={},
keep_crashdump=False,
keep_work=False,
match_regex='.+/sub-(?P<sub>[a-zA-Z0-9]+)/ses-(?P<ses>[a-zA-Z0-9]+)/.*?_task-(?P<task>[a-zA-Z0-9]+)_acq-(?P<acq>[a-zA-Z0-9]+)_run-(?P<run>[a-zA-Z0-9]+)_(?P<mod>[a-zA-Z0-9]+).(?:nii|nii\.gz)',
n_procs=N_PROCS,
realign="time",
tr=None,
workflow_name="diagnostic",
):
    '''Run a basic independent component analysis diagnostic (using FSL's MELODIC) on functional MRI data stored in a BIDS directory tree.
Parameters
----------
    bids_base : string
Path to the top level of a BIDS directory tree for which to perform the diagnostic.
components : int, optional
Number of independent components to produce for each functional measurement; if evaluated as False, the number of components is automatically optimized for the given data by FSL's MELODIC.
debug : bool, optional
Enable full nipype debugging support for the workflow construction and execution.
exclude : dict, optional
A dictionary with any subset of 'subject', 'session', 'acquisition', 'task', 'modality', and 'path' as keys and corresponding identifiers as values.
This is a blacklist: if this is specified only non-matching entries will be included in the analysis.
include : dict, optional
A dictionary with any subset of 'subject', 'session', 'acquisition', 'task', 'modality', and 'path' as keys and corresponding identifiers as values.
This is a whitelist: if this is specified only matching entries will be included in the analysis.
keep_crashdump : bool, optional
        Whether to keep the crashdump directory (containing all the crash reports for intermediary workflow steps, as managed by nipype).
This is useful for debugging and quality control.
keep_work : bool, optional
        Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipype).
This is useful for debugging and quality control.
match_regex : str, optional
Regex matching pattern by which to select input files. Has to contain groups named "sub", "ses", "acq", "task", and "mod".
n_procs : int, optional
Maximum number of processes which to simultaneously spawn for the workflow.
If not explicitly defined, this is automatically calculated from the number of available cores and under the assumption that the workflow will be the main process running for the duration that it is running.
realign : {"space","time","spacetime",""}
        Parameter that dictates slice-timing correction and realignment of slices. "time" (FSL.SliceTimer) is the default, since it works safely. Use the others only with caution!
tr : int, optional
Repetition time (in seconds); if evaluated as False, the TR will be read from the NIfTI header of each file individually.
workflow_name : string, optional
Name of the workflow execution. The output will be saved one level above the bids_base, under a directory bearing the name given here.
'''
bids_base = path.abspath(path.expanduser(bids_base))
datafind = nio.DataFinder()
datafind.inputs.root_paths = bids_base
datafind.inputs.match_regex = match_regex
datafind_res = datafind.run()
data_selection = zip(*[datafind_res.outputs.sub, datafind_res.outputs.ses, datafind_res.outputs.acq, datafind_res.outputs.task, datafind_res.outputs.mod, datafind_res.outputs.out_paths])
data_selection = [list(i) for i in data_selection]
data_selection = pd.DataFrame(data_selection,columns=('subject','session','acquisition','task','modality','path'))
data_selection = data_selection.sort_values(['session', 'subject'], ascending=[1, 1])
if exclude:
for key in exclude:
data_selection = data_selection[~data_selection[key].isin(exclude[key])]
if include:
for key in include:
data_selection = data_selection[data_selection[key].isin(include[key])]
data_selection['out_path'] = ''
if data_selection['path'].str.contains('.nii.gz').any():
data_selection['out_path'] = data_selection['path'].apply(lambda x: path.basename(path.splitext(path.splitext(x)[0])[0]+'_MELODIC'))
else:
data_selection['out_path'] = data_selection['path'].apply(lambda x: path.basename(path.splitext(x)[0]+'_MELODIC'))
paths = data_selection['path']
infosource = pe.Node(interface=util.IdentityInterface(fields=['path'], mandatory_inputs=False), name="infosource")
infosource.iterables = [('path', paths)]
dummy_scans = pe.Node(name='dummy_scans', interface=util.Function(function=force_dummy_scans,input_names=inspect.getargspec(force_dummy_scans)[0], output_names=['out_file','deleted_scans']))
dummy_scans.inputs.desired_dummy_scans = 10
bids_filename = pe.Node(name='bids_filename', interface=util.Function(function=out_path,input_names=inspect.getargspec(out_path)[0], output_names=['filename']))
bids_filename.inputs.selection_df = data_selection
bids_container = pe.Node(name='path_container', interface=util.Function(function=container,input_names=inspect.getargspec(container)[0], output_names=['container']))
bids_container.inputs.selection_df = data_selection
datasink = pe.Node(nio.DataSink(), name='datasink')
datasink.inputs.base_directory = path.abspath(path.join(bids_base,'..',workflow_name))
datasink.inputs.parameterization = False
melodic = pe.Node(interface=fsl.model.MELODIC(), name="melodic")
if tr:
melodic.inputs.tr_sec = tr
melodic.inputs.report = True
if components:
melodic.inputs.dim = int(components)
workflow_connections = [
(infosource, dummy_scans, [('path', 'in_file')]),
(infosource, bids_filename, [('path', 'in_path')]),
(bids_filename, bids_container, [('filename', 'out_path')]),
(bids_filename, melodic, [('filename', 'out_dir')]),
(bids_container, datasink, [('container', 'container')]),
(melodic, datasink, [('out_dir', 'func')]),
]
if not tr:
report_tr = pe.Node(name='report_tr', interface=util.Function(function=get_tr,input_names=inspect.getargspec(get_tr)[0], output_names=['tr']))
report_tr.inputs.ndim = 4
workflow_connections.extend([
(infosource, report_tr, [('path', 'in_file')]),
(report_tr, melodic, [('tr', 'tr_sec')]),
])
if realign == "space":
realigner = pe.Node(interface=spm.Realign(), name="realigner")
realigner.inputs.register_to_mean = True
workflow_connections.extend([
(dummy_scans, realigner, [('out_file', 'in_file')]),
(realigner, melodic, [('out_file', 'in_files')]),
])
elif realign == "spacetime":
realigner = pe.Node(interface=nipy.SpaceTimeRealigner(), name="realigner")
realigner.inputs.slice_times = "asc_alt_2"
if tr:
realigner.inputs.tr = tr
else:
workflow_connections.extend([
(report_tr, realigner, [('tr', 'tr')]),
])
#3 for coronal slices (2 for horizontal, 1 for sagittal)
realigner.inputs.slice_info = 3
workflow_connections.extend([
(dummy_scans, realigner, [('out_file', 'in_file')]),
(realigner, melodic, [('out_file', 'in_files')]),
])
elif realign == "time":
realigner = pe.Node(interface=fsl.SliceTimer(), name="slicetimer")
if tr:
realigner.inputs.time_repetition = tr
else:
workflow_connections.extend([
(report_tr, realigner, [('tr', 'time_repetition')]),
])
workflow_connections.extend([
(dummy_scans, realigner, [('out_file', 'in_file')]),
(realigner, melodic, [('slice_time_corrected_file', 'in_files')]),
])
else:
workflow_connections.extend([
(dummy_scans, melodic, [('out_file', 'in_files')]),
])
crashdump_dir = path.abspath(path.join(bids_base,'..',workflow_name+'_crashdump'))
workflow_config = {'execution': {'crashdump_dir': crashdump_dir}}
if debug:
workflow_config['logging'] = {
'workflow_level':'DEBUG',
'utils_level':'DEBUG',
'interface_level':'DEBUG',
'filemanip_level':'DEBUG',
'log_to_file':'true',
}
workdir_name = workflow_name+'_work'
workflow = pe.Workflow(name=workdir_name)
workflow.connect(workflow_connections)
workflow.base_dir = path.abspath(path.join(bids_base,'..'))
workflow.config = workflow_config
workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph.dot"), graph2use="hierarchical", format="png")
if not keep_work or not keep_crashdump:
try:
workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_procs})
except RuntimeError:
pass
else:
workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_procs})
if not keep_work:
shutil.rmtree(path.join(workflow.base_dir,workdir_name))
if not keep_crashdump:
try:
shutil.rmtree(crashdump_dir)
except (FileNotFoundError, OSError):
pass
return
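# Illustrative usage sketch (added note, not part of the original module); the
# BIDS path and parameter values below are hypothetical:
#   from samri.pipelines.diagnostics import diagnose
#   diagnose('~/ni_data/my_bids_dataset', components=10, realign='time',
#            keep_work=False, keep_crashdump=False)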
| gpl-3.0 |
aabadie/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
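# Added note: fit_inverse_transform=True makes KernelPCA learn an approximate
# pre-image map, which is what enables the inverse_transform call above.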
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
MTgeophysics/mtpy | legacy/pek2dplotting.py | 1 | 4152 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 08 11:16:46 2014
@author: a1655681
"""
import matplotlib.pyplot as plt
import numpy as np
class PlotResponses():
"""
plot responses and data from any 2d model, requires two dictionaries
containing period, resistivity, phase arrays with the following dimensions:
    period: (num_stations, num_periods)
resistivity: (num_stations, num_periods, 2, 2)
phase: (num_stations, num_periods, 2, 2)
"""
def __init__(self, measured, modelled, **input_parameters):
self.period_mod = modelled['period']
self.period_meas = measured['period']
self.resistivity_mod = modelled['resistivity']
self.resistivity_meas = measured['resistivity']
self.phase_mod = modelled['phase']
self.phase_meas = measured['phase']
self.subplot_rows = 1 # number of rows to plot
self.plot_tf = True # whether or not to plot on creation of an object
self.symbol_dict = {
'measured': '--',
'modelled': '-',
'colors': [
'b',
'r']}
self.xlim = [1e-3, 1e3]
self.ylim = {'resistivity': [[0.01, 1], [
1, 100]], 'phase': [[0, 90], [0, 90]]}
for key in input_parameters.keys():
setattr(self, key, input_parameters[key])
if self.plot_tf:
self.plot()
def __set_axis_params(self, parameter, component=1, labels='xy'):
"""
component = diagonals or offdiagonals, 1 for diags, 0 for offdiags
"""
ax = plt.gca()
plt.xscale('log', nonposx='clip')
if parameter == 'resistivity':
plt.yscale('log', nonposy='clip')
plt.xlim(self.xlim)
plt.ylim(self.ylim[parameter][component])
if 'x' not in labels:
ax.xaxis.set_visible(False)
if 'y' not in labels:
ax.yaxis.set_visible(False)
# def prepare_dictionaries(self):
def plot(self):
"""
"""
nsp = len(self.resistivity_mod)
# rows, columns in subplots
# multiply rows by 2 as we are plotting res and phase
spr = self.subplot_rows * 2
# number of columns
spc = 2 * int(np.ceil(float(nsp) / float(self.subplot_rows)))
# starting subplot number
row = -1
for i in range(nsp):
if (2 * i) % spc == 0:
row += 1
spn = 2 * i + 1 + spc * row
for parameter, meas, mod in [['resistivity', self.resistivity_meas[i], self.resistivity_mod[i]],
['phase', self.phase_meas[i] % 180, self.phase_mod[i] % 180]]:
print i
for ii in range(2):
for jj in range(2):
labels = ''
if row == int(spr / 2.) - 1:
if parameter == 'phase':
labels += 'x'
if (2 * i) % spc == 0:
if (ii != jj):
labels += 'y'
if ii == jj:
cpt = 0
plt.subplot(spr, spc, spn + 1)
else:
cpt = 1
plt.subplot(spr, spc, spn)
if ii == 0:
c = self.symbol_dict['colors'][0]
else:
c = self.symbol_dict['colors'][1]
plt.plot(self.period_meas[i], meas[:, ii, jj],
self.symbol_dict['measured'],
color=c)
plt.plot(self.period_mod[i], mod[:, ii, jj],
self.symbol_dict['modelled'],
color=c)
self.__set_axis_params(parameter,
component=cpt,
labels=labels)
spn += spc
plt.subplots_adjust(hspace=0.02, wspace=0.02)
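# Illustrative usage sketch (added, not part of the original module): builds the
# two input dictionaries from random data just to exercise PlotResponses. The
# station and period counts are arbitrary; note that plot() indexes period per
# station, so the period vector is tiled to one row per station here.
if __name__ == '__main__':
    nstations, nperiods = 2, 20
    periods = np.logspace(-2, 2, nperiods)
    def _dummy():
        return {'period': np.tile(periods, (nstations, 1)),
                'resistivity': np.random.lognormal(0., 1., (nstations, nperiods, 2, 2)),
                'phase': np.random.uniform(0., 90., (nstations, nperiods, 2, 2))}
    PlotResponses(_dummy(), _dummy(), subplot_rows=1)
    plt.show()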
| gpl-3.0 |
mattilyra/scikit-learn | sklearn/neighbors/graph.py | 14 | 6663 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self):
"""Return the query based on include_self param"""
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default=False.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
harry0519/nsnqt | nsnqtlib/strategies/MACD.py | 1 | 5245 | # -*- coding:utf-8 -*-
import pandas as pd
import tushare as ts
import traceback
from nsnqtlib.config import DB_SERVER,DB_PORT,USER,PWD,AUTHDBNAME
from nsnqtlib.db.mongodb import MongoDB
class macd(object):
'''
    MACD-based screening strategy: scan the stock collections stored in MongoDB for bullish DIFF/DEM (golden-cross) signals and report buy candidates.
'''
def __init__(self):
self.db = MongoDB(DB_SERVER,DB_PORT,USER,PWD,AUTHDBNAME)
self.looplist = []
def setlooplist(self,lst=[]):
excludelst = ["399001.SZ","399006.SZ","000001.SH","000300.SH","399005.SZ"]
if not lst:
self.looplist = [i for i in self.db.getallcollections("stockdatas") if i not in excludelst]
else:
self.looplist = lst
return self.looplist
def buycondition(self,data):
'''out = ["macd","cross_dist"]'''
if data["macd"] > 0 and data["cross_dist"] == 0 \
and data["diff12_26"] < 0:
return True
return False
def enhancebuycondition(self,data):
if data["macd"] < 0 and data["diff12_26"] < 0:
return True
return False
def sellcondition(self,data):
pass
def getlastbuylst(self,):
dbname = "stockdatas"
num = 1
filt = [{"$sort":{"date":-1}},{"$limit":num}]
buylst = []
enhancebuylst = []
for i in self.looplist:
collection = i
query = self.db.read_data(dbname,collection,filt,is_aggregate=True)
data = [i for i in query]
if self.buycondition(data[0]):
buylst.append(i)
# print ("normal condition buy stock:{}".format(i))
query = self.db.read_data(dbname,collection,[{"$sort":{"date":-1}},{"$match":{"cross_dist":0,"macd":{"$ne":0}}},{"$limit":2}],is_aggregate=True)
newdata = [i for i in query]
if self.enhancebuycondition(newdata[1]):
enhancebuylst.append(i)
print ("enhance condition buy stock:{}".format(i))
return buylst,enhancebuylst
def gethistorybuylst(self,starttime="2011-01-01"):
dbname = "stockdatas"
filt = [{"$sort":{"date":1}},{"$match":{"date":{"$gt":starttime}}}]
buylst = []
enhancebuylst = []
for i in self.looplist:
collection = i
query = self.db.read_data(dbname,collection,filt,is_aggregate=True)
data = [i for i in query]
for record in data:
if self.buycondition(record):
buylst.append([i,record["date"],record["close"]])
# print ([i,record["date"],record["close"]])
idx = data.index(record)
precross = idx - data[idx-1]["cross_dist"] - 1
if precross >= 0 and self.enhancebuycondition(data[precross]):
enhancebuylst.append(i)
print ([i,record["date"],record["close"]])
return buylst,enhancebuylst
def gettraderecord(self,starttime="2011-01-01"):
pass
def getcurrentdata(self):
        '''Fields returned by tushare.get_today_all(): code: ticker, name: stock name, changepercent: percent change, trade: latest price, open: opening price, high: day high, low: day low, settlement: previous close,
        volume: trading volume, turnoverratio: turnover ratio, amount: turnover amount, per: price/earnings ratio, pb: price/book ratio, mktcap: total market capitalisation, nmc: free-float market capitalisation
'''
out = ["code","trade","open","high","low","settlement"]
rst = ts.get_today_all()
return rst[out].set_index('code')
def gettradebuylst(self):
dbname = "stockdatas"
num = 1
emaslow = 26
emafast = 12
demday = 9
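        # Added note on the incremental update used below: for an N-period EMA,
        #   EMA_t = (EMA_{t-1} * (N - 1) + 2 * price_t) / (N + 1),
        # DIFF = EMA_fast - EMA_slow, DEM is a 9-period EMA of DIFF, and the
        # histogram is MACD = 2 * (DIFF - DEM). A sign change of the histogram
        # recomputed with today's live price flags an intraday golden cross.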
filt = [{"$sort":{"date":-1}},{"$limit":num}]
buylst = []
currentdata = self.getcurrentdata()
for i in self.looplist:
collection = i
query = self.db.read_data(dbname,collection,filt,is_aggregate=True)
data = [i for i in query]
s_ema = data[0]["ema26"]
f_ema = data[0]["ema12"]
dem = data[0]["dem9"]
macd = data[0]["macd"]
try:
c_close = currentdata["trade"].ix[collection.split(".")[0]]
n_s_ema = (s_ema*(emaslow-1)+ 2*c_close)/(emaslow+1)
n_f_ema = (f_ema*(emafast-1)+ 2*c_close)/(emafast+1)
n_diff = n_f_ema-n_s_ema
n_dem = (dem*(demday-1)+ 2*n_diff)/(demday+1)
n_macd = 2*(n_diff-n_dem)
if macd*n_macd < 0 and n_macd >0:
buylst.append(collection)
            except Exception:
                print ("error stock:{}".format(collection))
[print ("buylist:{}".format(collection)) for collection in buylst]
return buylst
def save2db(self):
pass
def save2csv(self):
pass
if __name__ == '__main__':
s = macd()
s.setlooplist()
# s.getlastbuylst()
s.gettradebuylst()
# s.gethistorybuylst()
| bsd-2-clause |
iseokho/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
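        # Added note: lab_vec holds one randomly chosen label per case (-1 when a
        # case has no labels) as a column vector, while lab_mat is the multi-hot
        # label matrix; both are tiled data_mult times so every augmented view of
        # a case shares its labels.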
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
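        # Added note: the net effect is data := (data + coeff * noise) / (1 + coeff),
        # which keeps the overall scale of the (noisy) training data comparable to
        # the noise-free test data.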
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
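                # Added note: the 9 multiview crops form a 3x3 grid of window origins
                # at offsets {0, border, 2*border} along each axis, i.e. the corners,
                # edge centres and centre of the padded image.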
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |
bugobliterator/ardupilot-chibios | Tools/LogAnalyzer/tests/TestOptFlow.py | 26 | 14969 | from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
    # Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
    # correctly aligned, is focussed, and that the test is performed over a textured surface with adequate lighting.
    # Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_ENABLE to 1 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
    # 8) Enter the following to run the analysis: 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
max_rate_threshold = 2.0 # if the gyro rate is greter than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
min_num_points = 100 # minimum number of points required for a curve fit - this is necessary, but not sufficient condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient roll data pointsa\n"
return
# resample roll test data excluding data before first movement and after last movement
# also exclude data where there is insufficient angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
            for i in range(len(flowX)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient pitch data pointsa\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
            for i in range(len(flowY)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
# taking the exisiting scale factor parameters into account, calculate the parameter values reequired to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
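            # Added note: the fitted slope maps gyro rate to flow rate with the current
            # scaler applied, so the new scaler solves
            #   (1 + 0.001 * new) = (1 + 0.001 * old) / slope;
            # e.g. with old = 0 and a fitted slope of 0.8 the recommendation is 250.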
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler,flow_fyscaler)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
| gpl-3.0 |
YihaoLu/statsmodels | statsmodels/examples/ex_kernel_test_functional_li_wang.py | 34 | 4162 | # -*- coding: utf-8 -*-
"""Example TestFForm with Li Wang DGP1
Created on Tue Jan 08 19:03:20 2013
Author: Josef Perktold
trying to replicate some examples in
Li, Q., and Suojin Wang. 1998. "A Simple Consistent Bootstrap Test for a
Parametric Regression Function."
Journal of Econometrics 87 (1) (November): 145-165.
doi:10.1016/S0304-4076(98)00011-6.
currently DGP1
Monte Carlo with 100 replications
---------------------------------
results
598948
time 11.1642833312
[-0.72505981 0.26514944 0.45681704]
[ 0.74884796 0.22005569 0.3004892 ]
reject at [0.2, 0.1, 0.05] (row 1: normal, row 2: bootstrap)
[[ 0.55 0.24 0.01]
[ 0.29 0.16 0.06]]
bw [ 0.11492364 0.11492364]
tst.test_stat -1.40274609515
Not Significant
tst.boots_results min, max -2.03386582198 2.32562183511
lower tail bootstrap p-value 0.077694235589
asymp. normal p-value (2-sided) 0.160692566481
mean and std in Li and Wang for n=1 are -0.764 and 0.621
results look reasonable now
Power
-----
true model: quadratic, estimated model: linear
498198
time 8.4588166674
[ 0.50374364 0.3991975 0.25373434]
[ 1.21353172 0.28669981 0.25461368]
reject at [0.2, 0.1, 0.05] (row 1: normal, row 2: bootstrap)
[[ 0.66 0.78 0.82]
[ 0.46 0.61 0.74]]
bw [ 0.11492364 0.11492364]
tst.test_stat 0.505426717024
Not Significant
tst.boots_results min, max -1.67050998463 3.39835350718
lower tail bootstrap p-value 0.892230576441
upper tail bootstrap p-value 0.107769423559
asymp. normal p-value (2-sided) 0.613259157709
asymp. normal p-value (upper) 0.306629578855
"""
from __future__ import print_function
if __name__ == '__main__':
import time
import numpy as np
from scipy import stats
from statsmodels.regression.linear_model import OLS
#from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.kernel_extras as smke
seed = np.random.randint(999999)
#seed = 661176
print(seed)
np.random.seed(seed)
sig_e = 0.1 #0.5 #0.1
nobs, k_vars = 100, 1
t0 = time.time()
b_res = []
for i in range(100):
x = np.random.uniform(0, 1, size=(nobs, k_vars))
x.sort(0)
order = 2
exog = x**np.arange(1, order + 1)
beta = np.array([2, -0.2])[:order+1-1] # 1. / np.arange(1, order + 2)
y_true = np.dot(exog, beta)
y = y_true + sig_e * np.random.normal(size=nobs)
endog = y
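# note: with beta = [2, -0.2] the true DGP is quadratic, y = 2*x - 0.2*x**2 + noise, while the
# null model fitted below uses only the linear term exog[:, :1], i.e. this run exercises the
# power case described in the module docstring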
mod_ols = OLS(endog, exog[:,:1])
#res_ols = mod_ols.fit()
#'cv_ls'[1000, 0.5]
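# fixed bandwidth (the 'lw' presumably refers to Li and Wang): the standard deviation of a
# uniform(0, 1) regressor is 1/sqrt(12), scaled by nobs**(-1/5)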
bw_lw = [1./np.sqrt(12.) * nobs**(-0.2)]*2 #(-1. / 5.)
tst = smke.TestFForm(endog, exog[:,:1], bw=bw_lw, var_type='c',
fform=lambda x,p: mod_ols.predict(p,x),
estimator=lambda y,x: OLS(y,x).fit().params,
nboot=399)
b_res.append([tst.test_stat,
stats.norm.sf(tst.test_stat),
(tst.boots_results > tst.test_stat).mean()])
t1 = time.time()
b_res = np.asarray(b_res)
print('time', (t1 - t0) / 60.)
print(b_res.mean(0))
print(b_res.std(0))
print('reject at [0.2, 0.1, 0.05] (row 1: normal, row 2: bootstrap)')
print((b_res[:,1:,None] >= [0.2, 0.1, 0.05]).mean(0))
print('bw', tst.bw)
print('tst.test_stat', tst.test_stat)
print(tst.sig)
print('tst.boots_results min, max', tst.boots_results.min(), tst.boots_results.max())
print('lower tail bootstrap p-value', (tst.boots_results < tst.test_stat).mean())
print('upper tail bootstrap p-value', (tst.boots_results >= tst.test_stat).mean())
from scipy import stats
print('asymp. normal p-value (2-sided)', stats.norm.sf(np.abs(tst.test_stat))*2)
print('asymp. normal p-value (upper)', stats.norm.sf(tst.test_stat))
res_ols = mod_ols.fit()
do_plot=True
if do_plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.')
plt.plot(x, res_ols.fittedvalues)
plt.title('OLS fit')
plt.figure()
plt.hist(tst.boots_results.ravel(), bins=20)
plt.title('bootstrap histogram of test statistic')
plt.show()
| bsd-3-clause |
chili-epfl/shape_learning | scripts/graph_generator.py | 3 | 12482 | # -*- coding: utf-8 -*-
"""
This script generates different kinds of graph outputs using reference shapes
and demo shapes recovered from log files
@author: ferran
"""
import string
import datetime
import glob
import os.path
import re
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import *
import inspect
from shape_learning.shape_modeler import ShapeModeler
"""
Recovers the demo shapes provided by the children, as stored in the log
files within the specified period of time
"""
def getLogShapes(logDir, initDate, endDate, leterList, num_params):
initDate = datetime.datetime.strptime(initDate, "%d-%m-%Y")
endDate = datetime.datetime.strptime(endDate, "%d-%m-%Y")
logset = glob.glob(logDir + '/*.log')
logDate = []
for i in range(len(logset)):
logDate.append(find_between(logset[i], "es-", ".log"))
dates = [datetime.datetime.strptime(ts, "%d-%m-%Y") for ts in logDate]
dates.sort()
sortList = [datetime.datetime.strftime(ts, "%d-%m-%Y") for ts in dates]
alphabet = re.compile(regexp)
fields = {key: [] for key in leterList}
#Prepare the counter structure that records in which log file each shape occurrence appears
sizeLog = len(logset)
count = {key: np.zeros(sizeLog) for key in leterList}
ind = 0
for fileDate in sortList:
logfile = logDir + "/shapes-" + fileDate + ".log"
#print('Evaluating date ... ' + logfile)
date = re.search("([0-9]{2}\-[0-9]{2}\-[0-9]{4})", logfile)
logDate = datetime.datetime.strptime(date.group(1), "%d-%m-%Y")
if (logDate >= initDate and logDate <= endDate):
#print('Opening ... ' + logfile)
#Process file
with open('%s' %logfile, 'r') as inF:
lineNumb = 1
for line in inF:
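#Each demonstration entry is assumed to look roughly like
#"<letter>: ... new ... demonstration ... Path: [x1, x2, ...]"
#(format inferred from the regex and the "Path: [" partition below)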
found = re.search(r'...new\b.*?\bdemonstration\b', line)
if found is not None:
leter = found.group(0)[0]
if alphabet.match(leter):
#print('Match found! ' + leter + ' in line ' + str(lineNumb))
#Keep the sum of each leter occurrence
count[leter][ind] = count[leter][ind] + 1
strShape = line.partition("Path: [")[-1].rpartition(']')[0]
shape = strShape.split(', ')
shape = np.array(map(float, shape))
#The points need to be reshaped to have format (140,1)
shape = np.reshape(shape, (-1, 1))
fields[leter].append(shape)
lineNumb = lineNumb + 1
else:
raise Exception("No log files among the entry dates")
ind = ind +1
return fields, count
"""
Returns the content between two strings
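e.g. find_between("shapes-01-02-2015.log", "es-", ".log") returns "01-02-2015"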
"""
def find_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return ""
"""
Returns a dictionary of ShapeModeler objects, keyed by dataset name, for the datasets whose names match the regexp
"""
def prepareShapesModel(datasetDirectory, num_params):
shapes = {}
nameFilter = re.compile(regexp)
datasets = glob.glob(datasetDirectory + '/*.dat')
for dataset in datasets:
name = os.path.splitext(os.path.basename(dataset))[0]
if nameFilter.match(name):
shapeModeler = ShapeModeler(init_filename = dataset, num_principle_components=num_params)
shapes[name] = shapeModeler
return shapes
"""
Returns a dictionary mapping each key {a, b, c, d, ...} to its reference shape
"""
def getRefShapes(shapesModel, leterList):
refShapes = {key: [] for key in leterList}
for leter in shapesModel:
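#The first sample of the dataset (dataMat[0]) is used as the reference shape for each letter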
refShape = np.reshape(shapesModel[leter].dataMat[0], (-1, 1))
refShapes[leter].append(refShape)
return refShapes
"""
Projects a set of shapes into the corresponding eigenspace
"""
def projectShape(shapesModel, shapes, num_params, leterList):
projShapes = {key: [] for key in leterList}
for leter in shapesModel:
for shape in shapes:
if leter == shape:
i=0
for sample in range(len(shapes[shape])):
projShape = shapesModel[leter].decomposeShape(shapes[leter][sample])
i = i+1
projShapes[leter].append(projShape[0][0:])
#print '%s: is present in both with %i samples' % (leter,i)
return projShapes
"""
Calculates the distances between the demo shapes and their references,
returned per shape in chronological order
"""
def calcDistance(projRefShapes, projDemoShapes, leterList):
distances = {key: [] for key in leterList}
for leter in projRefShapes:
refLeter = projRefShapes[leter]
for i in range(len(projDemoShapes[leter])):
demoLeter = projDemoShapes[leter][i]
dist = np.linalg.norm(demoLeter-refLeter)
distances[leter].append(dist)
return distances
"""
Returns the list of letters matched by the regular expression
"""
def getLeterList():
alphabetList = string.ascii_lowercase
alphabet = re.compile(regexp)
leterList = re.findall(alphabet, alphabetList)
return leterList
"""
Computes, for each type of shape, the distances between the demo shapes and their references using a fixed (static) eigenspace
"""
def DistStaticEigen(datasetDirectory, logDir, num_params, initDate, endDate):
leterList = getLeterList()
#Prepare the Eigenspaces for each shape and store them
shapesModel = prepareShapesModel(datasetDirectory, num_params)
#Obtain the reference shapes and project them into the correspondent eigenspace
refShapes = getRefShapes(shapesModel, leterList)
projRefShapes = projectShape(shapesModel, refShapes, num_params, leterList)
#Obtain the demo shapes from the logs and project them into the eigenspace
demoShapes = getLogShapes(logDir, initDate, endDate, leterList, num_params)
projDemoShapes = projectShape(shapesModel, demoShapes[0], num_params, leterList)
#Calculate the distance between the same type of shape
distances = calcDistance(projRefShapes, projDemoShapes, leterList)
return distances
"""
Computes, for each type of shape, the distances between the demo shapes and
their references, extending the eigenspace with each new demonstration
"""
def DistDynEigen(datasetDirectory, logDir, num_params, initDate, endDate):
leterList = getLeterList()
distances = {key: [] for key in leterList}
#Prepare the INITIAL Eigenspaces for each shape and store them
shapesModel = prepareShapesModel(datasetDirectory, num_params)
#Obtain all demo leters
demoShapes = getLogShapes(logDir, initDate, endDate, leterList, num_params)
demo = demoShapes[0]
separator = demoShapes[1]
#Obtain all reference leters
refShapes = getRefShapes(shapesModel, leterList)
for leter in demo:
numShapes = len(demo[leter])
print '%i occurrences for letter %s' %(numShapes,leter)
for i in range(numShapes):
#There may be no dataset for a letter; in that case
# the reference shape for that letter will be empty
if len(refShapes[leter])>0:
#Project the demo leter
demoShape = {leter: []}
demoShape[leter].append(demo[leter][i])
projDemoShape = projectShape(shapesModel, demoShape, num_params, leterList)
#Project the reference leter
refShape = {leter: []}
refShape[leter].append(refShapes[leter][0])
projRefShape = projectShape(shapesModel, refShape, num_params, leterList)
#Calculate the distance between projections
singleDist = calcDistance(projRefShape, projDemoShape, leterList)
distances[leter].append(singleDist[leter][0])
#Recalculate the eigenspace using the demo letter
shapesModel[leter].extendDataMat(demo[leter][i])
return distances, separator
"""
Generates as many plots as there are shapes matched by the regexp, comparing
distances computed with a static and a dynamic eigenspace
"""
def plotDistances():
leterList = getLeterList()
#Calculations to locate the subplots in the best way
numLeter=len(leterList)
col = np.floor(numLeter**0.5).astype(int)
row = np.ceil(1.*numLeter/col).astype(int)
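#e.g. the full alphabet (26 shapes) yields col=5, row=6, a near-square grid of 30 cells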
print "col\t=\t%d\nrow\t=\t%d\ncol*row\t=\t%d\ntotal\t=\t%d" % (col,row,col*row,numLeter)
fig = plt.figure(1, figsize=(2.*col,2.*row))
subplots_adjust(hspace=0.000)
#Get the distances for Dynamic and Static Eigenspace
distDynEigen = DistDynEigen(datasetDirectory, logDir, num_params, initDate, endDate)
distStaticEigen = DistStaticEigen(datasetDirectory, logDir, num_params, initDate, endDate)
#For the vertical red lines
separator = distDynEigen[1]
distDynEigen = distDynEigen[0]
for i in range(1,numLeter+1):
ax = fig.add_subplot(row,col,i)
#Make the x-axis start at unit 1 for better understanding
ax.plot(range(1,len(distStaticEigen[leterList[i-1]])+1), distStaticEigen[leterList[i-1]], '-bo',
range(1,len(distDynEigen[leterList[i-1]])+1), distDynEigen[leterList[i-1]], '-go')
ax.set_title('Shape: ' + leterList[i-1])
plt.xlabel('Time progression')
plt.ylabel('dist. wrt. Ref')
plt.ylim([0,7])
numLogs = len(separator[leterList[i-1]])
sumat = 0
#The -1 avoids drawing the last separator line
for j in range(numLogs-1):
vline = separator[leterList[i-1]][j]+sumat
#Draw the line only if it is not at 0 or outside the plotted range
if vline > 0 and vline < len(distDynEigen[leterList[i-1]]):
axvline(vline+0.5, color='r')
sumat = separator[leterList[i-1]][j]
fig.set_tight_layout(True)
return fig
"""
Plots all occurrences of a letter in the 3D space defined by the three most
important parameters (the first three principal components)
"""
def plot3D():
leterList = getLeterList()
#Prepare the Eigenspaces for each shape and store them
shapesModel = prepareShapesModel(datasetDirectory, num_params)
#Obtain the demo shapes from the logs and project them into the eigenspace
demoShapes = getLogShapes(logDir, initDate, endDate, leterList, num_params)
projDemoShapes = projectShape(shapesModel, demoShapes[0], num_params, leterList)
#Letter to plot the eigenspace and its instances projected on it
fig = plt.figure(2)
ax = fig.gca(projection='3d')
ax.set_title('Eigenspace of shape: ' + letter)
for i in range(len(projDemoShapes[letter])):
demoLeter = projDemoShapes[letter][i]
ax.scatter(demoLeter[0], demoLeter[1], demoLeter[2])
#Reference letter representing a high value in each of the important parameters
modelx = np.array([2.44,-0.08,-0.5])
modely = np.array([0.17,1.3,-0.06])
modelz = np.array([-2.1,-0.03,2.38])
ax.scatter(modelx[0], modelx[1], modelx[2], c='r')
ax.scatter(modely[0], modely[1], modely[2], c='r')
ax.scatter(modelz[0], modelz[1], modelz[2], c='r')
#Define names and limits
ax.set_xlabel('X axis - F1')
ax.set_ylabel('Y axis - F2')
ax.set_zlabel('Z axis - F3')
ax.set_xlim(-2.5,2.5)
ax.set_ylim(-2.3,1.3)
ax.set_zlim(-2.15,2.40)
return fig
#Values to modify if needed
letter = 'a'
regexp = '[a-z]'
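#regexp selects which shapes are processed; e.g. '[a-d]' would restrict the analysis to shapes a-d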
fileName = inspect.getsourcefile(ShapeModeler);
installDirectory = fileName.split('/lib')[0];
initDate = '01-01-2000';
endDate = '01-01-2100';
num_params = 3
datasetDirectory = installDirectory + '/share/shape_learning/letter_model_datasets/alexis_set_for_children'; #uji_pen_subset
logDir = '../logs';
plotDistances()
plot3D()
plt.show() | isc |
vybstat/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |