Dataset columns:
  repo_name : string, lengths 6-112
  path      : string, lengths 4-204
  copies    : string, lengths 1-3
  size      : string, lengths 4-6
  content   : string, lengths 714-810k
  license   : string, 15 classes
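The records below follow this schema. As a minimal sketch of how such a corpus might be consumed (the dataset identifier "example/code-corpus" is hypothetical, and access through the Hugging Face `datasets` library is an assumption, not something stated by this dump):

```python
# Hedged sketch: stream records with the columns listed above.
# "example/code-corpus" is a hypothetical dataset id, not the real one.
from itertools import islice
from datasets import load_dataset

ds = load_dataset("example/code-corpus", split="train", streaming=True)
for record in islice(ds, 3):
    # Each record pairs a source file's text with repository metadata.
    print(record["repo_name"], record["path"], record["license"],
          len(record["content"]))
```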
repo_name: shangwuhencc/scikit-learn
path: benchmarks/bench_glmnet.py
copies: 297
size: 3848
content:
""" To run this, you'll need to have installed. * glmnet-python * scikit-learn (of course) Does two benchmarks First, we fix a training set and increase the number of samples. Then we plot the computation time as function of the number of samples. In the second benchmark, we increase the number of dimensions of the training set. Then we plot the computation time as function of the number of dimensions. In both cases, only 10% of the features are informative. """ import numpy as np import gc from time import time from sklearn.datasets.samples_generator import make_regression alpha = 0.1 # alpha = 0.01 def rmse(a, b): return np.sqrt(np.mean((a - b) ** 2)) def bench(factory, X, Y, X_test, Y_test, ref_coef): gc.collect() # start time tstart = time() clf = factory(alpha=alpha).fit(X, Y) delta = (time() - tstart) # stop time print("duration: %0.3fs" % delta) print("rmse: %f" % rmse(Y_test, clf.predict(X_test))) print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean()) return delta if __name__ == '__main__': from glmnet.elastic_net import Lasso as GlmnetLasso from sklearn.linear_model import Lasso as ScikitLasso # Delayed import of pylab import pylab as pl scikit_results = [] glmnet_results = [] n = 20 step = 500 n_features = 1000 n_informative = n_features / 10 n_test_samples = 1000 for i in range(1, n + 1): print('==================') print('Iteration %s of %s' % (i, n)) print('==================') X, Y, coef_ = make_regression( n_samples=(i * step) + n_test_samples, n_features=n_features, noise=0.1, n_informative=n_informative, coef=True) X_test = X[-n_test_samples:] Y_test = Y[-n_test_samples:] X = X[:(i * step)] Y = Y[:(i * step)] print("benchmarking scikit-learn: ") scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_)) print("benchmarking glmnet: ") glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_)) pl.clf() xx = range(0, n * step, step) pl.title('Lasso regression on sample dataset (%d features)' % n_features) pl.plot(xx, scikit_results, 'b-', label='scikit-learn') pl.plot(xx, glmnet_results, 'r-', label='glmnet') pl.legend() pl.xlabel('number of samples to classify') pl.ylabel('Time (s)') pl.show() # now do a benchmark where the number of points is fixed # and the variable is the number of features scikit_results = [] glmnet_results = [] n = 20 step = 100 n_samples = 500 for i in range(1, n + 1): print('==================') print('Iteration %02d of %02d' % (i, n)) print('==================') n_features = i * step n_informative = n_features / 10 X, Y, coef_ = make_regression( n_samples=(i * step) + n_test_samples, n_features=n_features, noise=0.1, n_informative=n_informative, coef=True) X_test = X[-n_test_samples:] Y_test = Y[-n_test_samples:] X = X[:n_samples] Y = Y[:n_samples] print("benchmarking scikit-learn: ") scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_)) print("benchmarking glmnet: ") glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_)) xx = np.arange(100, 100 + n * step, step) pl.figure('scikit-learn vs. glmnet benchmark results') pl.title('Regression in high dimensional spaces (%d samples)' % n_samples) pl.plot(xx, scikit_results, 'b-', label='scikit-learn') pl.plot(xx, glmnet_results, 'r-', label='glmnet') pl.legend() pl.xlabel('number of features') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
license: bsd-3-clause
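For readers who only want the timing pattern from bench() without the glmnet dependency, here is a hedged, self-contained sketch using the current scikit-learn import path; the sample sizes and alpha value are illustrative, not the benchmark's settings.

```python
# Hedged sketch of the timing pattern in bench(): fit a Lasso on synthetic
# data and report wall-clock time, RMSE, and coefficient recovery.
import numpy as np
from time import time
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso

X, y, coef = make_regression(n_samples=2000, n_features=200,
                             n_informative=20, noise=0.1, coef=True)
X_train, X_test = X[:1500], X[1500:]
y_train, y_test = y[:1500], y[1500:]

tstart = time()
clf = Lasso(alpha=0.1).fit(X_train, y_train)
print("duration: %.3fs" % (time() - tstart))
print("rmse: %f" % np.sqrt(np.mean((y_test - clf.predict(X_test)) ** 2)))
print("mean coef abs diff: %f" % np.abs(coef - clf.coef_.ravel()).mean())
```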
repo_name: zitouni/gnuradio-3.6.1
path: gr-utils/src/python/plot_psd_base.py
copies: 75
size: 12725
content:
#!/usr/bin/env python # # Copyright 2007,2008,2010,2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # try: import scipy from scipy import fftpack except ImportError: print "Please install SciPy to run this script (http://www.scipy.org/)" raise SystemExit, 1 try: from pylab import * except ImportError: print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)" raise SystemExit, 1 from optparse import OptionParser from scipy import log10 from gnuradio.eng_option import eng_option class plot_psd_base: def __init__(self, datatype, filename, options): self.hfile = open(filename, "r") self.block_length = options.block self.start = options.start self.sample_rate = options.sample_rate self.psdfftsize = options.psd_size self.specfftsize = options.spec_size self.dospec = options.enable_spec # if we want to plot the spectrogram self.datatype = getattr(scipy, datatype) #scipy.complex64 self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file self.axis_font_size = 16 self.label_font_size = 18 self.title_font_size = 20 self.text_size = 22 # Setup PLOT self.fig = figure(1, figsize=(16, 12), facecolor='w') rcParams['xtick.labelsize'] = self.axis_font_size rcParams['ytick.labelsize'] = self.axis_font_size self.text_file = figtext(0.10, 0.95, ("File: %s" % filename), weight="heavy", size=self.text_size) self.text_file_pos = figtext(0.10, 0.92, "File Position: ", weight="heavy", size=self.text_size) self.text_block = figtext(0.35, 0.92, ("Block Size: %d" % self.block_length), weight="heavy", size=self.text_size) self.text_sr = figtext(0.60, 0.915, ("Sample Rate: %.2f" % self.sample_rate), weight="heavy", size=self.text_size) self.make_plots() self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True) self.button_left = Button(self.button_left_axes, "<") self.button_left_callback = self.button_left.on_clicked(self.button_left_click) self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True) self.button_right = Button(self.button_right_axes, ">") self.button_right_callback = self.button_right.on_clicked(self.button_right_click) self.xlim = scipy.array(self.sp_iq.get_xlim()) self.manager = get_current_fig_manager() connect('draw_event', self.zoom) connect('key_press_event', self.click) show() def get_data(self): self.position = self.hfile.tell()/self.sizeof_data self.text_file_pos.set_text("File Position: %d" % self.position) try: self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length) except MemoryError: print "End of File" return False else: # retesting length here as newer version of scipy does not throw a MemoryError, just # returns a zero-length array if(len(self.iq) > 0): tstep = 1.0 / self.sample_rate #self.time = scipy.array([tstep*(self.position + i) for i in 
xrange(len(self.iq))]) self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))]) self.iq_psd, self.freq = self.dopsd(self.iq) return True else: print "End of File" return False def dopsd(self, iq): ''' Need to do this here and plot later so we can do the fftshift ''' overlap = self.psdfftsize/4 winfunc = scipy.blackman psd,freq = mlab.psd(iq, self.psdfftsize, self.sample_rate, window = lambda d: d*winfunc(self.psdfftsize), noverlap = overlap) psd = 10.0*log10(abs(psd)) return (psd, freq) def make_plots(self): # if specified on the command-line, set file pointer self.hfile.seek(self.sizeof_data*self.start, 1) iqdims = [[0.075, 0.2, 0.4, 0.6], [0.075, 0.55, 0.4, 0.3]] psddims = [[0.575, 0.2, 0.4, 0.6], [0.575, 0.55, 0.4, 0.3]] specdims = [0.2, 0.125, 0.6, 0.3] # Subplot for real and imaginary parts of signal self.sp_iq = self.fig.add_subplot(2,2,1, position=iqdims[self.dospec]) self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold") self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold") self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold") # Subplot for PSD plot self.sp_psd = self.fig.add_subplot(2,2,2, position=psddims[self.dospec]) self.sp_psd.set_title(("PSD"), fontsize=self.title_font_size, fontweight="bold") self.sp_psd.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold") self.sp_psd.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold") r = self.get_data() self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags self.draw_time(self.time, self.iq) # draw the plot self.plot_psd = self.sp_psd.plot([], 'b') # make plot for PSD self.draw_psd(self.freq, self.iq_psd) # draw the plot if self.dospec: # Subplot for spectrogram plot self.sp_spec = self.fig.add_subplot(2,2,3, position=specdims) self.sp_spec.set_title(("Spectrogram"), fontsize=self.title_font_size, fontweight="bold") self.sp_spec.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold") self.sp_spec.set_ylabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold") self.draw_spec(self.time, self.iq) draw() def draw_time(self, t, iq): reals = iq.real imags = iq.imag self.plot_iq[0].set_data([t, reals]) self.plot_iq[1].set_data([t, imags]) self.sp_iq.set_xlim(t.min(), t.max()) self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]), 1.5*max([reals.max(), imags.max()])]) def draw_psd(self, f, p): self.plot_psd[0].set_data([f, p]) self.sp_psd.set_ylim([p.min()-10, p.max()+10]) self.sp_psd.set_xlim([f.min(), f.max()]) def draw_spec(self, t, s): overlap = self.specfftsize/4 winfunc = scipy.blackman self.sp_spec.clear() self.sp_spec.specgram(s, self.specfftsize, self.sample_rate, window = lambda d: d*winfunc(self.specfftsize), noverlap = overlap, xextent=[t.min(), t.max()]) def update_plots(self): self.draw_time(self.time, self.iq) self.draw_psd(self.freq, self.iq_psd) if self.dospec: self.draw_spec(self.time, self.iq) self.xlim = scipy.array(self.sp_iq.get_xlim()) # so zoom doesn't get called draw() def zoom(self, event): newxlim = scipy.array(self.sp_iq.get_xlim()) curxlim = scipy.array(self.xlim) if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]): #xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position)))) #xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq)) xmin = max(0, int(ceil(self.sample_rate*(newxlim[0])))) xmax = 
min(int(ceil(self.sample_rate*(newxlim[1]))), len(self.iq)) iq = scipy.array(self.iq[xmin : xmax]) time = scipy.array(self.time[xmin : xmax]) iq_psd, freq = self.dopsd(iq) self.draw_psd(freq, iq_psd) self.xlim = scipy.array(self.sp_iq.get_xlim()) draw() def click(self, event): forward_valid_keys = [" ", "down", "right"] backward_valid_keys = ["up", "left"] if(find(event.key, forward_valid_keys)): self.step_forward() elif(find(event.key, backward_valid_keys)): self.step_backward() def button_left_click(self, event): self.step_backward() def button_right_click(self, event): self.step_forward() def step_forward(self): r = self.get_data() if(r): self.update_plots() def step_backward(self): # Step back in file position if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ): self.hfile.seek(-2*self.sizeof_data*self.block_length, 1) else: self.hfile.seek(-self.hfile.tell(),1) r = self.get_data() if(r): self.update_plots() @staticmethod def setup_options(): usage="%prog: [options] input_filename" description = "Takes a GNU Radio binary file (with specified data type using --data-type) and displays the I&Q data versus time as well as the power spectral density (PSD) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples. Finally, the size of the FFT to use for the PSD and spectrogram plots can be set independently with --psd-size and --spec-size, respectively. The spectrogram plot does not display by default and is turned on with -S or --enable-spec." parser = OptionParser(option_class=eng_option, conflict_handler="resolve", usage=usage, description=description) parser.add_option("-d", "--data-type", type="string", default="complex64", help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]") parser.add_option("-B", "--block", type="int", default=8192, help="Specify the block size [default=%default]") parser.add_option("-s", "--start", type="int", default=0, help="Specify where to start in the file [default=%default]") parser.add_option("-R", "--sample-rate", type="eng_float", default=1.0, help="Set the sampler rate of the data [default=%default]") parser.add_option("", "--psd-size", type="int", default=1024, help="Set the size of the PSD FFT [default=%default]") parser.add_option("", "--spec-size", type="int", default=256, help="Set the size of the spectrogram FFT [default=%default]") parser.add_option("-S", "--enable-spec", action="store_true", default=False, help="Turn on plotting the spectrogram [default=%default]") return parser def find(item_in, list_search): try: return list_search.index(item_in) != None except ValueError: return False def main(): parser = plot_psd_base.setup_options() (options, args) = parser.parse_args () if len(args) != 1: parser.print_help() raise SystemExit, 1 filename = args[0] dc = plot_psd_base(options.data_type, filename, options) if __name__ == "__main__": try: main() except KeyboardInterrupt: pass
license: gpl-3.0
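The dopsd() method in the file above reduces to a windowed, overlapped PSD estimate in dB. A rough Python 3 sketch of the same calculation, using matplotlib.mlab and NumPy in place of the old scipy aliases (the signal, sample rate, and FFT size below are made up for illustration):

```python
# Hedged sketch of a Blackman-windowed PSD like plot_psd_base.dopsd().
import numpy as np
from matplotlib import mlab

sample_rate = 1e6                          # assumed sample rate in Hz
fftsize = 1024
t = np.arange(8192) / sample_rate
iq = np.exp(2j * np.pi * 100e3 * t)        # illustrative 100 kHz complex tone

overlap = fftsize // 4
win = np.blackman(fftsize)
psd, freq = mlab.psd(iq, NFFT=fftsize, Fs=sample_rate,
                     window=lambda d: d * win, noverlap=overlap)
psd_db = 10.0 * np.log10(np.abs(psd))
print(freq[np.argmax(psd_db)])             # peak should land near 100 kHz
```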
repo_name: YihaoLu/statsmodels
path: statsmodels/genmod/cov_struct.py
copies: 19
size: 46892
content:
from statsmodels.compat.python import iterkeys, itervalues, zip, range from statsmodels.stats.correlation_tools import cov_nearest import numpy as np import pandas as pd from scipy import linalg as spl from collections import defaultdict from statsmodels.tools.sm_exceptions import (ConvergenceWarning, IterationLimitWarning) import warnings """ Some details for the covariance calculations can be found in the Stata docs: http://www.stata.com/manuals13/xtxtgee.pdf """ class CovStruct(object): """ A base class for correlation and covariance structures of grouped data. Each implementation of this class takes the residuals from a regression model that has been fitted to grouped data, and uses them to estimate the within-group dependence structure of the random errors in the model. The state of the covariance structure is represented through the value of the class variable `dep_params`. The default state of a newly-created instance should correspond to the identity correlation matrix. """ def __init__(self, cov_nearest_method="clipped"): # Parameters describing the dependency structure self.dep_params = None # Keep track of the number of times that the covariance was # adjusted. self.cov_adjust = [] # Method for projecting the covariance matrix if it not SPD. self.cov_nearest_method = cov_nearest_method def initialize(self, model): """ Called by GEE, used by implementations that need additional setup prior to running `fit`. Parameters ---------- model : GEE class A reference to the parent GEE class instance. """ self.model = model def update(self, params): """ Updates the association parameter values based on the current regression coefficients. Parameters ---------- params : array-like Working values for the regression parameters. """ raise NotImplementedError def covariance_matrix(self, endog_expval, index): """ Returns the working covariance or correlation matrix for a given cluster of data. Parameters ---------- endog_expval: array-like The expected values of endog for the cluster for which the covariance or correlation matrix will be returned index: integer The index of the cluster for which the covariane or correlation matrix will be returned Returns ------- M: matrix The covariance or correlation matrix of endog is_cor: bool True if M is a correlation matrix, False if M is a covariance matrix """ raise NotImplementedError def covariance_matrix_solve(self, expval, index, stdev, rhs): """ Solves matrix equations of the form `covmat * soln = rhs` and returns the values of `soln`, where `covmat` is the covariance matrix represented by this class. Parameters ---------- expval: array-like The expected value of endog for each observed value in the group. index: integer The group index. stdev : array-like The standard deviation of endog for each observation in the group. rhs : list/tuple of array-like A set of right-hand sides; each defines a matrix equation to be solved. Returns ------- soln : list/tuple of array-like The solutions to the matrix equations. Notes ----- Returns None if the solver fails. Some dependence structures do not use `expval` and/or `index` to determine the correlation matrix. Some families (e.g. binomial) do not use the `stdev` parameter when forming the covariance matrix. If the covariance matrix is singular or not SPD, it is projected to the nearest such matrix. These projection events are recorded in the fit_history member of the GEE model. 
Systems of linear equations with the covariance matrix as the left hand side (LHS) are solved for different right hand sides (RHS); the LHS is only factorized once to save time. This is a default implementation, it can be reimplemented in subclasses to optimize the linear algebra according to the struture of the covariance matrix. """ vmat, is_cor = self.covariance_matrix(expval, index) if is_cor: vmat *= np.outer(stdev, stdev) # Factor the covariance matrix. If the factorization fails, # attempt to condition it into a factorizable matrix. threshold = 1e-2 success = False cov_adjust = 0 for itr in range(20): try: vco = spl.cho_factor(vmat) success = True break except np.linalg.LinAlgError: vmat = cov_nearest(vmat, method=self.cov_nearest_method, threshold=threshold) threshold *= 2 cov_adjust += 1 self.cov_adjust.append(cov_adjust) # Last resort if we still can't factor the covariance matrix. if success == False: warnings.warn("Unable to condition covariance matrix to an SPD matrix using cov_nearest", ConvergenceWarning) vmat = np.diag(np.diag(vmat)) vco = spl.cho_factor(vmat) soln = [spl.cho_solve(vco, x) for x in rhs] return soln def summary(self): """ Returns a text summary of the current estimate of the dependence structure. """ raise NotImplementedError class Independence(CovStruct): """ An independence working dependence structure. """ # Nothing to update def update(self, params): return def covariance_matrix(self, expval, index): dim = len(expval) return np.eye(dim, dtype=np.float64), True def covariance_matrix_solve(self, expval, index, stdev, rhs): v = stdev**2 rslt = [] for x in rhs: if x.ndim == 1: rslt.append(x / v) else: rslt.append(x / v[:, None]) return rslt update.__doc__ = CovStruct.update.__doc__ covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__ covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__ def summary(self): return "Observations within a cluster are modeled as being independent." class Exchangeable(CovStruct): """ An exchangeable working dependence structure. """ def __init__(self): super(Exchangeable, self).__init__() # The correlation between any two values in the same cluster self.dep_params = 0. def update(self, params): endog = self.model.endog_li nobs = self.model.nobs varfunc = self.model.family.variance cached_means = self.model.cached_means has_weights = self.model.weights is not None weights_li = self.model.weights residsq_sum, scale = 0, 0 fsum1, fsum2, n_pairs = 0., 0., 0. for i in range(self.model.num_group): expval, _ = cached_means[i] stdev = np.sqrt(varfunc(expval)) resid = (endog[i] - expval) / stdev f = weights_li[i] if has_weights else 1. ngrp = len(resid) residsq = np.outer(resid, resid) scale += f * np.trace(residsq) fsum1 += f * len(endog[i]) residsq = np.tril(residsq, -1) residsq_sum += f * residsq.sum() npr = 0.5 * ngrp * (ngrp - 1) fsum2 += f * npr n_pairs += npr ddof = self.model.ddof_scale scale /= (fsum1 * (nobs - ddof) / float(nobs)) residsq_sum /= scale self.dep_params = residsq_sum / (fsum2 * (n_pairs - ddof) / float(n_pairs)) def covariance_matrix(self, expval, index): dim = len(expval) dp = self.dep_params * np.ones((dim, dim), dtype=np.float64) np.fill_diagonal(dp, 1) return dp, True def covariance_matrix_solve(self, expval, index, stdev, rhs): k = len(expval) c = self.dep_params / (1. - self.dep_params) c /= 1. + self.dep_params * (k - 1) rslt = [] for x in rhs: if x.ndim == 1: x1 = x / stdev y = x1 / (1. - self.dep_params) y -= c * sum(x1) y /= stdev else: x1 = x / stdev[:, None] y = x1 / (1. 
- self.dep_params) y -= c * x1.sum(0) y /= stdev[:, None] rslt.append(y) return rslt update.__doc__ = CovStruct.update.__doc__ covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__ covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__ def summary(self): return ("The correlation between two observations in the " + "same cluster is %.3f" % self.dep_params) class Nested(CovStruct): """ A nested working dependence structure. A working dependence structure that captures a nested hierarchy of groups, each level of which contributes to the random error term of the model. When using this working covariance structure, `dep_data` of the GEE instance should contain a n_obs x k matrix of 0/1 indicators, corresponding to the k subgroups nested under the top-level `groups` of the GEE instance. These subgroups should be nested from left to right, so that two observations with the same value for column j of `dep_data` should also have the same value for all columns j' < j (this only applies to observations in the same top-level cluster given by the `groups` argument to GEE). Examples -------- Suppose our data are student test scores, and the students are in classrooms, nested in schools, nested in school districts. The school district is the highest level of grouping, so the school district id would be provided to GEE as `groups`, and the school and classroom id's would be provided to the Nested class as the `dep_data` argument, e.g. 0 0 # School 0, classroom 0, student 0 0 0 # School 0, classroom 0, student 1 0 1 # School 0, classroom 1, student 0 0 1 # School 0, classroom 1, student 1 1 0 # School 1, classroom 0, student 0 1 0 # School 1, classroom 0, student 1 1 1 # School 1, classroom 1, student 0 1 1 # School 1, classroom 1, student 1 Labels lower in the hierarchy are recycled, so that student 0 in classroom 0 is different fro student 0 in classroom 1, etc. Notes ----- The calculations for this dependence structure involve all pairs of observations within a group (that is, within the top level `group` structure passed to GEE). Large group sizes will result in slow iterations. The variance components are estimated using least squares regression of the products r*r', for standardized residuals r and r' in the same group, on a vector of indicators defining which variance components are shared by r and r'. """ def initialize(self, model): """ Called on the first call to update `ilabels` is a list of n_i x n_i matrices containing integer labels that correspond to specific correlation parameters. Two elements of ilabels[i] with the same label share identical variance components. `designx` is a matrix, with each row containing dummy variables indicating which variance components are associated with the corresponding element of QY. """ super(Nested, self).initialize(model) if self.model.weights is not None: warnings.warn("weights not implemented for nested cov_struct, using unweighted covariance estimate") # A bit of processing of the nest data id_matrix = np.asarray(self.model.dep_data) if id_matrix.ndim == 1: id_matrix = id_matrix[:,None] self.id_matrix = id_matrix endog = self.model.endog_li designx, ilabels = [], [] # The number of layers of nesting n_nest = self.id_matrix.shape[1] for i in range(self.model.num_group): ngrp = len(endog[i]) glab = self.model.group_labels[i] rix = self.model.group_indices[glab] # Determine the number of common variance components # shared by each pair of observations. 
ix1, ix2 = np.tril_indices(ngrp, -1) ncm = (self.id_matrix[rix[ix1], :] == self.id_matrix[rix[ix2], :]).sum(1) # This is used to construct the working correlation # matrix. ilabel = np.zeros((ngrp, ngrp), dtype=np.int32) ilabel[[ix1, ix2]] = ncm + 1 ilabel[[ix2, ix1]] = ncm + 1 ilabels.append(ilabel) # This is used to estimate the variance components. dsx = np.zeros((len(ix1), n_nest+1), dtype=np.float64) dsx[:,0] = 1 for k in np.unique(ncm): ii = np.flatnonzero(ncm == k) dsx[ii, 1:k+1] = 1 designx.append(dsx) self.designx = np.concatenate(designx, axis=0) self.ilabels = ilabels svd = np.linalg.svd(self.designx, 0) self.designx_u = svd[0] self.designx_s = svd[1] self.designx_v = svd[2].T def update(self, params): endog = self.model.endog_li nobs = self.model.nobs dim = len(params) if self.designx is None: self._compute_design(self.model) cached_means = self.model.cached_means varfunc = self.model.family.variance dvmat = [] scale = 0. for i in range(self.model.num_group): expval, _ = cached_means[i] stdev = np.sqrt(varfunc(expval)) resid = (endog[i] - expval) / stdev ix1, ix2 = np.tril_indices(len(resid), -1) dvmat.append(resid[ix1] * resid[ix2]) scale += np.sum(resid**2) dvmat = np.concatenate(dvmat) scale /= (nobs - dim) # Use least squares regression to estimate the variance # components vcomp_coeff = np.dot(self.designx_v, np.dot(self.designx_u.T, dvmat) / self.designx_s) self.vcomp_coeff = np.clip(vcomp_coeff, 0, np.inf) self.scale = scale self.dep_params = self.vcomp_coeff.copy() def covariance_matrix(self, expval, index): dim = len(expval) # First iteration if self.dep_params is None: return np.eye(dim, dtype=np.float64), True ilabel = self.ilabels[index] c = np.r_[self.scale, np.cumsum(self.vcomp_coeff)] vmat = c[ilabel] vmat /= self.scale return vmat, True update.__doc__ = CovStruct.update.__doc__ covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__ def summary(self): """ Returns a summary string describing the state of the dependence structure. """ msg = "Variance estimates\n------------------\n" for k in range(len(self.vcomp_coeff)): msg += "Component %d: %.3f\n" % (k+1, self.vcomp_coeff[k]) msg += "Residual: %.3f\n" % (self.scale - np.sum(self.vcomp_coeff)) return msg class Stationary(CovStruct): """ A stationary covariance structure. The correlation between two observations is an arbitrary function of the distance between them. Distances up to a given maximum value are included in the covariance model. Parameters ---------- max_lag : float The largest distance that is included in the covariance model. grid : bool If True, the index positions in the data (after dropping missing values) are used to define distances, and the `time` variable is ignored. """ def __init__(self, max_lag=1, grid=False): super(Stationary, self).__init__() self.max_lag = max_lag self.grid = grid self.dep_params = np.zeros(max_lag) def initialize(self, model): super(Stationary, self).initialize(model) # Time used as an index needs to be integer type. 
if self.grid == False: time = self.model.time[:, 0].astype(np.int32) self.time = self.model.cluster_list(time) def update(self, params): if self.grid: self.update_grid(params) else: self.update_nogrid(params) def update_grid(self, params): endog = self.model.endog_li cached_means = self.model.cached_means varfunc = self.model.family.variance dep_params = np.zeros(self.max_lag + 1) for i in range(self.model.num_group): expval, _ = cached_means[i] stdev = np.sqrt(varfunc(expval)) resid = (endog[i] - expval) / stdev dep_params[0] += np.sum(resid * resid) / len(resid) for j in range(1, self.max_lag + 1): dep_params[j] += np.sum(resid[0:-j] * resid[j:]) / len(resid[j:]) self.dep_params = dep_params[1:] / dep_params[0] def update_nogrid(self, params): endog = self.model.endog_li cached_means = self.model.cached_means varfunc = self.model.family.variance dep_params = np.zeros(self.max_lag + 1) dn = np.zeros(self.max_lag + 1) for i in range(self.model.num_group): expval, _ = cached_means[i] stdev = np.sqrt(varfunc(expval)) resid = (endog[i] - expval) / stdev j1, j2 = np.tril_indices(len(expval)) dx = np.abs(self.time[i][j1] - self.time[i][j2]) ii = np.flatnonzero(dx <= self.max_lag) j1 = j1[ii] j2 = j2[ii] dx = dx[ii] vs = np.bincount(dx, weights=resid[j1] * resid[j2], minlength=self.max_lag+1) vd = np.bincount(dx, minlength=self.max_lag+1) ii = np.flatnonzero(vd > 0) dn[ii] += 1 if len(ii) > 0: dep_params[ii] += vs[ii] / vd[ii] dep_params /= dn self.dep_params = dep_params[1:] / dep_params[0] def covariance_matrix(self, endog_expval, index): if self.grid: return self.covariance_matrix_grid(endog_expal, index) j1, j2 = np.tril_indices(len(endog_expval)) dx = np.abs(self.time[index][j1] - self.time[index][j2]) ii = np.flatnonzero((0 < dx) & (dx <= self.max_lag)) j1 = j1[ii] j2 = j2[ii] dx = dx[ii] cmat = np.eye(len(endog_expval)) cmat[j1, j2] = self.dep_params[dx - 1] cmat[j2, j1] = self.dep_params[dx - 1] return cmat, True def covariance_matrix_grid(self, endog_expval, index): from scipy.linalg import toeplitz r = np.zeros(len(endog_expval)) r[0] = 1 r[1:self.max_lag + 1] = self.dep_params return toeplitz(r), True def covariance_matrix_solve(self, expval, index, stdev, rhs): if self.grid == False: return super(Stationary, self).covariance_matrix_solve(expval, index, stdev, rhs) from statsmodels.tools.linalg import stationary_solve r = np.zeros(len(expval)) r[0:self.max_lag] = self.dep_params return [stationary_solve(r, x) for x in rhs] update.__doc__ = CovStruct.update.__doc__ covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__ covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__ def summary(self): return ("Stationary dependence parameters\n", self.dep_params) class Autoregressive(CovStruct): """ A first-order autoregressive working dependence structure. The dependence is defined in terms of the `time` component of the parent GEE class, which defaults to the index position of each value within its cluster, based on the order of values in the input data set. Time represents a potentially multidimensional index from which distances between pairs of observations can be determined. The correlation between two observations in the same cluster is dep_params^distance, where `dep_params` contains the (scalar) autocorrelation parameter to be estimated, and `distance` is the distance between the two observations, calculated from their corresponding time values. `time` is stored as an n_obs x k matrix, where `k` represents the number of dimensions in the time index. 
The autocorrelation parameter is estimated using weighted nonlinear least squares, regressing each value within a cluster on each preceeding value in the same cluster. Parameters ---------- dist_func: function from R^k x R^k to R^+, optional A function that computes the distance between the two observations based on their `time` values. References ---------- B Rosner, A Munoz. Autoregressive modeling for the analysis of longitudinal data with unequally spaced examinations. Statistics in medicine. Vol 7, 59-71, 1988. """ def __init__(self, dist_func=None): super(Autoregressive, self).__init__() # The function for determining distances based on time if dist_func is None: self.dist_func = lambda x, y: np.abs(x - y).sum() else: self.dist_func = dist_func self.designx = None # The autocorrelation parameter self.dep_params = 0. def update(self, params): if self.model.weights is not None: warnings.warn("weights not implemented for autoregressive cov_struct, using unweighted covariance estimate") endog = self.model.endog_li time = self.model.time_li # Only need to compute this once if self.designx is not None: designx = self.designx else: designx = [] for i in range(self.model.num_group): ngrp = len(endog[i]) if ngrp == 0: continue # Loop over pairs of observations within a cluster for j1 in range(ngrp): for j2 in range(j1): designx.append(self.dist_func(time[i][j1, :], time[i][j2, :])) designx = np.array(designx) self.designx = designx scale = self.model.estimate_scale() varfunc = self.model.family.variance cached_means = self.model.cached_means # Weights var = 1. - self.dep_params**(2*designx) var /= 1. - self.dep_params**2 wts = 1. / var wts /= wts.sum() residmat = [] for i in range(self.model.num_group): expval, _ = cached_means[i] stdev = np.sqrt(scale * varfunc(expval)) resid = (endog[i] - expval) / stdev ngrp = len(resid) for j1 in range(ngrp): for j2 in range(j1): residmat.append([resid[j1], resid[j2]]) residmat = np.array(residmat) # Need to minimize this def fitfunc(a): dif = residmat[:, 0] - (a**designx)*residmat[:, 1] return np.dot(dif**2, wts) # Left bracket point b_lft, f_lft = 0., fitfunc(0.) # Center bracket point b_ctr, f_ctr = 0.5, fitfunc(0.5) while f_ctr > f_lft: b_ctr /= 2 f_ctr = fitfunc(b_ctr) if b_ctr < 1e-8: self.dep_params = 0 return # Right bracket point b_rgt, f_rgt = 0.75, fitfunc(0.75) while f_rgt < f_ctr: b_rgt = b_rgt + (1. - b_rgt) / 2 f_rgt = fitfunc(b_rgt) if b_rgt > 1. - 1e-6: raise ValueError( "Autoregressive: unable to find right bracket") from scipy.optimize import brent self.dep_params = brent(fitfunc, brack=[b_lft, b_ctr, b_rgt]) def covariance_matrix(self, endog_expval, index): ngrp = len(endog_expval) if self.dep_params == 0: return np.eye(ngrp, dtype=np.float64), True idx = np.arange(ngrp) cmat = self.dep_params**np.abs(idx[:, None] - idx[None, :]) return cmat, True def covariance_matrix_solve(self, expval, index, stdev, rhs): # The inverse of an AR(1) covariance matrix is tri-diagonal. k = len(expval) soln = [] # LHS has 1 column if k == 1: return [x / stdev**2 for x in rhs] # LHS has 2 columns if k == 2: mat = np.array([[1, -self.dep_params], [-self.dep_params, 1]]) mat /= (1. - self.dep_params**2) for x in rhs: if x.ndim == 1: x1 = x / stdev else: x1 = x / stdev[:, None] x1 = np.dot(mat, x1) if x.ndim == 1: x1 /= stdev else: x1 /= stdev[:, None] soln.append(x1) return soln # LHS has >= 3 columns: values c0, c1, c2 defined below give # the inverse. c0 is on the diagonal, except for the first # and last position. 
c1 is on the first and last position of # the diagonal. c2 is on the sub/super diagonal. c0 = (1. + self.dep_params**2) / (1. - self.dep_params**2) c1 = 1. / (1. - self.dep_params**2) c2 = -self.dep_params / (1. - self.dep_params**2) soln = [] for x in rhs: flatten = False if x.ndim == 1: x = x[:, None] flatten = True x1 = x / stdev[:, None] z0 = np.zeros((1, x.shape[1])) rhs1 = np.concatenate((x[1:,:], z0), axis=0) rhs2 = np.concatenate((z0, x[0:-1,:]), axis=0) y = c0*x + c2*rhs1 + c2*rhs2 y[0, :] = c1*x[0, :] + c2*x[1, :] y[-1, :] = c1*x[-1, :] + c2*x[-2, :] y /= stdev[:, None] if flatten: y = np.squeeze(y) soln.append(y) return soln update.__doc__ = CovStruct.update.__doc__ covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__ covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__ def summary(self): return ("Autoregressive(1) dependence parameter: %.3f\n" % self.dep_params) class CategoricalCovStruct(CovStruct): """ Parent class for covariance structure for categorical data models. Attributes ---------- nlevel : int The number of distinct levels for the outcome variable. ibd : list A list whose i^th element ibd[i] is an array whose rows contain integer pairs (a,b), where endog_li[i][a:b] is the subvector of binary indicators derived from the same ordinal value. """ def initialize(self, model): super(CategoricalCovStruct, self).initialize(model) self.nlevel = len(model.endog_values) self._ncut = self.nlevel - 1 from numpy.lib.stride_tricks import as_strided b = np.dtype(np.int64).itemsize ibd = [] for v in model.endog_li: jj = np.arange(0, len(v) + 1, self._ncut, dtype=np.int64) jj = as_strided(jj, shape=(len(jj) - 1, 2), strides=(b, b)) ibd.append(jj) self.ibd = ibd class GlobalOddsRatio(CategoricalCovStruct): """ Estimate the global odds ratio for a GEE with ordinal or nominal data. References ---------- PJ Heagerty and S Zeger. "Marginal Regression Models for Clustered Ordinal Measurements". Journal of the American Statistical Association Vol. 91, Issue 435 (1996). Thomas Lumley. Generalized Estimating Equations for Ordinal Data: A Note on Working Correlation Structures. Biometrics Vol. 52, No. 1 (Mar., 1996), pp. 354-361 http://www.jstor.org/stable/2533173 Notes ----- The following data structures are calculated in the class: 'ibd' is a list whose i^th element ibd[i] is a sequence of integer pairs (a,b), where endog_li[i][a:b] is the subvector of binary indicators derived from the same ordinal value. `cpp` is a dictionary where cpp[group] is a map from cut-point pairs (c,c') to the indices of all between-subject pairs derived from the given cut points. """ def __init__(self, endog_type): super(GlobalOddsRatio, self).__init__() self.endog_type = endog_type self.dep_params = 0. 
def initialize(self, model): super(GlobalOddsRatio, self).initialize(model) if self.model.weights is not None: warnings.warn("weights not implemented for GlobalOddsRatio cov_struct, using unweighted covariance estimate") # Need to restrict to between-subject pairs cpp = [] for v in model.endog_li: # Number of subjects in this group m = int(len(v) / self._ncut) i1, i2 = np.tril_indices(m, -1) cpp1 = {} for k1 in range(self._ncut): for k2 in range(k1+1): jj = np.zeros((len(i1), 2), dtype=np.int64) jj[:, 0] = i1*self._ncut + k1 jj[:, 1] = i2*self._ncut + k2 cpp1[(k2, k1)] = jj cpp.append(cpp1) self.cpp = cpp # Initialize the dependence parameters self.crude_or = self.observed_crude_oddsratio() if self.model.update_dep: self.dep_params = self.crude_or def pooled_odds_ratio(self, tables): """ Returns the pooled odds ratio for a list of 2x2 tables. The pooled odds ratio is the inverse variance weighted average of the sample odds ratios of the tables. """ if len(tables) == 0: return 1. # Get the sampled odds ratios and variances log_oddsratio, var = [], [] for table in tables: lor = np.log(table[1, 1]) + np.log(table[0, 0]) -\ np.log(table[0, 1]) - np.log(table[1, 0]) log_oddsratio.append(lor) var.append((1 / table.astype(np.float64)).sum()) # Calculate the inverse variance weighted average wts = [1 / v for v in var] wtsum = sum(wts) wts = [w / wtsum for w in wts] log_pooled_or = sum([w*e for w, e in zip(wts, log_oddsratio)]) return np.exp(log_pooled_or) def covariance_matrix(self, expected_value, index): vmat = self.get_eyy(expected_value, index) vmat -= np.outer(expected_value, expected_value) return vmat, False def observed_crude_oddsratio(self): """ To obtain the crude (global) odds ratio, first pool all binary indicators corresponding to a given pair of cut points (c,c'), then calculate the odds ratio for this 2x2 table. The crude odds ratio is the inverse variance weighted average of these odds ratios. Since the covariate effects are ignored, this OR will generally be greater than the stratified OR. """ cpp = self.cpp endog = self.model.endog_li # Storage for the contingency tables for each (c,c') tables = {} for ii in iterkeys(cpp[0]): tables[ii] = np.zeros((2, 2), dtype=np.float64) # Get the observed crude OR for i in range(len(endog)): # The observed joint values for the current cluster yvec = endog[i] endog_11 = np.outer(yvec, yvec) endog_10 = np.outer(yvec, 1. - yvec) endog_01 = np.outer(1. - yvec, yvec) endog_00 = np.outer(1. - yvec, 1. - yvec) cpp1 = cpp[i] for ky in iterkeys(cpp1): ix = cpp1[ky] tables[ky][1, 1] += endog_11[ix[:, 0], ix[:, 1]].sum() tables[ky][1, 0] += endog_10[ix[:, 0], ix[:, 1]].sum() tables[ky][0, 1] += endog_01[ix[:, 0], ix[:, 1]].sum() tables[ky][0, 0] += endog_00[ix[:, 0], ix[:, 1]].sum() return self.pooled_odds_ratio(list(itervalues(tables))) def get_eyy(self, endog_expval, index): """ Returns a matrix V such that V[i,j] is the joint probability that endog[i] = 1 and endog[j] = 1, based on the marginal probabilities of endog and the global odds ratio `current_or`. """ current_or = self.dep_params ibd = self.ibd[index] # The between-observation joint probabilities if current_or == 1.0: vmat = np.outer(endog_expval, endog_expval) else: psum = endog_expval[:, None] + endog_expval[None, :] pprod = endog_expval[:, None] * endog_expval[None, :] pfac = np.sqrt((1. + psum * (current_or - 1.))**2 + 4 * current_or * (1. - current_or) * pprod) vmat = 1. + psum * (current_or - 1.) - pfac vmat /= 2. 
* (current_or - 1) # Fix E[YY'] for elements that belong to same observation for bdl in ibd: evy = endog_expval[bdl[0]:bdl[1]] if self.endog_type == "ordinal": vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\ np.minimum.outer(evy, evy) else: vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] = np.diag(evy) return vmat def update(self, params): """ Update the global odds ratio based on the current value of params. """ endog = self.model.endog_li cpp = self.cpp cached_means = self.model.cached_means # This will happen if all the clusters have only # one observation if len(cpp[0]) == 0: return tables = {} for ii in cpp[0]: tables[ii] = np.zeros((2, 2), dtype=np.float64) for i in range(self.model.num_group): endog_expval, _ = cached_means[i] emat_11 = self.get_eyy(endog_expval, i) emat_10 = endog_expval[:, None] - emat_11 emat_01 = -emat_11 + endog_expval emat_00 = 1. - (emat_11 + emat_10 + emat_01) cpp1 = cpp[i] for ky in iterkeys(cpp1): ix = cpp1[ky] tables[ky][1, 1] += emat_11[ix[:, 0], ix[:, 1]].sum() tables[ky][1, 0] += emat_10[ix[:, 0], ix[:, 1]].sum() tables[ky][0, 1] += emat_01[ix[:, 0], ix[:, 1]].sum() tables[ky][0, 0] += emat_00[ix[:, 0], ix[:, 1]].sum() cor_expval = self.pooled_odds_ratio(list(itervalues(tables))) self.dep_params *= self.crude_or / cor_expval if not np.isfinite(self.dep_params): self.dep_params = 1. warnings.warn("dep_params became inf, resetting to 1", ConvergenceWarning) update.__doc__ = CovStruct.update.__doc__ covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__ def summary(self): return "Global odds ratio: %.3f\n" % self.dep_params class OrdinalIndependence(CategoricalCovStruct): """ An independence covariance structure for ordinal models. The working covariance between indicators derived from different observations is zero. The working covariance between indicators derived form a common observation is determined from their current mean values. There are no parameters to estimate in this covariance structure. """ def covariance_matrix(self, expected_value, index): ibd = self.ibd[index] n = len(expected_value) vmat = np.zeros((n, n)) for bdl in ibd: ev = expected_value[bdl[0]:bdl[1]] vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\ np.minimum.outer(ev, ev) - np.outer(ev, ev) return vmat, False # Nothing to update def update(self, params): pass class NominalIndependence(CategoricalCovStruct): """ An independence covariance structure for nominal models. The working covariance between indicators derived from different observations is zero. The working covariance between indicators derived form a common observation is determined from their current mean values. There are no parameters to estimate in this covariance structure. """ def covariance_matrix(self, expected_value, index): ibd = self.ibd[index] n = len(expected_value) vmat = np.zeros((n, n)) for bdl in ibd: ev = expected_value[bdl[0]:bdl[1]] vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\ np.diag(ev) - np.outer(ev, ev) return vmat, False # Nothing to update def update(self, params): pass class Equivalence(CovStruct): """ A covariance structure defined in terms of equivalence classes. An 'equivalence class' is a set of pairs of observations such that the covariance of every pair within the equivalence class has a common value. Parameters ---------- pairs : dict-like A dictionary of dictionaries, where `pairs[group][label]` provides the indices of all pairs of observations in the group that have the same covariance value. Specifically, `pairs[group][label]` is a tuple `(j1, j2)`, where `j1` and `j2` are integer arrays of the same length. 
`j1[i], j2[i]` is one index pair that belongs to the `label` equivalence class. Only one triangle of each covariance matrix should be included. Positions where j1 and j2 have the same value are variance parameters. labels : array-like An array of labels such that every distinct pair of labels defines an equivalence class. Either `labels` or `pairs` must be provided. When the two labels in a pair are equal two equivalence classes are defined: one for the diagonal elements (corresponding to variances) and one for the off-diagonal elements (corresponding to covariances). return_cov : boolean If True, `covariance_matrix` returns an estimate of the covariance matrix, otherwise returns an estimate of the correlation matrix. Notes ----- Using `labels` to define the class is much easier than using `pairs`, but is less general. Any pair of values not contained in `pairs` will be assigned zero covariance. The index values in `pairs` are row indices into the `exog` matrix. They are not updated if missing data are present. When using this covariance structure, missing data should be removed before constructing the model. If using `labels`, after a model is defined using the covariance structure it is possible to remove a label pair from the second level of the `pairs` dictionary to force the corresponding covariance to be zero. Examples -------- The following sets up the `pairs` dictionary for a model with two groups, equal variance for all observations, and constant covariance for all pairs of observations within each group. >> pairs = {0: {}, 1: {}} >> pairs[0][0] = (np.r_[0, 1, 2], np.r_[0, 1, 2]) >> pairs[0][1] = np.tril_indices(3, -1) >> pairs[1][0] = (np.r_[3, 4, 5], np.r_[3, 4, 5]) >> pairs[1][2] = 3 + np.tril_indices(3, -1) """ def __init__(self, pairs=None, labels=None, return_cov=False): super(Equivalence, self).__init__() if (pairs is None) and (labels is None): raise ValueError("Equivalence cov_struct requires either `pairs` or `labels`") if (pairs is not None) and (labels is not None): raise ValueError("Equivalence cov_struct accepts only one of `pairs` and `labels`") if pairs is not None: import copy self.pairs = copy.deepcopy(pairs) if labels is not None: self.labels = np.asarray(labels) self.return_cov = return_cov def _make_pairs(self, i, j): """ Create arrays `i_`, `j_` containing all unique ordered pairs of elements in `i` and `j`. The arrays `i` and `j` must be one-dimensional containing non-negative integers. 
""" mat = np.zeros((len(i)*len(j), 2), dtype=np.int32) # Create the pairs and order them f = np.ones(len(j)) mat[:, 0] = np.kron(f, i).astype(np.int32) f = np.ones(len(i)) mat[:, 1] = np.kron(j, f).astype(np.int32) mat.sort(1) # Remove repeated rows try: dtype = np.dtype((np.void, mat.dtype.itemsize * mat.shape[1])) bmat = np.ascontiguousarray(mat).view(dtype) _, idx = np.unique(bmat, return_index=True) except TypeError: # workaround for old numpy that can't call unique with complex # dtypes np.random.seed(4234) bmat = np.dot(mat, np.random.uniform(size=mat.shape[1])) _, idx = np.unique(bmat, return_index=True) mat = mat[idx, :] return mat[:, 0], mat[:, 1] def _pairs_from_labels(self): from collections import defaultdict pairs = defaultdict(lambda : defaultdict(lambda : None)) model = self.model df = pd.DataFrame({"labels": self.labels, "groups": model.groups}) gb = df.groupby(["groups", "labels"]) ulabels = np.unique(self.labels) for g_ix, g_lb in enumerate(model.group_labels): # Loop over label pairs for lx1 in range(len(ulabels)): for lx2 in range(lx1+1): lb1 = ulabels[lx1] lb2 = ulabels[lx2] try: i1 = gb.groups[(g_lb, lb1)] i2 = gb.groups[(g_lb, lb2)] except KeyError: continue i1, i2 = self._make_pairs(i1, i2) clabel = str(lb1) + "/" + str(lb2) # Variance parameters belong in their own equiv class. jj = np.flatnonzero(i1 == i2) if len(jj) > 0: clabelv = clabel + "/v" pairs[g_lb][clabelv] = (i1[jj], i2[jj]) # Covariance parameters jj = np.flatnonzero(i1 != i2) if len(jj) > 0: i1 = i1[jj] i2 = i2[jj] pairs[g_lb][clabel] = (i1, i2) self.pairs = pairs def initialize(self, model): super(Equivalence, self).initialize(model) if self.model.weights is not None: warnings.warn("weights not implemented for equalence cov_struct, using unweighted covariance estimate") if not hasattr(self, 'pairs'): self._pairs_from_labels() # Initialize so that any equivalence class containing a # variance parameter has value 1. self.dep_params = defaultdict(lambda : 0.) self._var_classes = set([]) for gp in self.model.group_labels: for lb in self.pairs[gp]: j1, j2 = self.pairs[gp][lb] if np.any(j1 == j2): if not np.all(j1 == j2): warnings.warn("equivalence class contains both variance and covariance parameters") self._var_classes.add(lb) self.dep_params[lb] = 1 # Need to start indexing at 0 within each group. 
# rx maps olds indices to new indices rx = -1 * np.ones(len(self.model.endog), dtype=np.int32) for g_ix, g_lb in enumerate(self.model.group_labels): ii = self.model.group_indices[g_lb] rx[ii] = np.arange(len(ii), dtype=np.int32) # Reindex for gp in self.model.group_labels: for lb in self.pairs[gp].keys(): a, b = self.pairs[gp][lb] self.pairs[gp][lb] = (rx[a], rx[b]) def update(self, params): endog = self.model.endog_li varfunc = self.model.family.variance cached_means = self.model.cached_means dep_params = defaultdict(lambda : [0., 0., 0.]) n_pairs = defaultdict(lambda : 0) dim = len(params) for k, gp in enumerate(self.model.group_labels): expval, _ = cached_means[k] stdev = np.sqrt(varfunc(expval)) resid = (endog[k] - expval) / stdev for lb in self.pairs[gp].keys(): if (not self.return_cov) and lb in self._var_classes: continue jj = self.pairs[gp][lb] dep_params[lb][0] += np.sum(resid[jj[0]] * resid[jj[1]]) if not self.return_cov: dep_params[lb][1] += np.sum(resid[jj[0]]**2) dep_params[lb][2] += np.sum(resid[jj[1]]**2) n_pairs[lb] += len(jj[0]) if self.return_cov: for lb in dep_params.keys(): dep_params[lb] = dep_params[lb][0] / (n_pairs[lb] - dim) else: for lb in dep_params.keys(): den = np.sqrt(dep_params[lb][1] * dep_params[lb][2]) dep_params[lb] = dep_params[lb][0] / den for lb in self._var_classes: dep_params[lb] = 1. self.dep_params = dep_params self.n_pairs = n_pairs def covariance_matrix(self, expval, index): dim = len(expval) cmat = np.zeros((dim, dim)) g_lb = self.model.group_labels[index] for lb in self.pairs[g_lb].keys(): j1, j2 = self.pairs[g_lb][lb] cmat[j1, j2] = self.dep_params[lb] cmat = cmat + cmat.T np.fill_diagonal(cmat, cmat.diagonal() / 2) return cmat, not self.return_cov update.__doc__ = CovStruct.update.__doc__ covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
license: bsd-3-clause
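As a usage sketch for the structures defined in this module, the following fits a GEE with the Exchangeable working covariance through the public statsmodels API; the simulated clustered data and the Gaussian family are illustrative choices, not anything prescribed by the file above.

```python
# Hedged sketch: fit a GEE with an Exchangeable working covariance.
import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(0)
n_groups, group_size = 100, 4
groups = np.repeat(np.arange(n_groups), group_size)
x = rng.normal(size=n_groups * group_size)
u = np.repeat(rng.normal(scale=0.5, size=n_groups), group_size)  # shared group effect
y = 1.0 + 2.0 * x + u + rng.normal(size=n_groups * group_size)
df = pd.DataFrame({"y": y, "x": x, "g": groups})

model = sm.GEE.from_formula("y ~ x", groups="g", data=df,
                            cov_struct=sm.cov_struct.Exchangeable(),
                            family=sm.families.Gaussian())
result = model.fit()
print(result.summary())
print(model.cov_struct.summary())   # estimated within-cluster correlation
```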
repo_name: pombreda/syzygy
path: third_party/numpy/files/numpy/lib/recfunctions.py
copies: 23
size: 34483
content:
""" Collection of utilities to manipulate structured arrays. Most of these functions were initially implemented by John Hunter for matplotlib. They have been rewritten and extended for convenience. """ import sys import itertools import numpy as np import numpy.ma as ma from numpy import ndarray, recarray from numpy.ma import MaskedArray from numpy.ma.mrecords import MaskedRecords from numpy.lib._iotools import _is_string_like _check_fill_value = np.ma.core._check_fill_value __all__ = ['append_fields', 'drop_fields', 'find_duplicates', 'get_fieldstructure', 'join_by', 'merge_arrays', 'rec_append_fields', 'rec_drop_fields', 'rec_join', 'recursive_fill_fields', 'rename_fields', 'stack_arrays', ] def recursive_fill_fields(input, output): """ Fills fields from output with fields from input, with support for nested structures. Parameters ---------- input : ndarray Input array. output : ndarray Output array. Notes ----- * `output` should be at least the same size as `input` Examples -------- >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) >>> b = np.zeros((3,), dtype=a.dtype) >>> rfn.recursive_fill_fields(a, b) array([(1, 10.0), (2, 20.0), (0, 0.0)], dtype=[('A', '<i4'), ('B', '<f8')]) """ newdtype = output.dtype for field in newdtype.names: try: current = input[field] except ValueError: continue if current.dtype.names: recursive_fill_fields(current, output[field]) else: output[field][:len(current)] = current return output def get_names(adtype): """ Returns the field names of the input datatype as a tuple. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names(np.empty((1,), dtype=int)) is None True >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)])) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names(adtype) ('a', ('b', ('ba', 'bb'))) """ listnames = [] names = adtype.names for name in names: current = adtype[name] if current.names: listnames.append((name, tuple(get_names(current)))) else: listnames.append(name) return tuple(listnames) or None def get_names_flat(adtype): """ Returns the field names of the input datatype as a tuple. Nested structure are flattend beforehand. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None True >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)])) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names_flat(adtype) ('a', 'b', 'ba', 'bb') """ listnames = [] names = adtype.names for name in names: listnames.append(name) current = adtype[name] if current.names: listnames.extend(get_names_flat(current)) return tuple(listnames) or None def flatten_descr(ndtype): """ Flatten a structured data-type description. Examples -------- >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])]) >>> rfn.flatten_descr(ndtype) (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32'))) """ names = ndtype.names if names is None: return ndtype.descr else: descr = [] for field in names: (typ, _) = ndtype.fields[field] if typ.names: descr.extend(flatten_descr(typ)) else: descr.append((field, typ)) return tuple(descr) def zip_descr(seqarrays, flatten=False): """ Combine the dtype description of a series of arrays. 
Parameters ---------- seqarrays : sequence of arrays Sequence of arrays flatten : {boolean}, optional Whether to collapse nested descriptions. """ newdtype = [] if flatten: for a in seqarrays: newdtype.extend(flatten_descr(a.dtype)) else: for a in seqarrays: current = a.dtype names = current.names or () if len(names) > 1: newdtype.append(('', current.descr)) else: newdtype.extend(current.descr) return np.dtype(newdtype).descr def get_fieldstructure(adtype, lastname=None, parents=None,): """ Returns a dictionary with fields as keys and a list of parent fields as values. This function is used to simplify access to fields nested in other fields. Parameters ---------- adtype : np.dtype Input datatype lastname : optional Last processed field name (used internally during recursion). parents : dictionary Dictionary of parent fields (used interbally during recursion). Examples -------- >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('A', int), ... ('B', [('BA', int), ... ('BB', [('BBA', int), ('BBB', int)])])]) >>> rfn.get_fieldstructure(ndtype) ... # XXX: possible regression, order of BBA and BBB is swapped {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} """ if parents is None: parents = {} names = adtype.names for name in names: current = adtype[name] if current.names: if lastname: parents[name] = [lastname, ] else: parents[name] = [] parents.update(get_fieldstructure(current, name, parents)) else: lastparent = [_ for _ in (parents.get(lastname, []) or [])] if lastparent: # if (lastparent[-1] != lastname): lastparent.append(lastname) elif lastname: lastparent = [lastname, ] parents[name] = lastparent or [] return parents or None def _izip_fields_flat(iterable): """ Returns an iterator of concatenated fields from a sequence of arrays, collapsing any nested structure. """ for element in iterable: if isinstance(element, np.void): for f in _izip_fields_flat(tuple(element)): yield f else: yield element def _izip_fields(iterable): """ Returns an iterator of concatenated fields from a sequence of arrays. """ for element in iterable: if hasattr(element, '__iter__') and not isinstance(element, basestring): for f in _izip_fields(element): yield f elif isinstance(element, np.void) and len(tuple(element)) == 1: for f in _izip_fields(element): yield f else: yield element def izip_records(seqarrays, fill_value=None, flatten=True): """ Returns an iterator of concatenated items from a sequence of arrays. Parameters ---------- seqarray : sequence of arrays Sequence of arrays. fill_value : {None, integer} Value used to pad shorter iterables. 
flatten : {True, False}, Whether to """ # OK, that's a complete ripoff from Python2.6 itertools.izip_longest def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop): "Yields the fill_value or raises IndexError" yield counter() # fillers = itertools.repeat(fill_value) iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays] # Should we flatten the items, or just use a nested approach if flatten: zipfunc = _izip_fields_flat else: zipfunc = _izip_fields # try: for tup in itertools.izip(*iters): yield tuple(zipfunc(tup)) except IndexError: pass def _fix_output(output, usemask=True, asrecarray=False): """ Private function: return a recarray, a ndarray, a MaskedArray or a MaskedRecords depending on the input parameters """ if not isinstance(output, MaskedArray): usemask = False if usemask: if asrecarray: output = output.view(MaskedRecords) else: output = ma.filled(output) if asrecarray: output = output.view(recarray) return output def _fix_defaults(output, defaults=None): """ Update the fill_value and masked data of `output` from the default given in a dictionary defaults. """ names = output.dtype.names (data, mask, fill_value) = (output.data, output.mask, output.fill_value) for (k, v) in (defaults or {}).iteritems(): if k in names: fill_value[k] = v data[k][mask[k]] = v return output def merge_arrays(seqarrays, fill_value= -1, flatten=False, usemask=False, asrecarray=False): """ Merge arrays field by field. Parameters ---------- seqarrays : sequence of ndarrays Sequence of arrays fill_value : {float}, optional Filling value used to pad missing data on the shorter arrays. flatten : {False, True}, optional Whether to collapse nested fields. usemask : {False, True}, optional Whether to return a masked array or not. asrecarray : {False, True}, optional Whether to return a recarray (MaskedRecords) or not. Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)], mask = [(False, False) (False, False) (True, False)], fill_value = (999999, 1e+20), dtype = [('f0', '<i4'), ('f1', '<f8')]) >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])), ... usemask=False) array([(1, 10.0), (2, 20.0), (-1, 30.0)], dtype=[('f0', '<i4'), ('f1', '<f8')]) >>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]), ... np.array([10., 20., 30.])), ... usemask=False, asrecarray=True) rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)], dtype=[('a', '<i4'), ('f1', '<f8')]) Notes ----- * Without a mask, the missing value will be filled with something, * depending on what its corresponding type: -1 for integers -1.0 for floating point numbers '-' for characters '-1' for strings True for boolean values * XXX: I just obtained these values empirically """ # Only one item in the input sequence ? if (len(seqarrays) == 1): seqarrays = np.asanyarray(seqarrays[0]) # Do we have a single ndarray as input ? 
if isinstance(seqarrays, (ndarray, np.void)): seqdtype = seqarrays.dtype if (not flatten) or \ (zip_descr((seqarrays,), flatten=True) == seqdtype.descr): # Minimal processing needed: just make sure everythng's a-ok seqarrays = seqarrays.ravel() # Make sure we have named fields if not seqdtype.names: seqdtype = [('', seqdtype)] # Find what type of array we must return if usemask: if asrecarray: seqtype = MaskedRecords else: seqtype = MaskedArray elif asrecarray: seqtype = recarray else: seqtype = ndarray return seqarrays.view(dtype=seqdtype, type=seqtype) else: seqarrays = (seqarrays,) else: # Make sure we have arrays in the input sequence seqarrays = map(np.asanyarray, seqarrays) # Find the sizes of the inputs and their maximum sizes = tuple(a.size for a in seqarrays) maxlength = max(sizes) # Get the dtype of the output (flattening if needed) newdtype = zip_descr(seqarrays, flatten=flatten) # Initialize the sequences for data and mask seqdata = [] seqmask = [] # If we expect some kind of MaskedArray, make a special loop. if usemask: for (a, n) in itertools.izip(seqarrays, sizes): nbmissing = (maxlength - n) # Get the data and mask data = a.ravel().__array__() mask = ma.getmaskarray(a).ravel() # Get the filling value (if needed) if nbmissing: fval = _check_fill_value(fill_value, a.dtype) if isinstance(fval, (ndarray, np.void)): if len(fval.dtype) == 1: fval = fval.item()[0] fmsk = True else: fval = np.array(fval, dtype=a.dtype, ndmin=1) fmsk = np.ones((1,), dtype=mask.dtype) else: fval = None fmsk = True # Store an iterator padding the input to the expected length seqdata.append(itertools.chain(data, [fval] * nbmissing)) seqmask.append(itertools.chain(mask, [fmsk] * nbmissing)) # Create an iterator for the data data = tuple(izip_records(seqdata, flatten=flatten)) output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength), mask=list(izip_records(seqmask, flatten=flatten))) if asrecarray: output = output.view(MaskedRecords) else: # Same as before, without the mask we don't need... for (a, n) in itertools.izip(seqarrays, sizes): nbmissing = (maxlength - n) data = a.ravel().__array__() if nbmissing: fval = _check_fill_value(fill_value, a.dtype) if isinstance(fval, (ndarray, np.void)): if len(fval.dtype) == 1: fval = fval.item()[0] else: fval = np.array(fval, dtype=a.dtype, ndmin=1) else: fval = None seqdata.append(itertools.chain(data, [fval] * nbmissing)) output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)), dtype=newdtype, count=maxlength) if asrecarray: output = output.view(recarray) # And we're done... return output def drop_fields(base, drop_names, usemask=True, asrecarray=False): """ Return a new array with fields in `drop_names` dropped. Nested fields are supported. Parameters ---------- base : array Input array drop_names : string or sequence String or sequence of strings corresponding to the names of the fields to drop. usemask : {False, True}, optional Whether to return a masked array or not. asrecarray : string or sequence Whether to return a recarray or a mrecarray (`asrecarray=True`) or a plain ndarray or masked array with flexible dtype (`asrecarray=False`) Examples -------- >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], ... 
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) >>> rfn.drop_fields(a, 'a') array([((2.0, 3),), ((5.0, 6),)], dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])]) >>> rfn.drop_fields(a, 'ba') array([(1, (3,)), (4, (6,))], dtype=[('a', '<i4'), ('b', [('bb', '<i4')])]) >>> rfn.drop_fields(a, ['ba', 'bb']) array([(1,), (4,)], dtype=[('a', '<i4')]) """ if _is_string_like(drop_names): drop_names = [drop_names, ] else: drop_names = set(drop_names) # def _drop_descr(ndtype, drop_names): names = ndtype.names newdtype = [] for name in names: current = ndtype[name] if name in drop_names: continue if current.names: descr = _drop_descr(current, drop_names) if descr: newdtype.append((name, descr)) else: newdtype.append((name, current)) return newdtype # newdtype = _drop_descr(base.dtype, drop_names) if not newdtype: return None # output = np.empty(base.shape, dtype=newdtype) output = recursive_fill_fields(base, output) return _fix_output(output, usemask=usemask, asrecarray=asrecarray) def rec_drop_fields(base, drop_names): """ Returns a new numpy.recarray with fields in `drop_names` dropped. """ return drop_fields(base, drop_names, usemask=False, asrecarray=True) def rename_fields(base, namemapper): """ Rename the fields from a flexible-datatype ndarray or recarray. Nested fields are supported. Parameters ---------- base : ndarray Input array whose fields must be modified. namemapper : dictionary Dictionary mapping old field names to their new version. Examples -------- >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))], dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])]) """ def _recursive_rename_fields(ndtype, namemapper): newdtype = [] for name in ndtype.names: newname = namemapper.get(name, name) current = ndtype[name] if current.names: newdtype.append((newname, _recursive_rename_fields(current, namemapper))) else: newdtype.append((newname, current)) return newdtype newdtype = _recursive_rename_fields(base.dtype, namemapper) return base.view(newdtype) def append_fields(base, names, data, dtypes=None, fill_value= -1, usemask=True, asrecarray=False): """ Add new fields to an existing array. The names of the fields are given with the `names` arguments, the corresponding values with the `data` arguments. If a single field is appended, `names`, `data` and `dtypes` do not have to be lists but just values. Parameters ---------- base : array Input array to extend. names : string, sequence String or sequence of strings corresponding to the names of the new fields. data : array or sequence of arrays Array or sequence of arrays storing the fields to add to the base. dtypes : sequence of datatypes, optional Datatype or sequence of datatypes. If None, the datatypes are estimated from the `data`. fill_value : {float}, optional Filling value used to pad missing data on the shorter arrays. usemask : {False, True}, optional Whether to return a masked array or not. asrecarray : {False, True}, optional Whether to return a recarray (MaskedRecords) or not. 
""" # Check the names if isinstance(names, (tuple, list)): if len(names) != len(data): msg = "The number of arrays does not match the number of names" raise ValueError(msg) elif isinstance(names, basestring): names = [names, ] data = [data, ] # if dtypes is None: data = [np.array(a, copy=False, subok=True) for a in data] data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)] else : if not isinstance(dtypes, (tuple, list)): dtypes = [dtypes, ] if len(data) != len(dtypes): if len(dtypes) == 1: dtypes = dtypes * len(data) else: msg = "The dtypes argument must be None, a dtype, or a list." raise ValueError(msg) data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)]) for (a, n, d) in zip(data, names, dtypes)] # base = merge_arrays(base, usemask=usemask, fill_value=fill_value) if len(data) > 1: data = merge_arrays(data, flatten=True, usemask=usemask, fill_value=fill_value) else: data = data.pop() # output = ma.masked_all(max(len(base), len(data)), dtype=base.dtype.descr + data.dtype.descr) output = recursive_fill_fields(base, output) output = recursive_fill_fields(data, output) # return _fix_output(output, usemask=usemask, asrecarray=asrecarray) def rec_append_fields(base, names, data, dtypes=None): """ Add new fields to an existing array. The names of the fields are given with the `names` arguments, the corresponding values with the `data` arguments. If a single field is appended, `names`, `data` and `dtypes` do not have to be lists but just values. Parameters ---------- base : array Input array to extend. names : string, sequence String or sequence of strings corresponding to the names of the new fields. data : array or sequence of arrays Array or sequence of arrays storing the fields to add to the base. dtypes : sequence of datatypes, optional Datatype or sequence of datatypes. If None, the datatypes are estimated from the `data`. See Also -------- append_fields Returns ------- appended_array : np.recarray """ return append_fields(base, names, data=data, dtypes=dtypes, asrecarray=True, usemask=False) def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, autoconvert=False): """ Superposes arrays fields by fields Parameters ---------- seqarrays : array or sequence Sequence of input arrays. defaults : dictionary, optional Dictionary mapping field names to the corresponding default values. usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`) or a ndarray. asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. autoconvert : {False, True}, optional Whether automatically cast the type of the field to the maximum. Examples -------- >>> from numpy.lib import recfunctions as rfn >>> x = np.array([1, 2,]) >>> rfn.stack_arrays(x) is x True >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], ... 
dtype=[('A', '|S3'), ('B', float), ('C', float)]) >>> test = rfn.stack_arrays((z,zz)) >>> test masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0) ('c', 30.0, 300.0)], mask = [(False, False, True) (False, False, True) (False, False, False) (False, False, False) (False, False, False)], fill_value = ('N/A', 1e+20, 1e+20), dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')]) """ if isinstance(arrays, ndarray): return arrays elif len(arrays) == 1: return arrays[0] seqarrays = [np.asanyarray(a).ravel() for a in arrays] nrecords = [len(a) for a in seqarrays] ndtype = [a.dtype for a in seqarrays] fldnames = [d.names for d in ndtype] # dtype_l = ndtype[0] newdescr = dtype_l.descr names = [_[0] for _ in newdescr] for dtype_n in ndtype[1:]: for descr in dtype_n.descr: name = descr[0] or '' if name not in names: newdescr.append(descr) names.append(name) else: nameidx = names.index(name) current_descr = newdescr[nameidx] if autoconvert: if np.dtype(descr[1]) > np.dtype(current_descr[-1]): current_descr = list(current_descr) current_descr[-1] = descr[1] newdescr[nameidx] = tuple(current_descr) elif descr[1] != current_descr[-1]: raise TypeError("Incompatible type '%s' <> '%s'" % \ (dict(newdescr)[name], descr[1])) # Only one field: use concatenate if len(newdescr) == 1: output = ma.concatenate(seqarrays) else: # output = ma.masked_all((np.sum(nrecords),), newdescr) offset = np.cumsum(np.r_[0, nrecords]) seen = [] for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): names = a.dtype.names if names is None: output['f%i' % len(seen)][i:j] = a else: for name in n: output[name][i:j] = a[name] if name not in seen: seen.append(name) # return _fix_output(_fix_defaults(output, defaults), usemask=usemask, asrecarray=asrecarray) def find_duplicates(a, key=None, ignoremask=True, return_index=False): """ Find the duplicates in a structured array along a given key Parameters ---------- a : array-like Input array key : {string, None}, optional Name of the fields along which to check the duplicates. If None, the search is performed by records ignoremask : {True, False}, optional Whether masked data should be discarded or considered as duplicates. return_index : {False, True}, optional Whether to return the indices of the duplicated values. Examples -------- >>> from numpy.lib import recfunctions as rfn >>> ndtype = [('a', int)] >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) ... 
# XXX: judging by the output, the ignoremask flag has no effect """ a = np.asanyarray(a).ravel() # Get a dictionary of fields fields = get_fieldstructure(a.dtype) # Get the sorting data (by selecting the corresponding field) base = a if key: for f in fields[key]: base = base[f] base = base[key] # Get the sorting indices and the sorted data sortidx = base.argsort() sortedbase = base[sortidx] sorteddata = sortedbase.filled() # Compare the sorting data flag = (sorteddata[:-1] == sorteddata[1:]) # If masked data must be ignored, set the flag to false where needed if ignoremask: sortedmask = sortedbase.recordmask flag[sortedmask[1:]] = False flag = np.concatenate(([False], flag)) # We need to take the point on the left as well (else we're missing it) flag[:-1] = flag[:-1] + flag[1:] duplicates = a[sortidx][flag] if return_index: return (duplicates, sortidx[flag]) else: return duplicates def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', defaults=None, usemask=True, asrecarray=False): """ Join arrays `r1` and `r2` on key `key`. The key should be either a string or a sequence of string corresponding to the fields used to join the array. An exception is raised if the `key` field cannot be found in the two input arrays. Neither `r1` nor `r2` should have any duplicates along `key`: the presence of duplicates will make the output quite unreliable. Note that duplicates are not looked for by the algorithm. Parameters ---------- key : {string, sequence} A string or a sequence of strings corresponding to the fields used for comparison. r1, r2 : arrays Structured arrays. jointype : {'inner', 'outer', 'leftouter'}, optional If 'inner', returns the elements common to both r1 and r2. If 'outer', returns the common elements as well as the elements of r1 not in r2 and the elements of not in r2. If 'leftouter', returns the common elements and the elements of r1 not in r2. r1postfix : string, optional String appended to the names of the fields of r1 that are present in r2 but absent of the key. r2postfix : string, optional String appended to the names of the fields of r2 that are present in r1 but absent of the key. defaults : {dictionary}, optional Dictionary mapping field names to the corresponding default values. usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`) or a ndarray. asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. Notes ----- * The output is sorted along the key. * A temporary array is formed by dropping the fields not in the key for the two arrays and concatenating the result. This array is then sorted, and the common entries selected. The output is constructed by filling the fields with the selected entries. Matching is not preserved if there are some duplicates... 
""" # Check jointype if jointype not in ('inner', 'outer', 'leftouter'): raise ValueError("The 'jointype' argument should be in 'inner', "\ "'outer' or 'leftouter' (got '%s' instead)" % jointype) # If we have a single key, put it in a tuple if isinstance(key, basestring): key = (key,) # Check the keys for name in key: if name not in r1.dtype.names: raise ValueError('r1 does not have key field %s' % name) if name not in r2.dtype.names: raise ValueError('r2 does not have key field %s' % name) # Make sure we work with ravelled arrays r1 = r1.ravel() r2 = r2.ravel() (nb1, nb2) = (len(r1), len(r2)) (r1names, r2names) = (r1.dtype.names, r2.dtype.names) # Make temporary arrays of just the keys r1k = drop_fields(r1, [n for n in r1names if n not in key]) r2k = drop_fields(r2, [n for n in r2names if n not in key]) # Concatenate the two arrays for comparison aux = ma.concatenate((r1k, r2k)) idx_sort = aux.argsort(order=key) aux = aux[idx_sort] # # Get the common keys flag_in = ma.concatenate(([False], aux[1:] == aux[:-1])) flag_in[:-1] = flag_in[1:] + flag_in[:-1] idx_in = idx_sort[flag_in] idx_1 = idx_in[(idx_in < nb1)] idx_2 = idx_in[(idx_in >= nb1)] - nb1 (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) if jointype == 'inner': (r1spc, r2spc) = (0, 0) elif jointype == 'outer': idx_out = idx_sort[~flag_in] idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) elif jointype == 'leftouter': idx_out = idx_sort[~flag_in] idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) # Select the entries from each input (s1, s2) = (r1[idx_1], r2[idx_2]) # # Build the new description of the output array ....... # Start with the key fields ndtype = [list(_) for _ in r1k.dtype.descr] # Add the other fields ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key) # Find the new list of names (it may be different from r1names) names = list(_[0] for _ in ndtype) for desc in r2.dtype.descr: desc = list(desc) name = desc[0] # Have we seen the current name already ? if name in names: nameidx = names.index(name) current = ndtype[nameidx] # The current field is part of the key: take the largest dtype if name in key: current[-1] = max(desc[1], current[-1]) # The current field is not part of the key: add the suffixes else: current[0] += r1postfix desc[0] += r2postfix ndtype.insert(nameidx + 1, desc) #... we haven't: just add the description to the current list else: names.extend(desc[0]) ndtype.append(desc) # Revert the elements to tuples ndtype = [tuple(_) for _ in ndtype] # Find the largest nb of common fields : r1cmn and r2cmn should be equal, but... 
cmn = max(r1cmn, r2cmn) # Construct an empty array output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) names = output.dtype.names for f in r1names: selected = s1[f] if f not in names: f += r1postfix current = output[f] current[:r1cmn] = selected[:r1cmn] if jointype in ('outer', 'leftouter'): current[cmn:cmn + r1spc] = selected[r1cmn:] for f in r2names: selected = s2[f] if f not in names: f += r2postfix current = output[f] current[:r2cmn] = selected[:r2cmn] if (jointype == 'outer') and r2spc: current[-r2spc:] = selected[r2cmn:] # Sort and finalize the output output.sort(order=key) kwargs = dict(usemask=usemask, asrecarray=asrecarray) return _fix_output(_fix_defaults(output, defaults), **kwargs) def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', defaults=None): """ Join arrays `r1` and `r2` on keys. Alternative to join_by, that always returns a np.recarray. See Also -------- join_by : equivalent function """ kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, defaults=defaults, usemask=False, asrecarray=True) return join_by(key, r1, r2, **kwargs)
apache-2.0
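The file above ships as numpy.lib.recfunctions. As a quick orientation to the public helpers it defines, a minimal usage sketch follows; the sample arrays, field names, and printed results are illustrative and not taken from the source.

# Minimal sketch of the public API defined in the recfunctions module above.
import numpy as np
from numpy.lib import recfunctions as rfn

a = np.array([(1, 10.0), (2, 20.0)], dtype=[('id', int), ('x', float)])
b = np.array([(1, b'a'), (3, b'c')], dtype=[('id', int), ('tag', 'S1')])

# Append a new field to an existing structured array (usemask=False -> plain ndarray).
withcol = rfn.append_fields(a, 'y', data=[0.5, 1.5], usemask=False)

# Drop a field; nested fields are supported by the implementation above.
nox = rfn.drop_fields(a, 'x')

# Inner join on the shared key field.
joined = rfn.join_by('id', a, b, jointype='inner', usemask=False)

print(withcol.dtype.names)   # ('id', 'x', 'y')
print(nox.dtype.names)       # ('id',)
print(joined.dtype.names)    # ('id', 'x', 'tag')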
sovicak/AnonymniAnalytici
2018_02_15_cryptocurrencies_trading/algorithms/shared/sell_when_bear-1500964780725.py
1
3936
# Description of a bear market by investopedia # http://www.investopedia.com/terms/b/bearmarket.asp?lgl=rira-baseline-vertical from catalyst.api import order_target_percent, record, symbol, set_benchmark def initialize(context): context.ASSET_NAME = 'USDT_BTC' context.asset = symbol(context.ASSET_NAME) set_benchmark(context.asset) # For all trading pairs in the poloniex bundle, the default denomination # currently supported by Catalyst is 1/1000th of a full coin. Use this # constant to scale the price of up to that of a full coin if desired. context.TICK_SIZE = 1000.0 # Start this trading algorithm when market is bullish context.i = 0 context.IS_MARKET_BEAR = False def handle_data(context, data): # Get price history for the last two months. Find peak, bottom, and last # prices for the period price_history = data.history(context.asset, fields='price', bar_count=60, frequency="1d") peak = price_history.max() bottom = price_history.min() price = price_history.ix[-1] # Trading logic: # If current price is more than 20% lower than highest-closing price over a # 2-month period, market enters Bear territory and algorithm sells all # asset and holds only cash. Market exits bear market when prices are at # least 20% higher than lowest-closing price over a 2-month period. In this # case, algorithm invests 90% of portfolio in the asset. if price < 0.8*peak : context.IS_MARKET_BEAR = True elif price > 1.2*bottom: context.IS_MARKET_BEAR = False if context.IS_MARKET_BEAR: order_target_percent( context.asset, 0.0, ) else: order_target_percent( context.asset, 0.9, ) Portfolio_cumulative_return = (context.portfolio.portfolio_value/context.portfolio.starting_cash-1)*100 # Save values for later inspection record(price=price, peak=peak, bottom=bottom, cash=context.portfolio.cash, leverage=context.account.leverage, Portfolio_cumulative_return=Portfolio_cumulative_return, ) def analyze(context=None, results=None): import matplotlib.pyplot as plt import pandas as pd import sys import os from os.path import basename # Plot the portfolio and asset data. ax1 = plt.subplot(221) (context.TICK_SIZE * results[[ 'price', 'peak', 'bottom', ]]).plot(ax=ax1) ax1.set_ylabel('{asset} (USD)'.format(asset=context.ASSET_NAME)) trans = results.ix[[t != [] for t in results.transactions]] buys = trans.ix[ [t[0]['amount'] > 0 for t in trans.transactions] ] sells = trans.ix[ [t[0]['amount'] < 0 for t in trans.transactions] ] ax1.plot( buys.index, context.TICK_SIZE * results.price[buys.index], '^', markersize=10, color='g', ) ax1.plot( sells.index, context.TICK_SIZE * results.price[sells.index], 'v', markersize=10, color='r', ) ax2 = plt.subplot(222, sharex=ax1) ax2.set_ylabel('Percent Return (%)') results[[ 'algorithm_period_return', 'benchmark_period_return', ]].plot(ax=ax2) ax3 = plt.subplot(223, sharex=ax1) results[['leverage']].plot(ax=ax3) ax3.set_ylabel('Leverage ') ax4 = plt.subplot(224, sharex=ax1) results[['cash']].plot(ax=ax4) ax4.set_ylabel('Cash (USD)') plt.legend(loc=3) # Show the plot. plt.gcf().set_size_inches(16, 8) plt.show() # Save results in CSV file filename = os.path.splitext(basename(sys.argv[3]))[0] results.to_csv(filename + '.csv')
mit
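The trading rule in handle_data() above boils down to a threshold switch with hysteresis on a two-month price window. The sketch below isolates that signal from the Catalyst API; the function name, the synthetic price series, and the use of .iloc in place of the deprecated .ix are my own additions.

# Sketch of the bear/bull switch used above, isolated from the Catalyst API.
import pandas as pd

def target_allocation(price_history, currently_bear):
    """Return (is_bear, target_pct) from the last ~60 daily closes."""
    peak = price_history.max()
    bottom = price_history.min()
    price = price_history.iloc[-1]

    if price < 0.8 * peak:          # >20% below the 2-month high -> enter bear
        is_bear = True
    elif price > 1.2 * bottom:      # >20% above the 2-month low -> exit bear
        is_bear = False
    else:                           # otherwise keep the previous regime
        is_bear = currently_bear

    return is_bear, 0.0 if is_bear else 0.9

# A steadily falling series trips the bear switch and moves the target to cash.
closes = pd.Series([100.0 - i for i in range(60)])
print(target_allocation(closes, currently_bear=False))   # (True, 0.0)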
krez13/scikit-learn
examples/bicluster/bicluster_newsgroups.py
142
7183
""" ================================================================ Biclustering documents with the Spectral Co-clustering algorithm ================================================================ This example demonstrates the Spectral Co-clustering algorithm on the twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is excluded because it contains many posts containing nothing but data. The TF-IDF vectorized posts form a word frequency matrix, which is then biclustered using Dhillon's Spectral Co-Clustering algorithm. The resulting document-word biclusters indicate subsets words used more often in those subsets documents. For a few of the best biclusters, its most common document categories and its ten most important words get printed. The best biclusters are determined by their normalized cut. The best words are determined by comparing their sums inside and outside the bicluster. For comparison, the documents are also clustered using MiniBatchKMeans. The document clusters derived from the biclusters achieve a better V-measure than clusters found by MiniBatchKMeans. Output:: Vectorizing... Coclustering... Done in 9.53s. V-measure: 0.4455 MiniBatchKMeans... Done in 12.00s. V-measure: 0.3309 Best biclusters: ---------------- bicluster 0 : 1951 documents, 4373 words categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment bicluster 1 : 1165 documents, 3304 words categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage bicluster 2 : 2219 documents, 2830 words categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package bicluster 3 : 1860 documents, 2745 words categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes bicluster 4 : 12 documents, 155 words categories : 100% rec.sport.hockey words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved """ from __future__ import print_function print(__doc__) from collections import defaultdict import operator import re from time import time import numpy as np from sklearn.cluster.bicluster import SpectralCoclustering from sklearn.cluster import MiniBatchKMeans from sklearn.externals.six import iteritems from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.cluster import v_measure_score def number_aware_tokenizer(doc): """ Tokenizer that maps all numeric tokens to a placeholder. For many applications, tokens that begin with a number are not directly useful, but the fact that such a token exists can be relevant. By applying this form of dimensionality reduction, some methods may perform better. 
""" token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b') tokens = token_pattern.findall(doc) tokens = ["#NUMBER" if token[0] in "0123456789_" else token for token in tokens] return tokens # exclude 'comp.os.ms-windows.misc' categories = ['alt.atheism', 'comp.graphics', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc'] newsgroups = fetch_20newsgroups(categories=categories) y_true = newsgroups.target vectorizer = TfidfVectorizer(stop_words='english', min_df=5, tokenizer=number_aware_tokenizer) cocluster = SpectralCoclustering(n_clusters=len(categories), svd_method='arpack', random_state=0) kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000, random_state=0) print("Vectorizing...") X = vectorizer.fit_transform(newsgroups.data) print("Coclustering...") start_time = time() cocluster.fit(X) y_cocluster = cocluster.row_labels_ print("Done in {:.2f}s. V-measure: {:.4f}".format( time() - start_time, v_measure_score(y_cocluster, y_true))) print("MiniBatchKMeans...") start_time = time() y_kmeans = kmeans.fit_predict(X) print("Done in {:.2f}s. V-measure: {:.4f}".format( time() - start_time, v_measure_score(y_kmeans, y_true))) feature_names = vectorizer.get_feature_names() document_names = list(newsgroups.target_names[i] for i in newsgroups.target) def bicluster_ncut(i): rows, cols = cocluster.get_indices(i) if not (np.any(rows) and np.any(cols)): import sys return sys.float_info.max row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0] col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0] # Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but # much faster in scipy <= 0.16 weight = X[rows][:, cols].sum() cut = (X[row_complement][:, cols].sum() + X[rows][:, col_complement].sum()) return cut / weight def most_common(d): """Items of a defaultdict(int) with the highest values. Like Counter.most_common in Python >=2.7. """ return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True) bicluster_ncuts = list(bicluster_ncut(i) for i in range(len(newsgroups.target_names))) best_idx = np.argsort(bicluster_ncuts)[:5] print() print("Best biclusters:") print("----------------") for idx, cluster in enumerate(best_idx): n_rows, n_cols = cocluster.get_shape(cluster) cluster_docs, cluster_words = cocluster.get_indices(cluster) if not len(cluster_docs) or not len(cluster_words): continue # categories counter = defaultdict(int) for i in cluster_docs: counter[document_names[i]] += 1 cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name) for name, c in most_common(counter)[:3]) # words out_of_cluster_docs = cocluster.row_labels_ != cluster out_of_cluster_docs = np.where(out_of_cluster_docs)[0] word_col = X[:, cluster_words] word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) - word_col[out_of_cluster_docs, :].sum(axis=0)) word_scores = word_scores.ravel() important_words = list(feature_names[cluster_words[i]] for i in word_scores.argsort()[:-11:-1]) print("bicluster {} : {} documents, {} words".format( idx, n_rows, n_cols)) print("categories : {}".format(cat_string)) print("words : {}\n".format(', '.join(important_words)))
bsd-3-clause
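The co-clustering step of the example above can be exercised on synthetic data without downloading the newsgroups. The sketch below assumes a recent scikit-learn layout (SpectralCoclustering importable from sklearn.cluster rather than sklearn.cluster.bicluster as in the script above); make_biclusters and consensus_score are standard scikit-learn utilities.

# Minimal SpectralCoclustering run on planted biclusters.
from sklearn.cluster import SpectralCoclustering
from sklearn.datasets import make_biclusters
from sklearn.metrics import consensus_score

data, rows, cols = make_biclusters(shape=(300, 300), n_clusters=5,
                                   noise=5, shuffle=True, random_state=0)
model = SpectralCoclustering(n_clusters=5, svd_method='arpack', random_state=0)
model.fit(data)

# consensus_score compares the recovered biclusters with the planted ones.
print("consensus score: {:.3f}".format(
    consensus_score(model.biclusters_, (rows, cols))))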
scienceopen/ledtime
firereader.py
2
5334
#!/usr/bin/env python """ Example of reading HiST .fire file used to determine UTC time of camera frame little-endian, MSB is on left bits we don't use: 7 6 5 4 3 bits we use: 2: Ext Trig (ASIC to camera) 1: GPS 1PPS (long pulse, period 1 second) 0: Fire (camera to ASIC) first 1024 bytes are header (with lots of spare space), so don't start reading boolean till byte 1024 (zero-based indexing) Note: the np.int64 are used because some computations actually need 64-bit integers. Best to keep one integer data type for operations involving indexing in these programs to avoid weird bugs. """ from pathlib import Path from numba import jit from dateutil.parser import parse from datetime import datetime from pytz import UTC import numpy as np import logging try: from matplotlib.pyplot import figure,show except ImportError: pass epoch = datetime(1970,1,1,tzinfo=UTC) headerlengthbytes = 1024 def getut1fire(firefn,ut1start): firefn = Path(firefn).expanduser() #%% handle starttime ut1 = tout1(ut1start) #%% read data #read sample rate and fps. Both are signed 64-bit integers used directly in indexing operations with firefn.open('rb') as f: # read HEADER Ts,fps = np.fromfile(f,dtype=np.int64,count=2) print('samples/sec: ',Ts,'frames/sec: ',fps,' file: ',firefn) #skip to data area, header is 1024 bytes long, so goto byte 1024 (zero-based index) f.seek(headerlengthbytes) #NOTE: A for loop with f.seek() could read just the tiny parts of the fire file where matches are supposed to occur. #this could give speed/RAM advantages if necessary # read DATA as it comes off disk, as uint8 bytedat = np.fromfile(f,dtype=np.uint8) #%% find first fire pulse, this is where ut1start should correspond to. """ note we do this with "bytedat" because we want to avoid converting bytes to bits as that operation is RAM-expensive (takes at least 8 times the RAM) """ firstfireind = find_first(7,bytedat) #%% take samples to search for fire floatstride = Ts/fps strideind = np.rint(np.arange(firstfireind,bytedat.size,floatstride)).astype(np.int64) #round to nearest integer #%% search for fire pulses corresponding to each Ext Trig pulse ut1_unix = matchtrigfire(bytedat,strideind,ut1,fps) #%% debug booldat for plotting # this line uses 8 times as much RAM as bytedata, e.g. 
4GB for 500MB file booldat = np.unpackbits(bytedat[:500][:,None],axis=1)[:,-3:] #take first 500 samples to avoid overwhelming matplotlib return ut1_unix,booldat def tout1(ut1start): if isinstance(ut1start,str): return (parse(ut1start) - epoch).total_seconds() elif isinstance(ut1start,datetime): return (ut1start-epoch).total_seconds() elif isinstance(ut1start,(float,int)): return ut1start #assuming it's already in UT1 unix epoch else: raise ValueError('I dont understand the format of the ut1 start time youre giving me') def find_first(item, vec): """return the index of the first occurence of item in vec inputs: ------- vec: 1-D array to search for values item: scalar or 1-D array of values to search vec for credit: tal http://stackoverflow.com/questions/7632963/numpy-find-first-index-of-value-fast """ @jit # Numba jit uses C-compiled version of the code in this function def find_first_iter(item,vec): for v in range(len(vec)): for i in item: if i == vec[v]: return v @jit def find_first_sing(item,vec): for v in range(len(vec)): if item == vec[v]: return v if isinstance(item,(tuple,list)): return find_first_iter(item,vec) else: return find_first_sing(item,vec) def matchtrigfire(bytedat,strideind,ut1start,fps): bytesel = bytedat[strideind] kineticsec = 1./fps i=0 ut1 = [] for b in bytesel: if b in (5,7): #trig+fire or trig+gps+fire ut1.append(ut1start+i*kineticsec) elif b in (4,6): logging.warning(f'camera failed to take image at fire sample # {i}') else: logging.warning(f'undefined measurement {b} at fire sample # {i}') i+=1 #must advance whether fire happened or not return ut1 def plotfirebool(ut1,booldat): print('first/last camera frame',datetime.fromtimestamp(ut1[0], tz=UTC), datetime.fromtimestamp(ut1[-1],tz=UTC)) ax = figure().gca() ax.plot(booldat) ax.set_ylim(-0.01,1.01) ax.set_ylabel('boolean value') ax.set_xlabel('sample #') #FIXME label with UT1 time ax.legend(('trig','gps','fire')) if __name__ == '__main__': from time import time from argparse import ArgumentParser p = ArgumentParser(description='convert .fire files to UT1 time') p.add_argument('firefn',help='.fire filename') #'~/data/solis_runtime175.fire' p.add_argument('ut1start',help='UT1 start time of camera from NMEA GPS yyyy-mm-ddThh:mm:ssZ') #'2015-09-01T12:00:00Z' p = p.parse_args() tic = time() ut1,booldat = getut1fire(p.firefn,p.ut1start) print('{:.4f} sec. to read and convert to UT1'.format(time()-tic)) try: plotfirebool(ut1,booldat) show() except: pass
gpl-3.0
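The .fire header comment above defines bit 2 as the external trigger, bit 1 as the GPS 1PPS, and bit 0 as the fire pulse, which is why byte values 5 and 7 count as captured frames in matchtrigfire(). The sketch below decodes a few synthetic sample bytes the same way the debug slice in getut1fire() does; the byte values are made up.

# Decode the 3-bit flag layout described in the .fire header comment above.
import numpy as np

samples = np.array([7, 5, 4, 6, 0], dtype=np.uint8)

# np.unpackbits is MSB-first, so the last three columns are (trig, gps, fire),
# exactly the slice getut1fire() keeps for its debug plot.
bits = np.unpackbits(samples[:, None], axis=1)[:, -3:]
for value, (trig, gps, fire) in zip(samples, bits):
    status = 'frame taken' if value in (5, 7) else 'no frame'
    print(f'byte={value}: trig={trig} gps={gps} fire={fire} -> {status}')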
itaiin/arrow
python/benchmarks/streaming.py
10
2540
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import pandas as pd import pyarrow as pa from . import common from .common import KILOBYTE, MEGABYTE def generate_chunks(total_size, nchunks, ncols, dtype=np.dtype('int64')): rowsize = total_size // nchunks // ncols assert rowsize % dtype.itemsize == 0 def make_column(col, chunk): return np.frombuffer(common.get_random_bytes( rowsize, seed=col + 997 * chunk)).view(dtype) return [pd.DataFrame({ 'c' + str(col): make_column(col, chunk) for col in range(ncols)}) for chunk in range(nchunks)] class StreamReader(object): """ Benchmark in-memory streaming to a Pandas dataframe. """ total_size = 64 * MEGABYTE ncols = 8 chunk_sizes = [16 * KILOBYTE, 256 * KILOBYTE, 8 * MEGABYTE] param_names = ['chunk_size'] params = [chunk_sizes] def setup(self, chunk_size): # Note we're careful to stream different chunks instead of # streaming N times the same chunk, so that we avoid operating # entirely out of L1/L2. chunks = generate_chunks(self.total_size, nchunks=self.total_size // chunk_size, ncols=self.ncols) batches = [pa.RecordBatch.from_pandas(df) for df in chunks] schema = batches[0].schema sink = pa.BufferOutputStream() stream_writer = pa.RecordBatchStreamWriter(sink, schema) for batch in batches: stream_writer.write_batch(batch) self.source = sink.getvalue() def time_read_to_dataframe(self, *args): reader = pa.RecordBatchStreamReader(self.source) table = reader.read_all() df = table.to_pandas() # noqa
apache-2.0
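The benchmark above times the read half of an in-memory Arrow IPC round trip. Stripped of the benchmarking scaffolding, that round trip looks roughly like the sketch below; the DataFrame contents are illustrative and the calls mirror the ones used in StreamReader.

# Sketch of the in-memory IPC round trip used by the benchmark above.
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({'c0': list(range(1000)),
                   'c1': [float(i) for i in range(1000)]})
batch = pa.RecordBatch.from_pandas(df)

# Write the batch into an in-memory stream, as StreamReader.setup() does.
sink = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
source = sink.getvalue()

# Read it back and convert to pandas, as time_read_to_dataframe() does.
reader = pa.RecordBatchStreamReader(source)
table = reader.read_all()
print(table.to_pandas().equals(df))   # expected: True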
DGrady/pandas
pandas/tests/tseries/test_holiday.py
16
16104
import pytest from datetime import datetime import pandas.util.testing as tm from pandas import compat from pandas import DatetimeIndex from pandas.tseries.holiday import (USFederalHolidayCalendar, USMemorialDay, USThanksgivingDay, nearest_workday, next_monday_or_tuesday, next_monday, previous_friday, sunday_to_monday, Holiday, DateOffset, MO, SA, Timestamp, AbstractHolidayCalendar, get_calendar, HolidayCalendarFactory, next_workday, previous_workday, before_nearest_workday, EasterMonday, GoodFriday, after_nearest_workday, weekend_to_monday, USLaborDay, USColumbusDay, USMartinLutherKingJr, USPresidentsDay) from pytz import utc class TestCalendar(object): def setup_method(self, method): self.holiday_list = [ datetime(2012, 1, 2), datetime(2012, 1, 16), datetime(2012, 2, 20), datetime(2012, 5, 28), datetime(2012, 7, 4), datetime(2012, 9, 3), datetime(2012, 10, 8), datetime(2012, 11, 12), datetime(2012, 11, 22), datetime(2012, 12, 25)] self.start_date = datetime(2012, 1, 1) self.end_date = datetime(2012, 12, 31) def test_calendar(self): calendar = USFederalHolidayCalendar() holidays = calendar.holidays(self.start_date, self.end_date) holidays_1 = calendar.holidays( self.start_date.strftime('%Y-%m-%d'), self.end_date.strftime('%Y-%m-%d')) holidays_2 = calendar.holidays( Timestamp(self.start_date), Timestamp(self.end_date)) assert list(holidays.to_pydatetime()) == self.holiday_list assert list(holidays_1.to_pydatetime()) == self.holiday_list assert list(holidays_2.to_pydatetime()) == self.holiday_list def test_calendar_caching(self): # Test for issue #9552 class TestCalendar(AbstractHolidayCalendar): def __init__(self, name=None, rules=None): super(TestCalendar, self).__init__(name=name, rules=rules) jan1 = TestCalendar(rules=[Holiday('jan1', year=2015, month=1, day=1)]) jan2 = TestCalendar(rules=[Holiday('jan2', year=2015, month=1, day=2)]) tm.assert_index_equal(jan1.holidays(), DatetimeIndex(['01-Jan-2015'])) tm.assert_index_equal(jan2.holidays(), DatetimeIndex(['02-Jan-2015'])) def test_calendar_observance_dates(self): # Test for issue 11477 USFedCal = get_calendar('USFederalHolidayCalendar') holidays0 = USFedCal.holidays(datetime(2015, 7, 3), datetime( 2015, 7, 3)) # <-- same start and end dates holidays1 = USFedCal.holidays(datetime(2015, 7, 3), datetime( 2015, 7, 6)) # <-- different start and end dates holidays2 = USFedCal.holidays(datetime(2015, 7, 3), datetime( 2015, 7, 3)) # <-- same start and end dates tm.assert_index_equal(holidays0, holidays1) tm.assert_index_equal(holidays0, holidays2) def test_rule_from_name(self): USFedCal = get_calendar('USFederalHolidayCalendar') assert USFedCal.rule_from_name('Thanksgiving') == USThanksgivingDay class TestHoliday(object): def setup_method(self, method): self.start_date = datetime(2011, 1, 1) self.end_date = datetime(2020, 12, 31) def check_results(self, holiday, start, end, expected): assert list(holiday.dates(start, end)) == expected # Verify that timezone info is preserved. 
assert (list(holiday.dates(utc.localize(Timestamp(start)), utc.localize(Timestamp(end)))) == [utc.localize(dt) for dt in expected]) def test_usmemorialday(self): self.check_results(holiday=USMemorialDay, start=self.start_date, end=self.end_date, expected=[ datetime(2011, 5, 30), datetime(2012, 5, 28), datetime(2013, 5, 27), datetime(2014, 5, 26), datetime(2015, 5, 25), datetime(2016, 5, 30), datetime(2017, 5, 29), datetime(2018, 5, 28), datetime(2019, 5, 27), datetime(2020, 5, 25), ], ) def test_non_observed_holiday(self): self.check_results( Holiday('July 4th Eve', month=7, day=3), start="2001-01-01", end="2003-03-03", expected=[ Timestamp('2001-07-03 00:00:00'), Timestamp('2002-07-03 00:00:00') ] ) self.check_results( Holiday('July 4th Eve', month=7, day=3, days_of_week=(0, 1, 2, 3)), start="2001-01-01", end="2008-03-03", expected=[ Timestamp('2001-07-03 00:00:00'), Timestamp('2002-07-03 00:00:00'), Timestamp('2003-07-03 00:00:00'), Timestamp('2006-07-03 00:00:00'), Timestamp('2007-07-03 00:00:00'), ] ) def test_easter(self): self.check_results(EasterMonday, start=self.start_date, end=self.end_date, expected=[ Timestamp('2011-04-25 00:00:00'), Timestamp('2012-04-09 00:00:00'), Timestamp('2013-04-01 00:00:00'), Timestamp('2014-04-21 00:00:00'), Timestamp('2015-04-06 00:00:00'), Timestamp('2016-03-28 00:00:00'), Timestamp('2017-04-17 00:00:00'), Timestamp('2018-04-02 00:00:00'), Timestamp('2019-04-22 00:00:00'), Timestamp('2020-04-13 00:00:00'), ], ) self.check_results(GoodFriday, start=self.start_date, end=self.end_date, expected=[ Timestamp('2011-04-22 00:00:00'), Timestamp('2012-04-06 00:00:00'), Timestamp('2013-03-29 00:00:00'), Timestamp('2014-04-18 00:00:00'), Timestamp('2015-04-03 00:00:00'), Timestamp('2016-03-25 00:00:00'), Timestamp('2017-04-14 00:00:00'), Timestamp('2018-03-30 00:00:00'), Timestamp('2019-04-19 00:00:00'), Timestamp('2020-04-10 00:00:00'), ], ) def test_usthanksgivingday(self): self.check_results(USThanksgivingDay, start=self.start_date, end=self.end_date, expected=[ datetime(2011, 11, 24), datetime(2012, 11, 22), datetime(2013, 11, 28), datetime(2014, 11, 27), datetime(2015, 11, 26), datetime(2016, 11, 24), datetime(2017, 11, 23), datetime(2018, 11, 22), datetime(2019, 11, 28), datetime(2020, 11, 26), ], ) def test_holidays_within_dates(self): # Fix holiday behavior found in #11477 # where holiday.dates returned dates outside start/end date # or observed rules could not be applied as the holiday # was not in the original date range (e.g., 7/4/2015 -> 7/3/2015) start_date = datetime(2015, 7, 1) end_date = datetime(2015, 7, 1) calendar = get_calendar('USFederalHolidayCalendar') new_years = calendar.rule_from_name('New Years Day') july_4th = calendar.rule_from_name('July 4th') veterans_day = calendar.rule_from_name('Veterans Day') christmas = calendar.rule_from_name('Christmas') # Holiday: (start/end date, holiday) holidays = {USMemorialDay: ("2015-05-25", "2015-05-25"), USLaborDay: ("2015-09-07", "2015-09-07"), USColumbusDay: ("2015-10-12", "2015-10-12"), USThanksgivingDay: ("2015-11-26", "2015-11-26"), USMartinLutherKingJr: ("2015-01-19", "2015-01-19"), USPresidentsDay: ("2015-02-16", "2015-02-16"), GoodFriday: ("2015-04-03", "2015-04-03"), EasterMonday: [("2015-04-06", "2015-04-06"), ("2015-04-05", [])], new_years: [("2015-01-01", "2015-01-01"), ("2011-01-01", []), ("2010-12-31", "2010-12-31")], july_4th: [("2015-07-03", "2015-07-03"), ("2015-07-04", [])], veterans_day: [("2012-11-11", []), ("2012-11-12", "2012-11-12")], christmas: [("2011-12-25", []), 
("2011-12-26", "2011-12-26")]} for rule, dates in compat.iteritems(holidays): empty_dates = rule.dates(start_date, end_date) assert empty_dates.tolist() == [] if isinstance(dates, tuple): dates = [dates] for start, expected in dates: if len(expected): expected = [Timestamp(expected)] self.check_results(rule, start, start, expected) def test_argument_types(self): holidays = USThanksgivingDay.dates(self.start_date, self.end_date) holidays_1 = USThanksgivingDay.dates( self.start_date.strftime('%Y-%m-%d'), self.end_date.strftime('%Y-%m-%d')) holidays_2 = USThanksgivingDay.dates( Timestamp(self.start_date), Timestamp(self.end_date)) tm.assert_index_equal(holidays, holidays_1) tm.assert_index_equal(holidays, holidays_2) def test_special_holidays(self): base_date = [datetime(2012, 5, 28)] holiday_1 = Holiday('One-Time', year=2012, month=5, day=28) holiday_2 = Holiday('Range', month=5, day=28, start_date=datetime(2012, 1, 1), end_date=datetime(2012, 12, 31), offset=DateOffset(weekday=MO(1))) assert base_date == holiday_1.dates(self.start_date, self.end_date) assert base_date == holiday_2.dates(self.start_date, self.end_date) def test_get_calendar(self): class TestCalendar(AbstractHolidayCalendar): rules = [] calendar = get_calendar('TestCalendar') assert TestCalendar == calendar.__class__ def test_factory(self): class_1 = HolidayCalendarFactory('MemorialDay', AbstractHolidayCalendar, USMemorialDay) class_2 = HolidayCalendarFactory('Thansksgiving', AbstractHolidayCalendar, USThanksgivingDay) class_3 = HolidayCalendarFactory('Combined', class_1, class_2) assert len(class_1.rules) == 1 assert len(class_2.rules) == 1 assert len(class_3.rules) == 2 class TestObservanceRules(object): def setup_method(self, method): self.we = datetime(2014, 4, 9) self.th = datetime(2014, 4, 10) self.fr = datetime(2014, 4, 11) self.sa = datetime(2014, 4, 12) self.su = datetime(2014, 4, 13) self.mo = datetime(2014, 4, 14) self.tu = datetime(2014, 4, 15) def test_next_monday(self): assert next_monday(self.sa) == self.mo assert next_monday(self.su) == self.mo def test_next_monday_or_tuesday(self): assert next_monday_or_tuesday(self.sa) == self.mo assert next_monday_or_tuesday(self.su) == self.tu assert next_monday_or_tuesday(self.mo) == self.tu def test_previous_friday(self): assert previous_friday(self.sa) == self.fr assert previous_friday(self.su) == self.fr def test_sunday_to_monday(self): assert sunday_to_monday(self.su) == self.mo def test_nearest_workday(self): assert nearest_workday(self.sa) == self.fr assert nearest_workday(self.su) == self.mo assert nearest_workday(self.mo) == self.mo def test_weekend_to_monday(self): assert weekend_to_monday(self.sa) == self.mo assert weekend_to_monday(self.su) == self.mo assert weekend_to_monday(self.mo) == self.mo def test_next_workday(self): assert next_workday(self.sa) == self.mo assert next_workday(self.su) == self.mo assert next_workday(self.mo) == self.tu def test_previous_workday(self): assert previous_workday(self.sa) == self.fr assert previous_workday(self.su) == self.fr assert previous_workday(self.tu) == self.mo def test_before_nearest_workday(self): assert before_nearest_workday(self.sa) == self.th assert before_nearest_workday(self.su) == self.fr assert before_nearest_workday(self.tu) == self.mo def test_after_nearest_workday(self): assert after_nearest_workday(self.sa) == self.mo assert after_nearest_workday(self.su) == self.tu assert after_nearest_workday(self.fr) == self.mo class TestFederalHolidayCalendar(object): def test_no_mlk_before_1986(self): # see gh-10278 
class MLKCalendar(AbstractHolidayCalendar): rules = [USMartinLutherKingJr] holidays = MLKCalendar().holidays(start='1984', end='1988').to_pydatetime().tolist() # Testing to make sure holiday is not incorrectly observed before 1986 assert holidays == [datetime(1986, 1, 20, 0, 0), datetime(1987, 1, 19, 0, 0)] def test_memorial_day(self): class MemorialDay(AbstractHolidayCalendar): rules = [USMemorialDay] holidays = MemorialDay().holidays(start='1971', end='1980').to_pydatetime().tolist() # Fixes 5/31 error and checked manually against Wikipedia assert holidays == [datetime(1971, 5, 31, 0, 0), datetime(1972, 5, 29, 0, 0), datetime(1973, 5, 28, 0, 0), datetime(1974, 5, 27, 0, 0), datetime(1975, 5, 26, 0, 0), datetime(1976, 5, 31, 0, 0), datetime(1977, 5, 30, 0, 0), datetime(1978, 5, 29, 0, 0), datetime(1979, 5, 28, 0, 0)] class TestHolidayConflictingArguments(object): def test_both_offset_observance_raises(self): # see gh-10217 with pytest.raises(NotImplementedError): Holiday("Cyber Monday", month=11, day=1, offset=[DateOffset(weekday=SA(4))], observance=next_monday)
bsd-3-clause
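The tests above exercise pandas.tseries.holiday. For reference, the same machinery can define a custom calendar in a few lines; the 'Company Day' holiday in the sketch below is invented for illustration, while USMemorialDay and nearest_workday come from pandas itself.

# Sketch of the calendar API exercised by the tests above.
from pandas.tseries.holiday import (AbstractHolidayCalendar, Holiday,
                                    USMemorialDay, nearest_workday)

class ExampleCalendar(AbstractHolidayCalendar):
    rules = [
        USMemorialDay,
        Holiday('Company Day', month=7, day=3, observance=nearest_workday),
    ]

cal = ExampleCalendar()
print(cal.holidays(start='2015-01-01', end='2015-12-31').tolist())
# [Timestamp('2015-05-25 00:00:00'), Timestamp('2015-07-03 00:00:00')]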
mlyundin/scikit-learn
sklearn/utils/validation.py
29
24630
"""Utilities for input validation""" # Authors: Olivier Grisel # Gael Varoquaux # Andreas Mueller # Lars Buitinck # Alexandre Gramfort # Nicolas Tresegnie # License: BSD 3 clause import warnings import numbers import numpy as np import scipy.sparse as sp from ..externals import six from ..utils.fixes import signature FLOAT_DTYPES = (np.float64, np.float32, np.float16) class DataConversionWarning(UserWarning): """A warning on implicit data conversions happening in the code""" pass warnings.simplefilter("always", DataConversionWarning) class NonBLASDotWarning(UserWarning): """A warning on implicit dispatch to numpy.dot""" class NotFittedError(ValueError, AttributeError): """Exception class to raise if estimator is used before fitting This class inherits from both ValueError and AttributeError to help with exception handling and backward compatibility. """ # Silenced by default to reduce verbosity. Turn on at runtime for # performance profiling. warnings.simplefilter('ignore', NonBLASDotWarning) def _assert_all_finite(X): """Like assert_all_finite, but only for ndarray.""" X = np.asanyarray(X) # First try an O(n) time, O(1) space solution for the common case that # everything is finite; fall back to O(n) space np.isfinite to prevent # false positives from overflow in sum method. if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum()) and not np.isfinite(X).all()): raise ValueError("Input contains NaN, infinity" " or a value too large for %r." % X.dtype) def assert_all_finite(X): """Throw a ValueError if X contains NaN or infinity. Input MUST be an np.ndarray instance or a scipy.sparse matrix.""" _assert_all_finite(X.data if sp.issparse(X) else X) def as_float_array(X, copy=True, force_all_finite=True): """Converts an array-like to an array of floats The new dtype will be np.float32 or np.float64, depending on the original type. The function can create a copy or modify the argument depending on the argument copy. Parameters ---------- X : {array-like, sparse matrix} copy : bool, optional If True, a copy of X will be created. If False, a copy may still be returned if X's dtype is not a floating point type. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. Returns ------- XT : {array, sparse matrix} An array of type np.float """ if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray) and not sp.issparse(X)): return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64, copy=copy, force_all_finite=force_all_finite, ensure_2d=False) elif sp.issparse(X) and X.dtype in [np.float32, np.float64]: return X.copy() if copy else X elif X.dtype in [np.float32, np.float64]: # is numpy array return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X else: return X.astype(np.float32 if X.dtype == np.int32 else np.float64) def _is_arraylike(x): """Returns whether the input is array-like""" return (hasattr(x, '__len__') or hasattr(x, 'shape') or hasattr(x, '__array__')) def _num_samples(x): """Return number of samples in array-like x.""" if hasattr(x, 'fit'): # Don't get num_samples from an ensembles length! raise TypeError('Expected sequence or array-like, got ' 'estimator %s' % x) if not hasattr(x, '__len__') and not hasattr(x, 'shape'): if hasattr(x, '__array__'): x = np.asarray(x) else: raise TypeError("Expected sequence or array-like, got %s" % type(x)) if hasattr(x, 'shape'): if len(x.shape) == 0: raise TypeError("Singleton array %r cannot be considered" " a valid collection." 
% x) return x.shape[0] else: return len(x) def _shape_repr(shape): """Return a platform independent reprensentation of an array shape Under Python 2, the `long` type introduces an 'L' suffix when using the default %r format for tuples of integers (typically used to store the shape of an array). Under Windows 64 bit (and Python 2), the `long` type is used by default in numpy shapes even when the integer dimensions are well below 32 bit. The platform specific type causes string messages or doctests to change from one platform to another which is not desirable. Under Python 3, there is no more `long` type so the `L` suffix is never introduced in string representation. >>> _shape_repr((1, 2)) '(1, 2)' >>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2 >>> _shape_repr((one, 2 * one)) '(1, 2)' >>> _shape_repr((1,)) '(1,)' >>> _shape_repr(()) '()' """ if len(shape) == 0: return "()" joined = ", ".join("%d" % e for e in shape) if len(shape) == 1: # special notation for singleton tuples joined += ',' return "(%s)" % joined def check_consistent_length(*arrays): """Check that all arrays have consistent first dimensions. Checks whether all objects in arrays have the same shape or length. Parameters ---------- *arrays : list or tuple of input objects. Objects that will be checked for consistent length. """ uniques = np.unique([_num_samples(X) for X in arrays if X is not None]) if len(uniques) > 1: raise ValueError("Found arrays with inconsistent numbers of samples: " "%s" % str(uniques)) def indexable(*iterables): """Make arrays indexable for cross-validation. Checks consistent length, passes through None, and ensures that everything can be indexed by converting sparse matrices to csr and converting non-interable objects to arrays. Parameters ---------- *iterables : lists, dataframes, arrays, sparse matrices List of objects to ensure sliceability. """ result = [] for X in iterables: if sp.issparse(X): result.append(X.tocsr()) elif hasattr(X, "__getitem__") or hasattr(X, "iloc"): result.append(X) elif X is None: result.append(X) else: result.append(np.array(X)) check_consistent_length(*result) return result def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy, force_all_finite): """Convert a sparse matrix to a given format. Checks the sparse format of spmatrix and converts if necessary. Parameters ---------- spmatrix : scipy sparse matrix Input to validate and convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats ('csc', 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type or None (default=none) Data type of result. If None, the dtype of the input is preserved. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. Returns ------- spmatrix_converted : scipy sparse matrix. Matrix that is ensured to have an allowed type. """ if accept_sparse in [None, False]: raise TypeError('A sparse matrix was passed, but dense ' 'data is required. 
Use X.toarray() to ' 'convert to a dense numpy array.') if dtype is None: dtype = spmatrix.dtype changed_format = False if (isinstance(accept_sparse, (list, tuple)) and spmatrix.format not in accept_sparse): # create new with correct sparse spmatrix = spmatrix.asformat(accept_sparse[0]) changed_format = True if dtype != spmatrix.dtype: # convert dtype spmatrix = spmatrix.astype(dtype) elif copy and not changed_format: # force copy spmatrix = spmatrix.copy() if force_all_finite: if not hasattr(spmatrix, "data"): warnings.warn("Can't check %s sparse matrix for nan or inf." % spmatrix.format) else: _assert_all_finite(spmatrix.data) return spmatrix def check_array(array, accept_sparse=None, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, ensure_min_samples=1, ensure_min_features=1, warn_on_dtype=False, estimator=None): """Input validation on an array, list, sparse matrix or similar. By default, the input is converted to an at least 2nd numpy array. If the dtype of the array is object, attempt converting to float, raising on failure. Parameters ---------- array : object Input object to check / convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type, list of types or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. ensure_2d : boolean (default=True) Whether to make X at least 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. ensure_min_samples : int (default=1) Make sure that the array has a minimum number of samples in its first axis (rows for a 2D array). Setting to 0 disables this check. ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when the input data has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. warn_on_dtype : boolean (default=False) Raise DataConversionWarning if the dtype of the input data structure does not match the requested dtype, causing a memory copy. estimator : str or estimator instance (default=None) If passed, include the name of the estimator in warning messages. Returns ------- X_converted : object The converted and validated X. """ if isinstance(accept_sparse, str): accept_sparse = [accept_sparse] # store whether originally we wanted numeric dtype dtype_numeric = dtype == "numeric" dtype_orig = getattr(array, "dtype", None) if not hasattr(dtype_orig, 'kind'): # not a data type (e.g. a column named dtype in a pandas DataFrame) dtype_orig = None if dtype_numeric: if dtype_orig is not None and dtype_orig.kind == "O": # if input is object, convert to float. 
dtype = np.float64 else: dtype = None if isinstance(dtype, (list, tuple)): if dtype_orig is not None and dtype_orig in dtype: # no dtype conversion required dtype = None else: # dtype conversion required. Let's select the first element of the # list of accepted types. dtype = dtype[0] if sp.issparse(array): array = _ensure_sparse_format(array, accept_sparse, dtype, copy, force_all_finite) else: array = np.array(array, dtype=dtype, order=order, copy=copy) if ensure_2d: if array.ndim == 1: warnings.warn( "Passing 1d arrays as data is deprecated in 0.17 and will" "raise ValueError in 0.19. Reshape your data either using " "X.reshape(-1, 1) if your data has a single feature or " "X.reshape(1, -1) if it contains a single sample.", DeprecationWarning) array = np.atleast_2d(array) # To ensure that array flags are maintained array = np.array(array, dtype=dtype, order=order, copy=copy) # make sure we acually converted to numeric: if dtype_numeric and array.dtype.kind == "O": array = array.astype(np.float64) if not allow_nd and array.ndim >= 3: raise ValueError("Found array with dim %d. Expected <= 2" % array.ndim) if force_all_finite: _assert_all_finite(array) shape_repr = _shape_repr(array.shape) if ensure_min_samples > 0: n_samples = _num_samples(array) if n_samples < ensure_min_samples: raise ValueError("Found array with %d sample(s) (shape=%s) while a" " minimum of %d is required." % (n_samples, shape_repr, ensure_min_samples)) if ensure_min_features > 0 and array.ndim == 2: n_features = array.shape[1] if n_features < ensure_min_features: raise ValueError("Found array with %d feature(s) (shape=%s) while" " a minimum of %d is required." % (n_features, shape_repr, ensure_min_features)) if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig: msg = ("Data with input dtype %s was converted to %s" % (dtype_orig, array.dtype)) if estimator is not None: if not isinstance(estimator, six.string_types): estimator = estimator.__class__.__name__ msg += " by %s" % estimator warnings.warn(msg, DataConversionWarning) return array def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, multi_output=False, ensure_min_samples=1, ensure_min_features=1, y_numeric=False, warn_on_dtype=False, estimator=None): """Input validation for standard estimators. Checks X and y for consistent length, enforces X 2d and y 1d. Standard input checks are only applied to y. For multi-label y, set multi_output=True to allow 2d and sparse y. If the dtype of X is object, attempt converting to float, raising on failure. Parameters ---------- X : nd-array, list or sparse matrix Input data. y : nd-array, list or sparse matrix Labels. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type, list of types or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. 
If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. ensure_2d : boolean (default=True) Whether to make X at least 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. multi_output : boolean (default=False) Whether to allow 2-d y (array or sparse matrix). If false, y will be validated as a vector. ensure_min_samples : int (default=1) Make sure that X has a minimum number of samples in its first axis (rows for a 2D array). ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when X has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. y_numeric : boolean (default=False) Whether to ensure that y has a numeric type. If dtype of y is object, it is converted to float64. Should only be used for regression algorithms. warn_on_dtype : boolean (default=False) Raise DataConversionWarning if the dtype of the input data structure does not match the requested dtype, causing a memory copy. estimator : str or estimator instance (default=None) If passed, include the name of the estimator in warning messages. Returns ------- X_converted : object The converted and validated X. y_converted : object The converted and validated y. """ X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator) if multi_output: y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False, dtype=None) else: y = column_or_1d(y, warn=True) _assert_all_finite(y) if y_numeric and y.dtype.kind == 'O': y = y.astype(np.float64) check_consistent_length(X, y) return X, y def column_or_1d(y, warn=False): """ Ravel column or 1d numpy array, else raises an error Parameters ---------- y : array-like warn : boolean, default False To control display of warnings. Returns ------- y : array """ shape = np.shape(y) if len(shape) == 1: return np.ravel(y) if len(shape) == 2 and shape[1] == 1: if warn: warnings.warn("A column-vector y was passed when a 1d array was" " expected. Please change the shape of y to " "(n_samples, ), for example using ravel().", DataConversionWarning, stacklevel=2) return np.ravel(y) raise ValueError("bad input shape {0}".format(shape)) def check_random_state(seed): """Turn seed into a np.random.RandomState instance If seed is None, return the RandomState singleton used by np.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError. """ if seed is None or seed is np.random: return np.random.mtrand._rand if isinstance(seed, (numbers.Integral, np.integer)): return np.random.RandomState(seed) if isinstance(seed, np.random.RandomState): return seed raise ValueError('%r cannot be used to seed a numpy.random.RandomState' ' instance' % seed) def has_fit_parameter(estimator, parameter): """Checks whether the estimator's fit method supports the given parameter. Examples -------- >>> from sklearn.svm import SVC >>> has_fit_parameter(SVC(), "sample_weight") True """ return parameter in signature(estimator.fit).parameters def check_symmetric(array, tol=1E-10, raise_warning=True, raise_exception=False): """Make sure that array is 2D, square and symmetric. If the array is not symmetric, then a symmetrized version is returned. 
Optionally, a warning or exception is raised if the matrix is not symmetric. Parameters ---------- array : nd-array or sparse matrix Input object to check / convert. Must be two-dimensional and square, otherwise a ValueError will be raised. tol : float Absolute tolerance for equivalence of arrays. Default = 1E-10. raise_warning : boolean (default=True) If True then raise a warning if conversion is required. raise_exception : boolean (default=False) If True then raise an exception if array is not symmetric. Returns ------- array_sym : ndarray or sparse matrix Symmetrized version of the input array, i.e. the average of array and array.transpose(). If sparse, then duplicate entries are first summed and zeros are eliminated. """ if (array.ndim != 2) or (array.shape[0] != array.shape[1]): raise ValueError("array must be 2-dimensional and square. " "shape = {0}".format(array.shape)) if sp.issparse(array): diff = array - array.T # only csr, csc, and coo have `data` attribute if diff.format not in ['csr', 'csc', 'coo']: diff = diff.tocsr() symmetric = np.all(abs(diff.data) < tol) else: symmetric = np.allclose(array, array.T, atol=tol) if not symmetric: if raise_exception: raise ValueError("Array must be symmetric") if raise_warning: warnings.warn("Array is not symmetric, and will be converted " "to symmetric by average with its transpose.") if sp.issparse(array): conversion = 'to' + array.format array = getattr(0.5 * (array + array.T), conversion)() else: array = 0.5 * (array + array.T) return array def check_is_fitted(estimator, attributes, msg=None, all_or_any=all): """Perform is_fitted validation for estimator. Checks if the estimator is fitted by verifying the presence of "all_or_any" of the passed attributes and raises a NotFittedError with the given message. Parameters ---------- estimator : estimator instance. estimator instance for which the check is performed. attributes : attribute name(s) given as string or a list/tuple of strings Eg. : ["coef_", "estimator_", ...], "coef_" msg : string The default error message is, "This %(name)s instance is not fitted yet. Call 'fit' with appropriate arguments before using this method." For custom messages if "%(name)s" is present in the message string, it is substituted for the estimator name. Eg. : "Estimator, %(name)s, must be fitted before sparsifying". all_or_any : callable, {all, any}, default all Specify whether all or any of the given attributes must exist. """ if msg is None: msg = ("This %(name)s instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this method.") if not hasattr(estimator, 'fit'): raise TypeError("%s is not an estimator instance." % (estimator)) if not isinstance(attributes, (list, tuple)): attributes = [attributes] if not all_or_any([hasattr(estimator, attr) for attr in attributes]): raise NotFittedError(msg % {'name': type(estimator).__name__}) def check_non_negative(X, whom): """ Check if there is any negative value in an array. Parameters ---------- X : array-like or sparse matrix Input data. whom : string Who passed X to this function. """ X = X.data if sp.issparse(X) else X if (X < 0).any(): raise ValueError("Negative values in data passed to %s" % whom)
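# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal, purely
# illustrative estimator showing how the validation helpers above are
# typically combined inside ``fit`` and ``predict``. The class and attribute
# names are hypothetical; only the public helpers defined in this file and
# numpy are assumed.
class _ToyLinearRegressor(object):
    """Least-squares fit used only to illustrate the validation helpers."""

    def fit(self, X, y):
        # validate/convert X and y together, forcing a numeric float dtype
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        self.coef_, _, _, _ = np.linalg.lstsq(X, y)
        return self

    def predict(self, X):
        # raises NotFittedError if fit() has not been called
        check_is_fitted(self, 'coef_')
        X = check_array(X, dtype=np.float64)
        return np.dot(X, self.coef_)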
bsd-3-clause
brianmckenna/sci-wms
wms/mpl_handler.py
1
10220
# -*- coding: utf-8 -*- from django.http import HttpResponse import pyproj import numpy as np import matplotlib as mpl from matplotlib.figure import Figure from matplotlib.backends.backend_agg import FigureCanvasAgg from wms import logger import wms.add_cmaps DEFAULT_HATCHES = ['.', '+', '*', '-', '/', ',', '\\', 'x', 'o', '[', ']', '^', '_', '`', '#', '"', "'", '(', ')', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~'] def _get_common_params(request): bbox = request.GET['bbox'] width = request.GET['width'] height = request.GET['height'] colormap = request.GET['colormap'] colorscalerange = request.GET['colorscalerange'] cmin = colorscalerange.min cmax = colorscalerange.max crs = request.GET['crs'] params = (bbox, width, height, colormap, cmin, cmax, crs ) return params def tripcolor_response(tri_subset, data, request, data_location=None, dpi=None): """ triang_subset is a matplotlib.Tri object in lat/lon units (will be converted to projected coordinates) xmin, ymin, xmax, ymax is the bounding pox of the plot in PROJETED COORDINATES!!! request is the original getMap request object """ dpi = dpi or 80. bbox = request.GET['bbox'] width = request.GET['width'] height = request.GET['height'] colormap = request.GET['colormap'] colorscalerange = request.GET['colorscalerange'] cmin = colorscalerange.min cmax = colorscalerange.max crs = request.GET['crs'] EPSG4326 = pyproj.Proj(init='EPSG:4326') tri_subset.x, tri_subset.y = pyproj.transform(EPSG4326, crs, tri_subset.x, tri_subset.y) fig = Figure(dpi=dpi, facecolor='none', edgecolor='none') fig.set_alpha(0) fig.set_figheight(height/dpi) fig.set_figwidth(width/dpi) ax = fig.add_axes([0., 0., 1., 1.], xticks=[], yticks=[]) ax.set_axis_off() if request.GET['logscale'] is True: norm_func = mpl.colors.LogNorm else: norm_func = mpl.colors.Normalize # Set out of bound data to NaN so it shows transparent? # Set to black like ncWMS? # Configurable by user? if cmin is not None and cmax is not None: norm = norm_func(vmin=cmin, vmax=cmax, clip=True) else: norm = norm_func() if data_location == 'face': ax.tripcolor(tri_subset, facecolors=data, edgecolors='none', norm=norm, cmap=colormap) else: ax.tripcolor(tri_subset, data, edgecolors='none', norm=norm, cmap=colormap) ax.set_xlim(bbox.minx, bbox.maxx) ax.set_ylim(bbox.miny, bbox.maxy) ax.set_frame_on(False) ax.set_clip_on(False) ax.set_position([0., 0., 1., 1.]) canvas = FigureCanvasAgg(fig) response = HttpResponse(content_type='image/png') canvas.print_png(response) return response def tricontouring_response(tri_subset, data, request, dpi=None): """ triang_subset is a matplotlib.Tri object in lat/lon units (will be converted to projected coordinates) xmin, ymin, xmax, ymax is the bounding pox of the plot in PROJETED COORDINATES!!! request is the original getMap request object """ dpi = dpi or 80. 
bbox = request.GET['bbox'] width = request.GET['width'] height = request.GET['height'] colormap = request.GET['colormap'] colorscalerange = request.GET['colorscalerange'] cmin = colorscalerange.min cmax = colorscalerange.max crs = request.GET['crs'] nlvls = request.GET['numcontours'] EPSG4326 = pyproj.Proj(init='EPSG:4326') tri_subset.x, tri_subset.y = pyproj.transform(EPSG4326, crs, tri_subset.x, tri_subset.y) fig = Figure(dpi=dpi, facecolor='none', edgecolor='none') fig.set_alpha(0) fig.set_figheight(height/dpi) fig.set_figwidth(width/dpi) ax = fig.add_axes([0., 0., 1., 1.], xticks=[], yticks=[]) ax.set_axis_off() if request.GET['logscale'] is True: norm_func = mpl.colors.LogNorm else: norm_func = mpl.colors.Normalize # Set out of bound data to NaN so it shows transparent? # Set to black like ncWMS? # Configurable by user? if cmin is not None and cmax is not None: lvls = np.linspace(cmin, cmax, nlvls) norm = norm_func(vmin=cmin, vmax=cmax, clip=True) else: lvls = nlvls norm = norm_func() if request.GET['image_type'] == 'filledcontours': ax.tricontourf(tri_subset, data, lvls, norm=norm, cmap=colormap, extend='both') elif request.GET['image_type'] == 'contours': ax.tricontour(tri_subset, data, lvls, norm=norm, cmap=colormap, extend='both') ax.set_xlim(bbox.minx, bbox.maxx) ax.set_ylim(bbox.miny, bbox.maxy) ax.set_frame_on(False) ax.set_clip_on(False) ax.set_position([0., 0., 1., 1.]) canvas = FigureCanvasAgg(fig) response = HttpResponse(content_type='image/png') canvas.print_png(response) return response def quiver_response(lon, lat, dx, dy, request, dpi=None): dpi = dpi or 80. bbox = request.GET['bbox'] width = request.GET['width'] height = request.GET['height'] colormap = request.GET['colormap'] colorscalerange = request.GET['colorscalerange'] vectorscale = request.GET['vectorscale'] cmin = colorscalerange.min cmax = colorscalerange.max crs = request.GET['crs'] unit_vectors = None # We don't support requesting these yet, but wouldn't be hard EPSG4326 = pyproj.Proj(init='EPSG:4326') x, y = pyproj.transform(EPSG4326, crs, lon, lat) # TODO order for non-inverse? fig = Figure(dpi=dpi, facecolor='none', edgecolor='none') fig.set_alpha(0) fig.set_figheight(height/dpi) fig.set_figwidth(width/dpi) ax = fig.add_axes([0., 0., 1., 1.], xticks=[], yticks=[]) ax.set_axis_off() mags = np.sqrt(dx**2 + dy**2) cmap = mpl.cm.get_cmap(colormap) if request.GET['logscale'] is True: norm_func = mpl.colors.LogNorm else: norm_func = mpl.colors.Normalize # Set out of bound data to NaN so it shows transparent? # Set to black like ncWMS? # Configurable by user? if cmin is not None and cmax is not None: norm = norm_func(vmin=cmin, vmax=cmax, clip=True) else: norm = norm_func() # plot unit vectors if unit_vectors: ax.quiver(x, y, dx/mags, dy/mags, mags, cmap=cmap, norm=norm, scale=vectorscale) else: ax.quiver(x, y, dx, dy, mags, cmap=cmap, norm=norm, scale=vectorscale) ax.set_xlim(bbox.minx, bbox.maxx) ax.set_ylim(bbox.miny, bbox.maxy) ax.set_frame_on(False) ax.set_clip_on(False) ax.set_position([0., 0., 1., 1.]) canvas = FigureCanvasAgg(fig) response = HttpResponse(content_type='image/png') canvas.print_png(response) return response def contouring_response(lon, lat, data, request, dpi=None): dpi = dpi or 80. 
bbox, width, height, colormap, cmin, cmax, crs = _get_common_params(request) nlvls = request.GET['numcontours'] EPSG4326 = pyproj.Proj(init='EPSG:4326') x, y = pyproj.transform(EPSG4326, crs, lon, lat) fig = Figure(dpi=dpi, facecolor='none', edgecolor='none') fig.set_alpha(0) fig.set_figheight(height/dpi) fig.set_figwidth(width/dpi) ax = fig.add_axes([0., 0., 1., 1.], xticks=[], yticks=[]) ax.set_axis_off() if request.GET['logscale'] is True: norm_func = mpl.colors.LogNorm else: norm_func = mpl.colors.Normalize if cmin is not None and cmax is not None: lvls = np.linspace(cmin, cmax, nlvls) norm = norm_func(vmin=cmin, vmax=cmax, clip=True) else: lvls = nlvls norm = norm_func() if request.GET['image_type'] == 'filledcontours': ax.contourf(x, y, data, lvls, norm=norm, cmap=colormap, extend='both') elif request.GET['image_type'] == 'contours': ax.contour(x, y, data, lvls, norm=norm, cmap=colormap, extend='both') elif request.GET['image_type'] == 'filledhatches': hatches = DEFAULT_HATCHES[:lvls] ax.contourf(x, y, data, lvls, norm=norm, cmap=colormap, hatches=hatches, extend='both') elif request.GET['image_type'] == 'hatches': hatches = DEFAULT_HATCHES[:lvls] ax.contourf(x, y, data, lvls, norm=norm, colors='none', hatches=hatches, extend='both') ax.set_xlim(bbox.minx, bbox.maxx) ax.set_ylim(bbox.miny, bbox.maxy) ax.set_frame_on(False) ax.set_clip_on(False) ax.set_position([0., 0., 1., 1.]) canvas = FigureCanvasAgg(fig) response = HttpResponse(content_type='image/png') canvas.print_png(response) return response def pcolormesh_response(lon, lat, data, request, dpi=None): dpi = dpi or 80. bbox, width, height, colormap, cmin, cmax, crs = _get_common_params(request) EPSG4326 = pyproj.Proj(init='EPSG:4326') x, y = pyproj.transform(EPSG4326, crs, lon, lat) fig = Figure(dpi=dpi, facecolor='none', edgecolor='none') fig.set_alpha(0) fig.set_figheight(height/dpi) fig.set_figwidth(width/dpi) ax = fig.add_axes([0., 0., 1., 1.], xticks=[], yticks=[]) ax.set_axis_off() if request.GET['logscale'] is True: norm_func = mpl.colors.LogNorm else: norm_func = mpl.colors.Normalize if cmin is not None and cmax is not None: norm = norm = norm_func(vmin=cmin, vmax=cmax, clip=True) else: norm = norm_func() masked = np.ma.masked_invalid(data) ax.pcolormesh(x, y, masked, norm=norm, cmap=colormap) ax.set_xlim(bbox.minx, bbox.maxx) ax.set_ylim(bbox.miny, bbox.maxy) ax.set_frame_on(False) ax.set_clip_on(False) ax.set_position([0., 0., 1., 1.]) canvas = FigureCanvasAgg(fig) response = HttpResponse(content_type='image/png') canvas.print_png(response) return response
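# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): every *_response handler
# above repeats the same boilerplate -- a transparent, frameless Figure sized
# from the requested width/height/dpi, a full-bleed axes, and an Agg canvas
# printed to PNG. The helper below is a hypothetical distillation of that
# pattern, shown only to document it; the handlers do not call it.
def _transparent_figure(width, height, dpi=80.):
    fig = Figure(dpi=dpi, facecolor='none', edgecolor='none')
    fig.set_alpha(0)
    fig.set_figheight(height / dpi)
    fig.set_figwidth(width / dpi)
    ax = fig.add_axes([0., 0., 1., 1.], xticks=[], yticks=[])
    ax.set_axis_off()
    return fig, ax

# Illustrative use outside of Django (writes PNG bytes to an in-memory buffer):
#   from io import BytesIO
#   fig, ax = _transparent_figure(256, 256)
#   ax.plot([0, 1], [0, 1])
#   buf = BytesIO()
#   FigureCanvasAgg(fig).print_png(buf)
#   png_bytes = buf.getvalue()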
gpl-3.0
surligas/cs436-gnuradio
gnuradio-runtime/examples/volk_benchmark/volk_plot.py
78
6117
#!/usr/bin/env python import sys, math import argparse from volk_test_funcs import * try: import matplotlib import matplotlib.pyplot as plt except ImportError: sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n") sys.exit(1) def main(): desc='Plot Volk performance results from a SQLite database. ' + \ 'Run one of the volk tests first (e.g, volk_math.py)' parser = argparse.ArgumentParser(description=desc) parser.add_argument('-D', '--database', type=str, default='volk_results.db', help='Database file to read data from [default: %(default)s]') parser.add_argument('-E', '--errorbars', action='store_true', default=False, help='Show error bars (1 standard dev.)') parser.add_argument('-P', '--plot', type=str, choices=['mean', 'min', 'max'], default='mean', help='Set the type of plot to produce [default: %(default)s]') parser.add_argument('-%', '--percent', type=str, default=None, metavar="table", help='Show percent difference to the given type [default: %(default)s]') args = parser.parse_args() # Set up global plotting properties matplotlib.rcParams['figure.subplot.bottom'] = 0.2 matplotlib.rcParams['figure.subplot.top'] = 0.95 matplotlib.rcParams['figure.subplot.right'] = 0.98 matplotlib.rcParams['ytick.labelsize'] = 16 matplotlib.rcParams['xtick.labelsize'] = 16 matplotlib.rcParams['legend.fontsize'] = 18 # Get list of tables to compare conn = create_connection(args.database) tables = list_tables(conn) M = len(tables) # Colors to distinguish each table in the bar graph # More than 5 tables will wrap around to the start. colors = ['b', 'r', 'g', 'm', 'k'] # Set up figure for plotting f0 = plt.figure(0, facecolor='w', figsize=(14,10)) s0 = f0.add_subplot(1,1,1) # Create a register of names that exist in all tables tmp_regs = [] for table in tables: # Get results from the next table res = get_results(conn, table[0]) tmp_regs.append(list()) for r in res: try: tmp_regs[-1].index(r['kernel']) except ValueError: tmp_regs[-1].append(r['kernel']) # Get only those names that are common in all tables name_reg = tmp_regs[0] for t in tmp_regs[1:]: name_reg = list(set(name_reg) & set(t)) name_reg.sort() # Pull the data out for each table into a dictionary # we can ref the table by it's name and the data associated # with a given kernel in name_reg by it's name. # This ensures there is no sorting issue with the data in the # dictionary, so the kernels are plotted against each other. 
table_data = dict() for i,table in enumerate(tables): # Get results from the next table res = get_results(conn, table[0]) data = dict() for r in res: data[r['kernel']] = r table_data[table[0]] = data if args.percent is not None: for i,t in enumerate(table_data): if args.percent == t: norm_data = [] for name in name_reg: if(args.plot == 'max'): norm_data.append(table_data[t][name]['max']) elif(args.plot == 'min'): norm_data.append(table_data[t][name]['min']) elif(args.plot == 'mean'): norm_data.append(table_data[t][name]['avg']) # Plot the results x0 = xrange(len(name_reg)) i = 0 for t in (table_data): ydata = [] stds = [] for name in name_reg: stds.append(math.sqrt(table_data[t][name]['var'])) if(args.plot == 'max'): ydata.append(table_data[t][name]['max']) elif(args.plot == 'min'): ydata.append(table_data[t][name]['min']) elif(args.plot == 'mean'): ydata.append(table_data[t][name]['avg']) if args.percent is not None: ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)] if(args.percent != t): # makes x values for this data set placement # width of bars depends on number of comparisons wdth = 0.80/(M-1) x1 = [x + i*wdth for x in x0] i += 1 s0.bar(x1, ydata, width=wdth, color=colors[(i-1)%M], label=t, edgecolor='k', linewidth=2) else: # makes x values for this data set placement # width of bars depends on number of comparisons wdth = 0.80/M x1 = [x + i*wdth for x in x0] i += 1 if(args.errorbars is False): s0.bar(x1, ydata, width=wdth, color=colors[(i-1)%M], label=t, edgecolor='k', linewidth=2) else: s0.bar(x1, ydata, width=wdth, yerr=stds, color=colors[i%M], label=t, edgecolor='k', linewidth=2, error_kw={"ecolor": 'k', "capsize":5, "linewidth":2}) nitems = res[0]['nitems'] if args.percent is None: s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems), fontsize=22, fontweight='bold', horizontalalignment='center') else: s0.set_ylabel("% Improvement over {0} [{1:G} items]".format( args.percent, nitems), fontsize=22, fontweight='bold') s0.legend() s0.set_xticks(x0) s0.set_xticklabels(name_reg) for label in s0.xaxis.get_ticklabels(): label.set_rotation(45) label.set_fontsize(16) plt.show() if __name__ == "__main__": main()
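# ---------------------------------------------------------------------------
# Hedged usage note (not part of the original script): typical invocations
# might look like the following, assuming a volk_results.db produced by one of
# the volk_* benchmark scripts; the table name passed to -% is illustrative.
#
#   ./volk_plot.py -D volk_results.db -P mean -E
#   ./volk_plot.py -D volk_results.db -P max -% generic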
gpl-3.0
deepesch/scikit-learn
examples/svm/plot_custom_kernel.py
171
1546
""" ====================== SVM with custom kernel ====================== Simple usage of Support Vector Machines to classify a sample. It will plot the decision surface and the support vectors. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset Y = iris.target def my_kernel(X, Y): """ We create a custom kernel: (2 0) k(X, Y) = X ( ) Y.T (0 1) """ M = np.array([[2, 0], [0, 1.0]]) return np.dot(np.dot(X, M), Y.T) h = .02 # step size in the mesh # we create an instance of SVM and fit out data. clf = svm.SVC(kernel=my_kernel) clf.fit(X, Y) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired) plt.title('3-Class classification using Support Vector Machine with custom' ' kernel') plt.axis('tight') plt.show()
bsd-3-clause
tdhopper/scikit-learn
sklearn/linear_model/tests/test_coordinate_descent.py
114
25281
# Authors: Olivier Grisel <[email protected]> # Alexandre Gramfort <[email protected]> # License: BSD 3 clause from sys import version_info import numpy as np from scipy import interpolate, sparse from copy import deepcopy from sklearn.datasets import load_boston from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import TempMemmap from sklearn.linear_model.coordinate_descent import Lasso, \ LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \ MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path from sklearn.linear_model import LassoLarsCV, lars_path from sklearn.utils import check_array def check_warnings(): if version_info < (2, 6): raise SkipTest("Testing for warnings is not supported in versions \ older than Python 2.6") def test_lasso_zero(): # Check that the lasso can handle zero data without crashing X = [[0], [0], [0]] y = [0, 0, 0] clf = Lasso(alpha=0.1).fit(X, y) pred = clf.predict([[1], [2], [3]]) assert_array_almost_equal(clf.coef_, [0]) assert_array_almost_equal(pred, [0, 0, 0]) assert_almost_equal(clf.dual_gap_, 0) def test_lasso_toy(): # Test Lasso on a toy example for various values of alpha. # When validating this against glmnet notice that glmnet divides it # against nobs. X = [[-1], [0], [1]] Y = [-1, 0, 1] # just a straight line T = [[2], [3], [4]] # test sample clf = Lasso(alpha=1e-8) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(pred, [2, 3, 4]) assert_almost_equal(clf.dual_gap_, 0) clf = Lasso(alpha=0.1) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [.85]) assert_array_almost_equal(pred, [1.7, 2.55, 3.4]) assert_almost_equal(clf.dual_gap_, 0) clf = Lasso(alpha=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [.25]) assert_array_almost_equal(pred, [0.5, 0.75, 1.]) assert_almost_equal(clf.dual_gap_, 0) clf = Lasso(alpha=1) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [.0]) assert_array_almost_equal(pred, [0, 0, 0]) assert_almost_equal(clf.dual_gap_, 0) def test_enet_toy(): # Test ElasticNet for various parameters of alpha and l1_ratio. # Actually, the parameters alpha = 0 should not be allowed. However, # we test it as a border case. 
# ElasticNet is tested with and without precomputed Gram matrix X = np.array([[-1.], [0.], [1.]]) Y = [-1, 0, 1] # just a straight line T = [[2.], [3.], [4.]] # test sample # this should be the same as lasso clf = ElasticNet(alpha=1e-8, l1_ratio=1.0) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(pred, [2, 3, 4]) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100, precompute=False) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf.set_params(max_iter=100, precompute=True) clf.fit(X, Y) # with Gram pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf.set_params(max_iter=100, precompute=np.dot(X.T, X)) clf.fit(X, Y) # with Gram pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.50819], decimal=3) assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3) assert_almost_equal(clf.dual_gap_, 0) clf = ElasticNet(alpha=0.5, l1_ratio=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [0.45454], 3) assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3) assert_almost_equal(clf.dual_gap_, 0) def build_dataset(n_samples=50, n_features=200, n_informative_features=10, n_targets=1): """ build an ill-posed linear regression problem with many noisy features and comparatively few samples """ random_state = np.random.RandomState(0) if n_targets > 1: w = random_state.randn(n_features, n_targets) else: w = random_state.randn(n_features) w[n_informative_features:] = 0.0 X = random_state.randn(n_samples, n_features) y = np.dot(X, w) X_test = random_state.randn(n_samples, n_features) y_test = np.dot(X_test, w) return X, y, X_test, y_test def test_lasso_cv(): X, y, X_test, y_test = build_dataset() max_iter = 150 clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y) assert_almost_equal(clf.alpha_, 0.056, 2) clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True) clf.fit(X, y) assert_almost_equal(clf.alpha_, 0.056, 2) # Check that the lars and the coordinate descent implementation # select a similar alpha lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y) # for this we check that they don't fall in the grid of # clf.alphas further than 1 assert_true(np.abs( np.searchsorted(clf.alphas_[::-1], lars.alpha_) - np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1) # check that they also give a similar MSE mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T) np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(), clf.mse_path_[5].mean(), significant=2) # test set assert_greater(clf.score(X_test, y_test), 0.99) def test_lasso_cv_positive_constraint(): X, y, X_test, y_test = build_dataset() max_iter = 500 # Ensure the unconstrained fit has a negative coefficient clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1) clf_unconstrained.fit(X, y) assert_true(min(clf_unconstrained.coef_) < 0) # On same data, constrained fit has non-negative coefficients clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, positive=True, cv=2, n_jobs=1) clf_constrained.fit(X, y) assert_true(min(clf_constrained.coef_) >= 0) def test_lasso_path_return_models_vs_new_return_gives_same_coefficients(): # 
Test that lasso_path with lars_path style output gives the # same result # Some toy data X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T y = np.array([1, 2, 3.1]) alphas = [5., 1., .5] # Use lars_path and lasso_path(new output) with 1D linear interpolation # to compute the the same path alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso') coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1], coef_path_lars[:, ::-1]) alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas, return_models=False) coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1], coef_path_lasso2[:, ::-1]) assert_array_almost_equal( coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas), decimal=1) def test_enet_path(): # We use a large number of samples and of informative features so that # the l1_ratio selected is more toward ridge than lasso X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100, n_informative_features=100) max_iter = 150 # Here we have a small number of iterations, and thus the # ElasticNet might not converge. This is to speed up tests clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter) ignore_warnings(clf.fit)(X, y) # Well-conditioned settings, we should have selected our # smallest penalty assert_almost_equal(clf.alpha_, min(clf.alphas_)) # Non-sparse ground truth: we should have seleted an elastic-net # that is closer to ridge than to lasso assert_equal(clf.l1_ratio_, min(clf.l1_ratio)) clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter, precompute=True) ignore_warnings(clf.fit)(X, y) # Well-conditioned settings, we should have selected our # smallest penalty assert_almost_equal(clf.alpha_, min(clf.alphas_)) # Non-sparse ground truth: we should have seleted an elastic-net # that is closer to ridge than to lasso assert_equal(clf.l1_ratio_, min(clf.l1_ratio)) # We are in well-conditioned settings with low noise: we should # have a good test-set performance assert_greater(clf.score(X_test, y_test), 0.99) # Multi-output/target case X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3) clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter) ignore_warnings(clf.fit)(X, y) # We are in well-conditioned settings with low noise: we should # have a good test-set performance assert_greater(clf.score(X_test, y_test), 0.99) assert_equal(clf.coef_.shape, (3, 10)) # Mono-output should have same cross-validated alpha_ and l1_ratio_ # in both cases. 
X, y, _, _ = build_dataset(n_features=10) clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) clf1.fit(X, y) clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) clf2.fit(X, y[:, np.newaxis]) assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_) assert_almost_equal(clf1.alpha_, clf2.alpha_) def test_path_parameters(): X, y, _, _ = build_dataset() max_iter = 100 clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter, l1_ratio=0.5, tol=1e-3) clf.fit(X, y) # new params assert_almost_equal(0.5, clf.l1_ratio) assert_equal(50, clf.n_alphas) assert_equal(50, len(clf.alphas_)) def test_warm_start(): X, y, _, _ = build_dataset() clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True) ignore_warnings(clf.fit)(X, y) ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations clf2 = ElasticNet(alpha=0.1, max_iter=10) ignore_warnings(clf2.fit)(X, y) assert_array_almost_equal(clf2.coef_, clf.coef_) def test_lasso_alpha_warning(): X = [[-1], [0], [1]] Y = [-1, 0, 1] # just a straight line clf = Lasso(alpha=0) assert_warns(UserWarning, clf.fit, X, Y) def test_lasso_positive_constraint(): X = [[-1], [0], [1]] y = [1, 0, -1] # just a straight line with negative slope lasso = Lasso(alpha=0.1, max_iter=1000, positive=True) lasso.fit(X, y) assert_true(min(lasso.coef_) >= 0) lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True) lasso.fit(X, y) assert_true(min(lasso.coef_) >= 0) def test_enet_positive_constraint(): X = [[-1], [0], [1]] y = [1, 0, -1] # just a straight line with negative slope enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True) enet.fit(X, y) assert_true(min(enet.coef_) >= 0) def test_enet_cv_positive_constraint(): X, y, X_test, y_test = build_dataset() max_iter = 500 # Ensure the unconstrained fit has a negative coefficient enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1) enetcv_unconstrained.fit(X, y) assert_true(min(enetcv_unconstrained.coef_) < 0) # On same data, constrained fit has non-negative coefficients enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, positive=True, n_jobs=1) enetcv_constrained.fit(X, y) assert_true(min(enetcv_constrained.coef_) >= 0) def test_uniform_targets(): enet = ElasticNetCV(fit_intercept=True, n_alphas=3) m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3) lasso = LassoCV(fit_intercept=True, n_alphas=3) m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3) models_single_task = (enet, lasso) models_multi_task = (m_enet, m_lasso) rng = np.random.RandomState(0) X_train = rng.random_sample(size=(10, 3)) X_test = rng.random_sample(size=(10, 3)) y1 = np.empty(10) y2 = np.empty((10, 2)) for model in models_single_task: for y_values in (0, 5): y1.fill(y_values) assert_array_equal(model.fit(X_train, y1).predict(X_test), y1) assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3) for model in models_multi_task: for y_values in (0, 5): y2[:, 0].fill(y_values) y2[:, 1].fill(2 * y_values) assert_array_equal(model.fit(X_train, y2).predict(X_test), y2) assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3) def test_multi_task_lasso_and_enet(): X, y, X_test, y_test = build_dataset() Y = np.c_[y, y] # Y_test = np.c_[y_test, y_test] clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) assert_true(0 < clf.dual_gap_ < 1e-5) assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y) assert_true(0 < clf.dual_gap_ < 1e-5) assert_array_almost_equal(clf.coef_[0], 
clf.coef_[1]) def test_lasso_readonly_data(): X = np.array([[-1], [0], [1]]) Y = np.array([-1, 0, 1]) # just a straight line T = np.array([[2], [3], [4]]) # test sample with TempMemmap((X, Y)) as (X, Y): clf = Lasso(alpha=0.5) clf.fit(X, Y) pred = clf.predict(T) assert_array_almost_equal(clf.coef_, [.25]) assert_array_almost_equal(pred, [0.5, 0.75, 1.]) assert_almost_equal(clf.dual_gap_, 0) def test_multi_task_lasso_readonly_data(): X, y, X_test, y_test = build_dataset() Y = np.c_[y, y] with TempMemmap((X, Y)) as (X, Y): Y = np.c_[y, y] clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y) assert_true(0 < clf.dual_gap_ < 1e-5) assert_array_almost_equal(clf.coef_[0], clf.coef_[1]) def test_enet_multitarget(): n_targets = 3 X, y, _, _ = build_dataset(n_samples=10, n_features=8, n_informative_features=10, n_targets=n_targets) estimator = ElasticNet(alpha=0.01, fit_intercept=True) estimator.fit(X, y) coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_, estimator.dual_gap_) for k in range(n_targets): estimator.fit(X, y[:, k]) assert_array_almost_equal(coef[k, :], estimator.coef_) assert_array_almost_equal(intercept[k], estimator.intercept_) assert_array_almost_equal(dual_gap[k], estimator.dual_gap_) def test_multioutput_enetcv_error(): X = np.random.randn(10, 2) y = np.random.randn(10, 2) clf = ElasticNetCV() assert_raises(ValueError, clf.fit, X, y) def test_multitask_enet_and_lasso_cv(): X, y, _, _ = build_dataset(n_features=100, n_targets=3) clf = MultiTaskElasticNetCV().fit(X, y) assert_almost_equal(clf.alpha_, 0.00556, 3) clf = MultiTaskLassoCV().fit(X, y) assert_almost_equal(clf.alpha_, 0.00278, 3) X, y, _, _ = build_dataset(n_targets=3) clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100, l1_ratio=[0.3, 0.5], tol=1e-3) clf.fit(X, y) assert_equal(0.5, clf.l1_ratio_) assert_equal((3, X.shape[1]), clf.coef_.shape) assert_equal((3, ), clf.intercept_.shape) assert_equal((2, 50, 3), clf.mse_path_.shape) assert_equal((2, 50), clf.alphas_.shape) X, y, _, _ = build_dataset(n_targets=3) clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3) clf.fit(X, y) assert_equal((3, X.shape[1]), clf.coef_.shape) assert_equal((3, ), clf.intercept_.shape) assert_equal((50, 3), clf.mse_path_.shape) assert_equal(50, len(clf.alphas_)) def test_1d_multioutput_enet_and_multitask_enet_cv(): X, y, _, _ = build_dataset(n_features=10) y = y[:, np.newaxis] clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) clf.fit(X, y[:, 0]) clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7]) clf1.fit(X, y) assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_) assert_almost_equal(clf.alpha_, clf1.alpha_) assert_almost_equal(clf.coef_, clf1.coef_[0]) assert_almost_equal(clf.intercept_, clf1.intercept_[0]) def test_1d_multioutput_lasso_and_multitask_lasso_cv(): X, y, _, _ = build_dataset(n_features=10) y = y[:, np.newaxis] clf = LassoCV(n_alphas=5, eps=2e-3) clf.fit(X, y[:, 0]) clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3) clf1.fit(X, y) assert_almost_equal(clf.alpha_, clf1.alpha_) assert_almost_equal(clf.coef_, clf1.coef_[0]) assert_almost_equal(clf.intercept_, clf1.intercept_[0]) def test_sparse_input_dtype_enet_and_lassocv(): X, y, _, _ = build_dataset(n_features=10) clf = ElasticNetCV(n_alphas=5) clf.fit(sparse.csr_matrix(X), y) clf1 = ElasticNetCV(n_alphas=5) clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y) assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) clf = LassoCV(n_alphas=5) 
clf.fit(sparse.csr_matrix(X), y) clf1 = LassoCV(n_alphas=5) clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y) assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6) assert_almost_equal(clf.coef_, clf1.coef_, decimal=6) def test_precompute_invalid_argument(): X, y, _, _ = build_dataset() for clf in [ElasticNetCV(precompute="invalid"), LassoCV(precompute="invalid")]: assert_raises(ValueError, clf.fit, X, y) def test_warm_start_convergence(): X, y, _, _ = build_dataset() model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y) n_iter_reference = model.n_iter_ # This dataset is not trivial enough for the model to converge in one pass. assert_greater(n_iter_reference, 2) # Check that n_iter_ is invariant to multiple calls to fit # when warm_start=False, all else being equal. model.fit(X, y) n_iter_cold_start = model.n_iter_ assert_equal(n_iter_cold_start, n_iter_reference) # Fit the same model again, using a warm start: the optimizer just performs # a single pass before checking that it has already converged model.set_params(warm_start=True) model.fit(X, y) n_iter_warm_start = model.n_iter_ assert_equal(n_iter_warm_start, 1) def test_warm_start_convergence_with_regularizer_decrement(): boston = load_boston() X, y = boston.data, boston.target # Train a model to converge on a lightly regularized problem final_alpha = 1e-5 low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y) # Fitting a new model on a more regularized version of the same problem. # Fitting with high regularization is easier it should converge faster # in general. high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y) assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_) # Fit the solution to the original, less regularized version of the # problem but from the solution of the highly regularized variant of # the problem as a better starting point. This should also converge # faster than the original model that starts from zero. warm_low_reg_model = deepcopy(high_reg_model) warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha) warm_low_reg_model.fit(X, y) assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_) def test_random_descent(): # Test that both random and cyclic selection give the same results. # Ensure that the test models fully converge and check a wide # range of conditions. # This uses the coordinate descent algo using the gram trick. X, y, _, _ = build_dataset(n_samples=50, n_features=20) clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8) clf_cyclic.fit(X, y) clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42) clf_random.fit(X, y) assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) # This uses the descent algo without the gram trick clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8) clf_cyclic.fit(X.T, y[:20]) clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42) clf_random.fit(X.T, y[:20]) assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) # Sparse Case clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8) clf_cyclic.fit(sparse.csr_matrix(X), y) clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42) clf_random.fit(sparse.csr_matrix(X), y) assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) # Multioutput case. 
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis])) clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8) clf_cyclic.fit(X, new_y) clf_random = MultiTaskElasticNet(selection='random', tol=1e-8, random_state=42) clf_random.fit(X, new_y) assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_) assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_) # Raise error when selection is not in cyclic or random. clf_random = ElasticNet(selection='invalid') assert_raises(ValueError, clf_random.fit, X, y) def test_deprection_precompute_enet(): # Test that setting precompute="auto" gives a Deprecation Warning. X, y, _, _ = build_dataset(n_samples=20, n_features=10) clf = ElasticNet(precompute="auto") assert_warns(DeprecationWarning, clf.fit, X, y) clf = Lasso(precompute="auto") assert_warns(DeprecationWarning, clf.fit, X, y) def test_enet_path_positive(): # Test that the coefs returned by positive=True in enet_path are positive X, y, _, _ = build_dataset(n_samples=50, n_features=50) for path in [enet_path, lasso_path]: pos_path_coef = path(X, y, positive=True)[1] assert_true(np.all(pos_path_coef >= 0)) def test_sparse_dense_descent_paths(): # Test that dense and sparse input give the same input for descent paths. X, y, _, _ = build_dataset(n_samples=50, n_features=20) csr = sparse.csr_matrix(X) for path in [enet_path, lasso_path]: _, coefs, _ = path(X, y, fit_intercept=False) _, sparse_coefs, _ = path(csr, y, fit_intercept=False) assert_array_almost_equal(coefs, sparse_coefs) def test_check_input_false(): X, y, _, _ = build_dataset(n_samples=20, n_features=10) X = check_array(X, order='F', dtype='float64') y = check_array(X, order='F', dtype='float64') clf = ElasticNet(selection='cyclic', tol=1e-8) # Check that no error is raised if data is provided in the right format clf.fit(X, y, check_input=False) X = check_array(X, order='F', dtype='float32') clf.fit(X, y, check_input=True) # Check that an error is raised if data is provided in the wrong format, # because of check bypassing assert_raises(ValueError, clf.fit, X, y, check_input=False) # With no input checking, providing X in C order should result in false # computation X = check_array(X, order='C', dtype='float64') clf.fit(X, y, check_input=False) coef_false = clf.coef_ clf.fit(X, y, check_input=True) coef_true = clf.coef_ assert_raises(AssertionError, assert_array_almost_equal, coef_true, coef_false) def test_overrided_gram_matrix(): X, y, _, _ = build_dataset(n_samples=20, n_features=10) Gram = X.T.dot(X) clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram, fit_intercept=True) assert_warns_message(UserWarning, "Gram matrix was provided but X was centered" " to fit intercept, " "or X was normalized : recomputing Gram matrix.", clf.fit, X, y)
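# ---------------------------------------------------------------------------
# Hedged note (not part of the original test module): these tests are meant to
# be collected by the project's test runner rather than executed directly,
# e.g. something along the lines of
#
#   nosetests sklearn/linear_model/tests/test_coordinate_descent.py
#
# or, with a pytest-style runner, selecting a subset by keyword:
#
#   pytest sklearn/linear_model/tests/test_coordinate_descent.py -k warm_start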
bsd-3-clause
chiahaoliu/2016_summer_XPD
out_of_date/matplotlib_demo/Rectangle Selector.py
2
2243
from __future__ import print_function
"""
Do a mouseclick somewhere, move the mouse to some destination, release
the button.  This class gives click- and release-events and also draws
a line or a box from the click-point to the actual mouseposition
(within the same axes) until the button is released.  Within the
method 'self.ignore()' it is checked whether the button from eventpress
and eventrelease are the same.
"""
from matplotlib.widgets import RectangleSelector
import numpy as np
import matplotlib.pyplot as plt


def line_select_callback(eclick, erelease):
    'eclick and erelease are the press and release events'
    x1, y1 = eclick.xdata, eclick.ydata
    x2, y2 = erelease.xdata, erelease.ydata
    print("(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (x1, y1, x2, y2))
    print(" The buttons you used were: %s %s" % (eclick.button, erelease.button))


def toggle_selector(event):
    print(' Key pressed.')
    if event.key in ['Q', 'q'] and toggle_selector.RS.active:
        print(' RectangleSelector deactivated.')
        toggle_selector.RS.set_active(False)
    if event.key in ['A', 'a'] and not toggle_selector.RS.active:
        print(' RectangleSelector activated.')
        toggle_selector.RS.set_active(True)


plt.figure(1)
current_ax = plt.subplot2grid((5, 5), (0, 0), rowspan=4, colspan=4)
# fig, current_ax = plt.subplots()  # make a new plotting range
N = 100000                          # If N is large one can see
x = np.linspace(0.0, 10.0, N)       # improvement by using blitting!

current_ax.plot(x, +np.sin(.2*np.pi*x), lw=3.5, c='b', alpha=.7)  # plot something
current_ax.plot(x, +np.cos(.2*np.pi*x), lw=3.5, c='r', alpha=.5)
current_ax.plot(x, -np.sin(.2*np.pi*x), lw=3.5, c='g', alpha=.3)

print("\n click --> release")

# drawtype is 'box' or 'line' or 'none'
toggle_selector.RS = RectangleSelector(current_ax, line_select_callback,
                                       drawtype='box', useblit=True,
                                       button=[1, 3],  # don't use middle button
                                       minspanx=5, minspany=5,
                                       spancoords='pixels')
plt.connect('key_press_event', toggle_selector)
plt.show()
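# ---------------------------------------------------------------------------
# Hedged aside (not part of the original demo): instead of printing, a callback
# can collect the selected rectangles for later processing, using only the
# press/release event coordinates already shown above.
#
#   selections = []
#
#   def record_select(eclick, erelease):
#       x_lo, x_hi = sorted((eclick.xdata, erelease.xdata))
#       y_lo, y_hi = sorted((eclick.ydata, erelease.ydata))
#       selections.append((x_lo, x_hi, y_lo, y_hi))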
bsd-2-clause
planetarymike/IDL-Colorbars
IDL_py_test/010_GREEN-PINK.py
1
7594
from matplotlib.colors import LinearSegmentedColormap from numpy import nan, inf cm_data = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0.054902, 0.], [0., 0.109804, 0.], [0., 0.168627, 0.], [0., 0.223529, 0.], [0., 0.282353, 0.], [0., 0.317647, 0.], [0., 0.352941, 0.], [0., 0.388235, 0.], [0., 0.423529, 0.], [0., 0.458824, 0.], [0., 0.494118, 0.], [0., 0.529412, 0.], [0., 0.564706, 0.], [0., 0.6, 0.], [0., 0.635294, 0.], [0., 0.670588, 0.], [0., 0.705882, 0.], [0., 0.701961, 0.00392157], [0., 0.698039, 0.0117647], [0., 0.694118, 0.0156863], [0., 0.690196, 0.0235294], [0., 0.686275, 0.0313725], [0., 0.682353, 0.0392157], [0., 0.678431, 0.0470588], [0., 0.67451, 0.054902], [0., 0.670588, 0.0588235], [0., 0.666667, 0.0666667], [0., 0.662745, 0.0745098], [0., 0.658824, 0.0823529], [0., 0.654902, 0.0901961], [0., 0.65098, 0.0980392], [0., 0.647059, 0.105882], [0., 0.643137, 0.113725], [0., 0.639216, 0.121569], [0., 0.635294, 0.129412], [0., 0.631373, 0.137255], [0., 0.627451, 0.145098], [0., 0.623529, 0.14902], [0., 0.619608, 0.156863], [0., 0.615686, 0.164706], [0., 0.611765, 0.172549], [0., 0.603922, 0.180392], [0., 0.596078, 0.188235], [0., 0.588235, 0.196078], [0., 0.580392, 0.207843], [0., 0.576471, 0.211765], [0., 0.572549, 0.219608], [0., 0.568627, 0.227451], [0., 0.564706, 0.235294], [0., 0.560784, 0.243137], [0., 0.556863, 0.25098], [0., 0.552941, 0.258824], [0., 0.54902, 0.266667], [0., 0.545098, 0.270588], [0., 0.541176, 0.278431], [0., 0.537255, 0.286275], [0., 0.533333, 0.294118], [0., 0.529412, 0.301961], [0., 0.52549, 0.309804], [0., 0.521569, 0.317647], [0., 0.517647, 0.32549], [0., 0.513725, 0.329412], [0., 0.509804, 0.337255], [0., 0.505882, 0.345098], [0., 0.501961, 0.352941], [0., 0.494118, 0.360784], [0., 0.486275, 0.368627], [0., 0.478431, 0.376471], [0., 0.470588, 0.388235], [0., 0.466667, 0.392157], [0., 0.462745, 0.4], [0., 0.458824, 0.407843], [0., 0.454902, 0.415686], [0., 0.45098, 0.423529], [0., 0.447059, 0.431373], [0., 0.443137, 0.439216], [0., 0.439216, 0.447059], [0., 0.435294, 0.454902], [0., 0.431373, 0.462745], [0., 0.427451, 0.470588], [0., 0.423529, 0.478431], [0., 0.419608, 0.482353], [0., 0.415686, 0.490196], [0., 0.411765, 0.498039], [0., 0.407843, 0.505882], [0., 0.403922, 0.513725], [0., 0.4, 0.521569], [0., 0.396078, 0.529412], [0., 0.392157, 0.537255], [0., 0.388235, 0.541176], [0., 0.384314, 0.54902], [0., 0.380392, 0.556863], [0., 0.376471, 0.564706], [0., 0.368627, 0.572549], [0., 0.360784, 0.580392], [0., 0.352941, 0.588235], [0., 0.345098, 0.6], [0., 0.341176, 0.603922], [0., 0.337255, 0.611765], [0., 0.333333, 0.619608], [0., 0.329412, 0.627451], [0., 0.32549, 0.635294], [0., 0.321569, 0.643137], [0., 0.317647, 0.65098], [0., 0.313725, 0.658824], [0., 0.309804, 0.666667], [0., 0.305882, 0.67451], [0., 0.301961, 0.682353], [0., 0.298039, 0.690196], [0., 0.294118, 0.694118], [0., 0.290196, 0.701961], [0., 0.286275, 0.709804], [0., 0.282353, 0.717647], [0., 0.278431, 0.72549], [0., 0.27451, 0.733333], [0., 0.270588, 0.741176], [0., 0.266667, 0.74902], [0., 0.258824, 0.756863], [0., 0.25098, 0.764706], [0., 0.243137, 0.772549], [0., 0.235294, 0.780392], [0.0196078, 0.231373, 0.780392], [0.0392157, 0.227451, 0.776471], [0.0588235, 0.223529, 0.772549], [0.0784314, 0.219608, 0.768627], [0.0980392, 0.211765, 0.768627], [0.117647, 0.203922, 0.768627], [0.137255, 0.196078, 0.768627], [0.156863, 0.188235, 0.764706], [0.176471, 0.184314, 0.764706], [0.196078, 0.180392, 0.760784], [0.215686, 0.176471, 0.756863], [0.235294, 0.172549, 
0.752941], [0.254902, 0.164706, 0.752941], [0.27451, 0.156863, 0.74902], [0.294118, 0.14902, 0.74902], [0.313725, 0.141176, 0.745098], [0.329412, 0.133333, 0.745098], [0.345098, 0.12549, 0.745098], [0.360784, 0.117647, 0.745098], [0.376471, 0.109804, 0.745098], [0.396078, 0.101961, 0.745098], [0.415686, 0.0941176, 0.745098], [0.435294, 0.0862745, 0.745098], [0.454902, 0.0784314, 0.741176], [0.47451, 0.0745098, 0.741176], [0.494118, 0.0705882, 0.737255], [0.513725, 0.0666667, 0.733333], [0.533333, 0.0627451, 0.729412], [0.552941, 0.054902, 0.729412], [0.572549, 0.0470588, 0.72549], [0.592157, 0.0392157, 0.72549], [0.611765, 0.0313725, 0.721569], [0.623529, 0.0235294, 0.721569], [0.635294, 0.0156863, 0.72549], [0.647059, 0.00784314, 0.729412], [0.658824, 0., 0.733333], [0.670588, 0., 0.733333], [0.682353, 0., 0.733333], [0.694118, 0., 0.733333], [0.705882, 0., 0.733333], [0.717647, 0., 0.733333], [0.729412, 0., 0.733333], [0.741176, 0., 0.733333], [0.752941, 0., 0.737255], [0.764706, 0., 0.737255], [0.776471, 0., 0.737255], [0.788235, 0., 0.737255], [0.8, 0., 0.741176], [0.811765, 0., 0.741176], [0.823529, 0., 0.741176], [0.835294, 0., 0.741176], [0.847059, 0., 0.741176], [0.858824, 0., 0.741176], [0.870588, 0., 0.741176], [0.882353, 0., 0.741176], [0.894118, 0., 0.745098], [0.905882, 0., 0.745098], [0.917647, 0., 0.745098], [0.929412, 0., 0.745098], [0.941176, 0., 0.74902], [0.952941, 0., 0.74902], [0.964706, 0., 0.74902], [0.976471, 0., 0.74902], [0.988235, 0., 0.74902], [0.988235, 0.0156863, 0.752941], [0.988235, 0.0313725, 0.756863], [0.988235, 0.0470588, 0.760784], [0.988235, 0.0627451, 0.764706], [0.988235, 0.0784314, 0.768627], [0.988235, 0.0941176, 0.772549], [0.988235, 0.109804, 0.776471], [0.988235, 0.12549, 0.780392], [0.988235, 0.141176, 0.784314], [0.988235, 0.156863, 0.788235], [0.988235, 0.172549, 0.792157], [0.988235, 0.188235, 0.796078], [0.988235, 0.203922, 0.796078], [0.988235, 0.219608, 0.8], [0.988235, 0.235294, 0.803922], [0.988235, 0.25098, 0.807843], [0.988235, 0.266667, 0.811765], [0.988235, 0.282353, 0.815686], [0.988235, 0.298039, 0.819608], [0.988235, 0.313725, 0.823529], [0.988235, 0.329412, 0.827451], [0.988235, 0.345098, 0.831373], [0.988235, 0.360784, 0.835294], [0.988235, 0.376471, 0.839216], [0.988235, 0.392157, 0.839216], [0.988235, 0.407843, 0.843137], [0.988235, 0.423529, 0.847059], [0.988235, 0.439216, 0.85098], [0.988235, 0.454902, 0.854902], [0.988235, 0.470588, 0.858824], [0.988235, 0.486275, 0.862745], [0.988235, 0.501961, 0.866667], [0.988235, 0.517647, 0.870588], [0.988235, 0.533333, 0.87451], [0.988235, 0.54902, 0.878431], [0.988235, 0.564706, 0.882353], [0.988235, 0.576471, 0.886275], [0.988235, 0.588235, 0.890196], [0.988235, 0.6, 0.894118], [0.988235, 0.611765, 0.898039], [0.988235, 0.627451, 0.901961], [0.988235, 0.643137, 0.905882], [0.988235, 0.658824, 0.909804], [0.988235, 0.67451, 0.913725], [0.988235, 0.690196, 0.917647], [0.988235, 0.705882, 0.921569], [0.988235, 0.721569, 0.92549], [0.988235, 0.737255, 0.929412], [0.988235, 0.752941, 0.929412], [0.988235, 0.768627, 0.933333], [0.988235, 0.784314, 0.937255], [0.988235, 0.8, 0.941176], [0.988235, 0.815686, 0.945098], [0.988235, 0.831373, 0.94902], [0.988235, 0.847059, 0.952941], [0.988235, 0.862745, 0.956863], [0.988235, 0.878431, 0.960784], [0.988235, 0.894118, 0.964706], [0.988235, 0.909804, 0.968627], [0.988235, 0.92549, 0.972549], [0.988235, 0.941176, 0.976471], [0.988235, 0.956863, 0.980392], [0.988235, 0.972549, 0.984314], [0.988235, 0.988235, 0.988235], [0.992157, 0.992157, 
0.992157], [0.996078, 0.996078, 0.996078], [1., 1., 1.]] test_cm = LinearSegmentedColormap.from_list(__file__, cm_data) if __name__ == "__main__": import matplotlib.pyplot as plt import numpy as np try: from pycam02ucs.cm.viscm import viscm viscm(test_cm) except ImportError: print("pycam02ucs not found, falling back on simple display") plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=test_cm) plt.show()
gpl-2.0
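A minimal sketch of the same colormap registration step used in the file above, with a hypothetical three-stop palette standing in for the full cm_data table so it runs standalone:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap

# Hypothetical short palette; the real file interpolates over 256 entries.
cm_data = [[0.0, 0.0, 0.0], [0.0, 0.7, 0.0], [1.0, 1.0, 1.0]]
test_cm = LinearSegmentedColormap.from_list("demo_cmap", cm_data)

# Same fallback display as the file's __main__ block when viscm is unavailable.
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=test_cm)
plt.show()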
treycausey/scikit-learn
sklearn/metrics/tests/test_pairwise.py
20
21057
import numpy as np from numpy import linalg from scipy.sparse import dok_matrix, csr_matrix, issparse from scipy.spatial.distance import cosine, cityblock, minkowski from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.externals.six import iteritems from sklearn.metrics.pairwise import euclidean_distances from sklearn.metrics.pairwise import manhattan_distances from sklearn.metrics.pairwise import linear_kernel from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel from sklearn.metrics.pairwise import polynomial_kernel from sklearn.metrics.pairwise import rbf_kernel from sklearn.metrics.pairwise import sigmoid_kernel from sklearn.metrics.pairwise import cosine_similarity from sklearn.metrics.pairwise import cosine_distances from sklearn.metrics.pairwise import pairwise_distances from sklearn.metrics.pairwise import pairwise_distances_argmin_min from sklearn.metrics.pairwise import pairwise_distances_argmin from sklearn.metrics.pairwise import pairwise_kernels from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS from sklearn.metrics.pairwise import PAIRED_DISTANCES from sklearn.metrics.pairwise import check_pairwise_arrays from sklearn.metrics.pairwise import check_paired_arrays from sklearn.metrics.pairwise import _parallel_pairwise from sklearn.metrics.pairwise import paired_distances from sklearn.metrics.pairwise import paired_euclidean_distances from sklearn.metrics.pairwise import paired_manhattan_distances from sklearn.preprocessing import normalize def test_pairwise_distances(): """ Test the pairwise_distance helper function. """ rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) S = pairwise_distances(X, metric="euclidean") S2 = euclidean_distances(X) assert_array_almost_equal(S, S2) # Euclidean distance, with Y != X. Y = rng.random_sample((2, 4)) S = pairwise_distances(X, Y, metric="euclidean") S2 = euclidean_distances(X, Y) assert_array_almost_equal(S, S2) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean") assert_array_almost_equal(S, S2) # "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial. S = pairwise_distances(X, metric="cityblock") S2 = pairwise_distances(X, metric=cityblock) assert_equal(S.shape[0], S.shape[1]) assert_equal(S.shape[0], X.shape[0]) assert_array_almost_equal(S, S2) # The manhattan metric should be equivalent to cityblock. 
S = pairwise_distances(X, Y, metric="manhattan") S2 = pairwise_distances(X, Y, metric=cityblock) assert_equal(S.shape[0], X.shape[0]) assert_equal(S.shape[1], Y.shape[0]) assert_array_almost_equal(S, S2) # Low-level function for manhattan can divide in blocks to avoid # using too much memory during the broadcasting S3 = manhattan_distances(X, Y, size_threshold=10) assert_array_almost_equal(S, S3) # Test cosine as a string metric versus cosine callable # "cosine" uses sklearn metric, cosine (function) is scipy.spatial S = pairwise_distances(X, Y, metric="cosine") S2 = pairwise_distances(X, Y, metric=cosine) assert_equal(S.shape[0], X.shape[0]) assert_equal(S.shape[1], Y.shape[0]) assert_array_almost_equal(S, S2) # Tests that precomputed metric returns pointer to, and not copy of, X. S = np.dot(X, X.T) S2 = pairwise_distances(S, metric="precomputed") assert_true(S is S2) # Test with sparse X and Y, # currently only supported for Euclidean, L1 and cosine. X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean") S2 = euclidean_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse, metric="cosine") S2 = cosine_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan") S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo()) assert_array_almost_equal(S, S2) S2 = manhattan_distances(X, Y) assert_array_almost_equal(S, S2) # Test with scipy.spatial.distance metric, with a kwd kwds = {"p": 2.0} S = pairwise_distances(X, Y, metric="minkowski", **kwds) S2 = pairwise_distances(X, Y, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # same with Y = None kwds = {"p": 2.0} S = pairwise_distances(X, metric="minkowski", **kwds) S2 = pairwise_distances(X, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # Test that scipy distance metrics throw an error if sparse matrix given assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski") assert_raises(TypeError, pairwise_distances, X, Y_sparse, metric="minkowski") # Test that a value error is raised if the metric is unkown assert_raises(ValueError, pairwise_distances, X, Y, metric="blah") def test_pairwise_parallel(): rng = np.random.RandomState(0) for func in (np.array, csr_matrix): X = func(rng.random_sample((5, 4))) Y = func(rng.random_sample((3, 4))) S = euclidean_distances(X) S2 = _parallel_pairwise(X, None, euclidean_distances, n_jobs=3) assert_array_almost_equal(S, S2) S = euclidean_distances(X, Y) S2 = _parallel_pairwise(X, Y, euclidean_distances, n_jobs=3) assert_array_almost_equal(S, S2) def test_pairwise_kernels(): """ Test the pairwise_kernels helper function. """ def callable_rbf_kernel(x, y, **kwds): """ Callable version of pairwise.rbf_kernel. """ K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds) return K rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((2, 4)) # Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS. 
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2", "additive_chi2"] for metric in test_metrics: function = PAIRWISE_KERNEL_FUNCTIONS[metric] # Test with Y=None K1 = pairwise_kernels(X, metric=metric) K2 = function(X) assert_array_almost_equal(K1, K2) # Test with Y=Y K1 = pairwise_kernels(X, Y=Y, metric=metric) K2 = function(X, Y=Y) assert_array_almost_equal(K1, K2) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric) assert_array_almost_equal(K1, K2) # Test with sparse X and Y X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) if metric in ["chi2", "additive_chi2"]: # these don't support sparse matrices yet assert_raises(ValueError, pairwise_kernels, X_sparse, Y=Y_sparse, metric=metric) continue K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric) assert_array_almost_equal(K1, K2) # Test with a callable function, with given keywords. metric = callable_rbf_kernel kwds = {} kwds['gamma'] = 0.1 K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds) K2 = rbf_kernel(X, Y=Y, **kwds) assert_array_almost_equal(K1, K2) # callable function, X=Y K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds) K2 = rbf_kernel(X, Y=X, **kwds) assert_array_almost_equal(K1, K2) def test_pairwise_kernels_filter_param(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((2, 4)) K = rbf_kernel(X, Y, gamma=0.1) params = {"gamma": 0.1, "blabla": ":)"} K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params) assert_array_almost_equal(K, K2) assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params) def test_paired_distances(): """ Test the pairwise_distance helper function. """ rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) # Euclidean distance, with Y != X. 
Y = rng.random_sample((5, 4)) for metric, func in iteritems(PAIRED_DISTANCES): S = paired_distances(X, Y, metric=metric) S2 = func(X, Y) assert_array_almost_equal(S, S2) if metric in PAIRWISE_DISTANCE_FUNCTIONS: # Check the the pairwise_distances implementation # gives the same value distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y) distances = np.diag(distances) assert_array_almost_equal(distances, S) # Check the callable implementation S = paired_distances(X, Y, metric='manhattan') S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x -y).sum(axis=0)) assert_array_almost_equal(S, S2) # Test that a value error is raised when the lengths of X and Y should not # differ Y = rng.random_sample((3, 4)) assert_raises(ValueError, paired_distances, X, Y) def test_pairwise_distances_argmin_min(): """ Check pairwise minimum distances computation for any metric""" X = [[0], [1]] Y = [[-1], [2]] Xsp = dok_matrix(X) Ysp = csr_matrix(Y, dtype=np.float32) # euclidean metric D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean") D2 = pairwise_distances_argmin(X, Y, metric="euclidean") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(D2, [0, 1]) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # sparse matrix case Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean") assert_array_equal(Dsp, D) assert_array_equal(Esp, E) # We don't want np.matrix here assert_equal(type(Dsp), np.ndarray) assert_equal(type(Esp), np.ndarray) # Non-euclidean sklearn metric D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan") D2 = pairwise_distances_argmin(X, Y, metric="manhattan") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(D2, [0, 1]) assert_array_almost_equal(E, [1., 1.]) D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan") D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Non-euclidean Scipy distance (callable) D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski, metric_kwargs={"p": 2}) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Non-euclidean Scipy distance (string) D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski", metric_kwargs={"p": 2}) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Compare with naive implementation rng = np.random.RandomState(0) X = rng.randn(97, 149) Y = rng.randn(111, 149) dist = pairwise_distances(X, Y, metric="manhattan") dist_orig_ind = dist.argmin(axis=0) dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))] dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min( X, Y, axis=0, metric="manhattan", batch_size=50) np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7) np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7) def test_euclidean_distances(): """ Check the pairwise Euclidean distances computation""" X = [[0]] Y = [[1], [2]] D = euclidean_distances(X, Y) assert_array_almost_equal(D, [[1., 2.]]) X = csr_matrix(X) Y = csr_matrix(Y) D = euclidean_distances(X, Y) assert_array_almost_equal(D, [[1., 2.]]) # Paired distances def test_paired_euclidean_distances(): """ Check the paired Euclidean distances computation""" X = [[0], [0]] Y = [[1], [2]] D = paired_euclidean_distances(X, Y) assert_array_almost_equal(D, [1., 2.]) def test_paired_manhattan_distances(): """ Check the paired manhattan distances computation""" X = [[0], [0]] Y = [[1], [2]] D = 
paired_manhattan_distances(X, Y) assert_array_almost_equal(D, [1., 2.]) def test_chi_square_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((10, 4)) K_add = additive_chi2_kernel(X, Y) gamma = 0.1 K = chi2_kernel(X, Y, gamma=gamma) assert_equal(K.dtype, np.float) for i, x in enumerate(X): for j, y in enumerate(Y): chi2 = -np.sum((x - y) ** 2 / (x + y)) chi2_exp = np.exp(gamma * chi2) assert_almost_equal(K_add[i, j], chi2) assert_almost_equal(K[i, j], chi2_exp) # check diagonal is ones for data with itself K = chi2_kernel(Y) assert_array_equal(np.diag(K), 1) # check off-diagonal is < 1 but > 0: assert_true(np.all(K > 0)) assert_true(np.all(K - np.diag(np.diag(K)) < 1)) # check that float32 is preserved X = rng.random_sample((5, 4)).astype(np.float32) Y = rng.random_sample((10, 4)).astype(np.float32) K = chi2_kernel(X, Y) assert_equal(K.dtype, np.float32) # check integer type gets converted, # check that zeros are handled X = rng.random_sample((10, 4)).astype(np.int32) K = chi2_kernel(X, X) assert_true(np.isfinite(K).all()) assert_equal(K.dtype, np.float) # check that kernel of similar things is greater than dissimilar ones X = [[.3, .7], [1., 0]] Y = [[0, 1], [.9, .1]] K = chi2_kernel(X, Y) assert_greater(K[0, 0], K[0, 1]) assert_greater(K[1, 1], K[1, 0]) # test negative input assert_raises(ValueError, chi2_kernel, [[0, -1]]) assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]]) assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]]) # different n_features in X and Y assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]]) # sparse matrices assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y)) assert_raises(ValueError, additive_chi2_kernel, csr_matrix(X), csr_matrix(Y)) def test_kernel_symmetry(): """ Valid kernels should be symmetric""" rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) for kernel in (linear_kernel, polynomial_kernel, rbf_kernel, sigmoid_kernel, cosine_similarity): K = kernel(X, X) assert_array_almost_equal(K, K.T, 15) def test_kernel_sparse(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) X_sparse = csr_matrix(X) for kernel in (linear_kernel, polynomial_kernel, rbf_kernel, sigmoid_kernel, cosine_similarity): K = kernel(X, X) K2 = kernel(X_sparse, X_sparse) assert_array_almost_equal(K, K2) def test_linear_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) K = linear_kernel(X, X) # the diagonal elements of a linear kernel are their squared norm assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X]) def test_rbf_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) K = rbf_kernel(X, X) # the diagonal elements of a rbf kernel are 1 assert_array_almost_equal(K.flat[::6], np.ones(5)) def test_cosine_similarity(): """ Test the cosine_similarity. """ rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((3, 4)) Xcsr = csr_matrix(X) Ycsr = csr_matrix(Y) for X_, Y_ in ((X, None), (X, Y), (Xcsr, None), (Xcsr, Ycsr)): # Test that the cosine is kernel is equal to a linear kernel when data # has been previously normalized by L2-norm. 
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine") X_ = normalize(X_) if Y_ is not None: Y_ = normalize(Y_) K2 = pairwise_kernels(X_, Y=Y_, metric="linear") assert_array_almost_equal(K1, K2) def test_check_dense_matrices(): """ Ensure that pairwise array check works for dense matrices.""" # Check that if XB is None, XB is returned as reference to XA XA = np.resize(np.arange(40), (5, 8)) XA_checked, XB_checked = check_pairwise_arrays(XA, None) assert_true(XA_checked is XB_checked) assert_array_equal(XA, XA_checked) def test_check_XB_returned(): """ Ensure that if XA and XB are given correctly, they return as equal.""" # Check that if XB is not None, it is returned equal. # Note that the second dimension of XB is the same as XA. XA = np.resize(np.arange(40), (5, 8)) XB = np.resize(np.arange(32), (4, 8)) XA_checked, XB_checked = check_pairwise_arrays(XA, XB) assert_array_equal(XA, XA_checked) assert_array_equal(XB, XB_checked) XB = np.resize(np.arange(40), (5, 8)) XA_checked, XB_checked = check_paired_arrays(XA, XB) assert_array_equal(XA, XA_checked) assert_array_equal(XB, XB_checked) def test_check_different_dimensions(): """ Ensure an error is raised if the dimensions are different. """ XA = np.resize(np.arange(45), (5, 9)) XB = np.resize(np.arange(32), (4, 8)) assert_raises(ValueError, check_pairwise_arrays, XA, XB) XB = np.resize(np.arange(4 * 9), (4, 9)) assert_raises(ValueError, check_paired_arrays, XA, XB) def test_check_invalid_dimensions(): """ Ensure an error is raised on 1D input arrays. """ XA = np.arange(45) XB = np.resize(np.arange(32), (4, 8)) assert_raises(ValueError, check_pairwise_arrays, XA, XB) XA = np.resize(np.arange(45), (5, 9)) XB = np.arange(32) assert_raises(ValueError, check_pairwise_arrays, XA, XB) def test_check_sparse_arrays(): """ Ensures that checks return valid sparse matrices. """ rng = np.random.RandomState(0) XA = rng.random_sample((5, 4)) XA_sparse = csr_matrix(XA) XB = rng.random_sample((5, 4)) XB_sparse = csr_matrix(XB) XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse) # compare their difference because testing csr matrices for # equality with '==' does not work as expected. assert_true(issparse(XA_checked)) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) assert_true(issparse(XB_checked)) assert_equal(abs(XB_sparse - XB_checked).sum(), 0) XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse) assert_true(issparse(XA_checked)) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) assert_true(issparse(XA_2_checked)) assert_equal(abs(XA_2_checked - XA_checked).sum(), 0) def tuplify(X): """ Turns a numpy matrix (any n-dimensional array) into tuples.""" s = X.shape if len(s) > 1: # Tuplify each sub-array in the input. return tuple(tuplify(row) for row in X) else: # Single dimension input, just return tuple of contents. return tuple(r for r in X) def test_check_tuple_input(): """ Ensures that checks return valid tuples. """ rng = np.random.RandomState(0) XA = rng.random_sample((5, 4)) XA_tuples = tuplify(XA) XB = rng.random_sample((5, 4)) XB_tuples = tuplify(XB) XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples) assert_array_equal(XA_tuples, XA_checked) assert_array_equal(XB_tuples, XB_checked) def test_check_preserve_type(): """ Ensures that type float32 is preserved. 
""" XA = np.resize(np.arange(40), (5, 8)).astype(np.float32) XB = np.resize(np.arange(40), (5, 8)).astype(np.float32) XA_checked, XB_checked = check_pairwise_arrays(XA, None) assert_equal(XA_checked.dtype, np.float32) # both float32 XA_checked, XB_checked = check_pairwise_arrays(XA, XB) assert_equal(XA_checked.dtype, np.float32) assert_equal(XB_checked.dtype, np.float32) # mismatched A XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float), XB) assert_equal(XA_checked.dtype, np.float) assert_equal(XB_checked.dtype, np.float) # mismatched B XA_checked, XB_checked = check_pairwise_arrays(XA, XB.astype(np.float)) assert_equal(XA_checked.dtype, np.float) assert_equal(XB_checked.dtype, np.float)
bsd-3-clause
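A short sketch illustrating the equivalences the test_pairwise tests above assert, assuming a scikit-learn version that exposes these helpers under sklearn.metrics.pairwise:

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.metrics.pairwise import (pairwise_distances,
                                      euclidean_distances,
                                      manhattan_distances)

rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))

# String metrics dispatch to the dedicated implementations.
assert np.allclose(pairwise_distances(X, Y, metric="euclidean"),
                   euclidean_distances(X, Y))
assert np.allclose(pairwise_distances(X, Y, metric="manhattan"),
                   manhattan_distances(X, Y))

# Sparse input is supported for the (default) euclidean metric.
assert np.allclose(pairwise_distances(csr_matrix(X), csr_matrix(Y)),
                   euclidean_distances(X, Y))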
justinccdev/opensimulator-tools
analysis/opensimulator-stats-analyzer/src/ostagraph.py
2
3710
#!/usr/bin/python

import argparse
import json
import os
import os.path
import matplotlib.pyplot as plt
import sys

from pylab import *
from osta.osta import *

#################
### FUNCTIONS ###
#################

def plotNoneAction(stats, type):
    for stat in stats:
        plt.plot(stat[type]['values'], label=stat['container'])

def plotSumAction(stats, type):
    totalsStat = OSimStatsHelper.sumStats(stats)
    plt.plot(totalsStat[type]['values'], label=totalsStat['container'])

def produceGraph(sets, select, statType, action, show, save, outPath):
    stats = corpus.getStats(sets, select)

    if len(stats) <= 0:
        print "No stats matching %s" % (select)
        return

    # Used to fetch data that will be the same for all stats
    oneStat = stats[stats.keys()[0]]

    clf()
    plt.title(select)
    plt.ylabel(oneStat[statType]['units'])
    plt.xlabel("samples")

    if action == 'sum':
        plotSumAction(stats.values(), statType)
    else:
        plotNoneAction(stats.values(), statType)

    plt.legend()

    if save:
        savefig(outPath)

    if show:
        plt.show()

############
### MAIN ###
############
parser = argparse.ArgumentParser(formatter_class = argparse.RawTextHelpFormatter)

parser.add_argument(
    '--batch',
    help = "Path to a json file containing batch instructions for producing graphs. If this is set then any options are ignored except for --outpath",
    default = argparse.SUPPRESS)

parser.add_argument(
    '--select',
    help = "Select the full name of a stat to graph (e.g. \"scene.Keynote 1.RootAgents\")")

parser.add_argument(
    '--type',
    help = "Type of value to graph. Either 'abs' or 'delta'. Default is 'abs'",
    default = 'abs')

parser.add_argument(
    '--action',
    help = "Perform an action on the stat or stats. Only current action is none or sum. Default is none.",
    default = 'none')

parser.add_argument(
    '--out',
    help = "Path to output the graph rather than interactively display. Filename extension determines graphics type (e.g. \"graph.jpg\")",
    default = argparse.SUPPRESS)

parser.add_argument(
    '--outdir',
    help = "Directory to output graphs if the --batch option is used",
    default = argparse.SUPPRESS)

parser.add_argument(
    'statsLogPath',
    help = "Path to the stats log file.",
    metavar = "stats-log-path",
    nargs='*')

opt = parser.parse_args()

corpus = OSimStatsCorpus()

for path in opt.statsLogPath:
    corpus.load(path)

if "batch" in opt:
    batchCommands = json.load(open(opt.batch))

    if not os.path.exists(opt.outdir):
        os.mkdir(opt.outdir)

    for graph in batchCommands["graphs"]:
        select = graph["select"]

        if "sets" in graph:
            sets = graph["sets"]
        else:
            sets = "*"

        if "type" in graph:
            type = graph["type"]
        else:
            type = "abs"

        if "action" in graph:
            action = graph["action"]
        else:
            action = "none"

        if "out" in graph:
            outPath = os.path.join(opt.outdir, graph["out"])
            save = True
            show = False
        else:
            outPath = None
            save = False
            show = True

        produceGraph(sets, select, type, action, show, save, outPath)
else:
    save = "out" in opt
    show = not save

    if save:
        outPath = opt.out
    else:
        outPath = None

    produceGraph("*", opt.select, opt.type, opt.action, show, save, outPath)
bsd-3-clause
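A minimal sketch of the plotting pattern used by plotNoneAction/produceGraph above, with hypothetical stat dictionaries shaped the way the script expects (stat[type]['values'], stat['container'], stat[type]['units']):

import matplotlib.pyplot as plt

# Made-up stats purely for illustration; the real data comes from OSimStatsCorpus.
stats = [
    {'container': 'Region A', 'abs': {'values': [10, 12, 11, 15], 'units': 'agents'}},
    {'container': 'Region B', 'abs': {'values': [3, 4, 6, 5], 'units': 'agents'}},
]

for stat in stats:                      # the 'none' action: one line per stat
    plt.plot(stat['abs']['values'], label=stat['container'])

plt.ylabel(stats[0]['abs']['units'])
plt.xlabel('samples')
plt.legend()
plt.show()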
cython-testbed/pandas
pandas/compat/__init__.py
1
12818
""" compat ====== Cross-compatible functions for Python 2 and 3. Key items to import for 2/3 compatible code: * iterators: range(), map(), zip(), filter(), reduce() * lists: lrange(), lmap(), lzip(), lfilter() * unicode: u() [no unicode builtin in Python 3] * longs: long (int in Python 3) * callable * iterable method compatibility: iteritems, iterkeys, itervalues * Uses the original method if available, otherwise uses items, keys, values. * types: * text_type: unicode in Python 2, str in Python 3 * binary_type: str in Python 2, bytes in Python 3 * string_types: basestring in Python 2, str in Python 3 * bind_method: binds functions to classes * add_metaclass(metaclass) - class decorator that recreates class with with the given metaclass instead (and avoids intermediary class creation) Other items: * platform checker """ # pylint disable=W0611 # flake8: noqa import re import functools import itertools from distutils.version import LooseVersion from itertools import product import sys import platform import types from unicodedata import east_asian_width import struct import inspect from collections import namedtuple import collections PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] >= 3 PY35 = sys.version_info >= (3, 5) PY36 = sys.version_info >= (3, 6) PY37 = sys.version_info >= (3, 7) PYPY = platform.python_implementation() == 'PyPy' try: import __builtin__ as builtins # not writeable when instantiated with string, doesn't handle unicode well from cStringIO import StringIO as cStringIO # always writeable from StringIO import StringIO BytesIO = StringIO import cPickle import httplib except ImportError: import builtins from io import StringIO, BytesIO cStringIO = StringIO import pickle as cPickle import http.client as httplib from pandas.compat.chainmap import DeepChainMap if PY3: def isidentifier(s): return s.isidentifier() def str_to_bytes(s, encoding=None): return s.encode(encoding or 'ascii') def bytes_to_str(b, encoding=None): return b.decode(encoding or 'utf-8') # The signature version below is directly copied from Django, # https://github.com/django/django/pull/4846 def signature(f): sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) def get_range_parameters(data): """Gets the start, stop, and step parameters from a range object""" return data.start, data.stop, data.step # have to explicitly put builtins into the namespace range = range map = map zip = zip filter = filter intern = sys.intern reduce = functools.reduce long = int unichr = chr # This was introduced in Python 3.3, but we don't support # Python 3.x < 3.5, so checking PY3 is safe. 
FileNotFoundError = FileNotFoundError # list-producing versions of the major Python iterating functions def lrange(*args, **kwargs): return list(range(*args, **kwargs)) def lzip(*args, **kwargs): return list(zip(*args, **kwargs)) def lmap(*args, **kwargs): return list(map(*args, **kwargs)) def lfilter(*args, **kwargs): return list(filter(*args, **kwargs)) from importlib import reload reload = reload Hashable = collections.abc.Hashable Iterable = collections.abc.Iterable Mapping = collections.abc.Mapping Sequence = collections.abc.Sequence Sized = collections.abc.Sized else: # Python 2 _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$") FileNotFoundError = IOError def isidentifier(s, dotted=False): return bool(_name_re.match(s)) def str_to_bytes(s, encoding='ascii'): return s def bytes_to_str(b, encoding='ascii'): return b def signature(f): return inspect.getargspec(f) def get_range_parameters(data): """Gets the start, stop, and step parameters from a range object""" # seems we only have indexing ops to infer # rather than direct accessors if len(data) > 1: step = data[1] - data[0] stop = data[-1] + step start = data[0] elif len(data): start = data[0] stop = data[0] + 1 step = 1 else: start = stop = 0 step = 1 return start, stop, step # import iterator versions of these functions range = xrange intern = intern zip = itertools.izip filter = itertools.ifilter map = itertools.imap reduce = reduce long = long unichr = unichr # Python 2-builtin ranges produce lists lrange = builtins.range lzip = builtins.zip lmap = builtins.map lfilter = builtins.filter reload = builtins.reload Hashable = collections.Hashable Iterable = collections.Iterable Mapping = collections.Mapping Sequence = collections.Sequence Sized = collections.Sized if PY2: def iteritems(obj, **kw): return obj.iteritems(**kw) def iterkeys(obj, **kw): return obj.iterkeys(**kw) def itervalues(obj, **kw): return obj.itervalues(**kw) next = lambda it: it.next() else: def iteritems(obj, **kw): return iter(obj.items(**kw)) def iterkeys(obj, **kw): return iter(obj.keys(**kw)) def itervalues(obj, **kw): return iter(obj.values(**kw)) next = next def bind_method(cls, name, func): """Bind a method to class, python 2 and python 3 compatible. Parameters ---------- cls : type class to receive bound method name : basestring name of method on class instance func : function function to be bound as method Returns ------- None """ # only python 2 has bound/unbound method issue if not PY3: setattr(cls, name, types.MethodType(func, None, cls)) else: setattr(cls, name, func) # ---------------------------------------------------------------------------- # functions largely based / taken from the six module # Much of the code in this module comes from Benjamin Peterson's six library. 
# The license for this library can be found in LICENSES/SIX and the code can be # found at https://bitbucket.org/gutworth/six # Definition of East Asian Width # http://unicode.org/reports/tr11/ # Ambiguous width can be changed by option _EAW_MAP = {'Na': 1, 'N': 1, 'W': 2, 'F': 2, 'H': 1} if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes def u(s): return s def u_safe(s): return s def to_str(s): """ Convert bytes and non-string into Python 3 str """ if isinstance(s, binary_type): s = bytes_to_str(s) elif not isinstance(s, string_types): s = str(s) return s def strlen(data, encoding=None): # encoding is for compat with PY2 return len(data) def east_asian_len(data, encoding=None, ambiguous_width=1): """ Calculate display width considering unicode East Asian Width """ if isinstance(data, text_type): return sum(_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data) else: return len(data) def import_lzma(): """ import lzma from the std library """ import lzma return lzma def set_function_name(f, name, cls): """ Bind the name/qualname attributes of the function """ f.__name__ = name f.__qualname__ = '{klass}.{name}'.format( klass=cls.__name__, name=name) f.__module__ = cls.__module__ return f ResourceWarning = ResourceWarning else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str def u(s): return unicode(s, "unicode_escape") def u_safe(s): try: return unicode(s, "unicode_escape") except: return s def to_str(s): """ Convert unicode and non-string into Python 2 str """ if not isinstance(s, string_types): s = str(s) return s def strlen(data, encoding=None): try: data = data.decode(encoding) except UnicodeError: pass return len(data) def east_asian_len(data, encoding=None, ambiguous_width=1): """ Calculate display width considering unicode East Asian Width """ if isinstance(data, text_type): try: data = data.decode(encoding) except UnicodeError: pass return sum(_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data) else: return len(data) def import_lzma(): """ import the backported lzma library or raise ImportError if not available """ from backports import lzma return lzma def set_function_name(f, name, cls): """ Bind the name attributes of the function """ f.__name__ = name return f class ResourceWarning(Warning): pass string_and_binary_types = string_types + (binary_type,) try: # callable reintroduced in later versions of Python callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY2: # In PY2 functools.wraps doesn't provide metadata pytest needs to generate # decorated tests using parametrization. 
See pytest GH issue #2782 def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) for slots_var in orig_vars.get('__slots__', ()): orig_vars.pop(slots_var) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper from collections import OrderedDict, Counter if PY3: def raise_with_traceback(exc, traceback=Ellipsis): if traceback == Ellipsis: _, _, traceback = sys.exc_info() raise exc.with_traceback(traceback) else: # this version of raise is a syntax error in Python 3 exec(""" def raise_with_traceback(exc, traceback=Ellipsis): if traceback == Ellipsis: _, _, traceback = sys.exc_info() raise exc, None, traceback """) raise_with_traceback.__doc__ = """Raise exception with existing traceback. If traceback is not passed, uses sys.exc_info() to get traceback.""" # dateutil minimum version import dateutil if LooseVersion(dateutil.__version__) < LooseVersion('2.5'): raise ImportError('dateutil 2.5.0 is the minimum required version') from dateutil import parser as _date_parser parse_date = _date_parser.parse # In Python 3.7, the private re._pattern_type is removed. # Python 3.5+ have typing.re.Pattern if PY36: import typing re_type = typing.re.Pattern else: re_type = type(re.compile('')) # https://github.com/pandas-dev/pandas/pull/9123 def is_platform_little_endian(): """ am I little endian """ return sys.byteorder == 'little' def is_platform_windows(): return sys.platform == 'win32' or sys.platform == 'cygwin' def is_platform_linux(): return sys.platform == 'linux2' def is_platform_mac(): return sys.platform == 'darwin' def is_platform_32bit(): return struct.calcsize("P") * 8 < 64
bsd-3-clause
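A self-contained sketch of the add_metaclass pattern defined above, reimplemented inline (rather than imported from pandas.compat) so it does not depend on a particular pandas version; RegisteringMeta and Plugin are illustrative names:

def add_metaclass(metaclass):
    """Class decorator modelled on the version above: recreate the class
    under the given metaclass, avoiding an intermediary class."""
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        for slots_var in orig_vars.get('__slots__', ()):
            orig_vars.pop(slots_var)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper


class RegisteringMeta(type):
    """Toy metaclass that records the name of every class it creates."""
    registry = []

    def __new__(mcls, name, bases, namespace):
        cls = super(RegisteringMeta, mcls).__new__(mcls, name, bases, namespace)
        RegisteringMeta.registry.append(name)
        return cls


@add_metaclass(RegisteringMeta)
class Plugin(object):
    pass


print(RegisteringMeta.registry)   # ['Plugin'] on both Python 2 and Python 3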
michaelaye/pyciss
pyciss/opusapi.py
1
11781
"""This module is making use of the `OPUS API <https://pds-rings-tools.seti.org/opus/api/>`_ to create web requests for OPUS data, metadata, and preview images. """ from pathlib import Path from urllib.parse import urlencode, urlparse from urllib.request import unquote, urlretrieve import pandas as pd import requests from IPython.display import HTML, display from . import io base_url = "https://tools.pds-rings.seti.org/opus/api" metadata_url = base_url + "/metadata" image_url = base_url + "/image/" dic = {"raw_data": "coiss-raw", "calibrated_data": "coiss-calib"} class MetaData(object): """Receive OPUS Metadata for ISS img_id. Parameters ---------- img_id : str In the form of {'N','W'}0123456789, the image id used in science publications """ attr_dic = { "image": "Image Constraints", "wavelength": "Wavelength Constraints", "surface_geom": "Saturn Surface Geometry", "mission": "Cassini Mission Constraints", "ring_geom": "Ring Geometry Constraints", "general": "General Constraints", "iss": "Cassini ISS Constraints", } def __init__(self, img_id, query=None): self.img_id = img_id urlname = "S_IMG_CO_ISS_{}_{}.json".format(img_id[1:], img_id[0]) fullurl = "{}/{}".format(metadata_url, urlname) print("Requesting", fullurl) if query is not None: query = unquote(urlencode(query)) self.r = requests.get(fullurl, params=query).json() else: self.r = requests.get(fullurl).json() # setting attributes to access data quicker: for key, val in self.attr_dic: setattr(self, key, self.r[val]) # this property access the @property def target_name(self): """str: Intended target name for the current ISS observation""" return self.mission["cassini_target_name"] def _get_dataframe_from_meta_dic(meta, attr_name): d = getattr(meta, attr_name) df = pd.DataFrame({k: [v] for (k, v) in d.items()}) df.index = [meta.img_id] return df class OPUSImageURL(object): """Manage URLS from the OPUS response.""" def __init__(self, jsonlist): self.jsonlist = jsonlist for item in jsonlist: parsed = urlparse(item) if "//" in parsed.path: continue if item.upper().endswith(".LBL"): self.label_url = item elif item.upper().endswith(".IMG"): self.image_url = item def __repr__(self): s = "Label:\n{}\nImage:\n{}".format(self.label_url, self.image_url) return s class OPUSObsID(object): """Manage observation IDs from OPUS responses.""" def __init__(self, obsid_data): self.idname = obsid_data[0] self.raw = OPUSImageURL(obsid_data[1][dic["raw_data"]]) # the images have an iteration number. 
I'm fishing it out here: self.number = self.raw.image_url.split("_")[-1][0] try: self.calib = OPUSImageURL(obsid_data[1][dic["calibrated_data"]]) except KeyError: self.calib = None def _get_img_url(self, size): base = self.raw.label_url[:-4].replace("volumes", "browse") return "{}_{}.jpg".format(base, size) @property def raw_urls(self): return [self.raw.image_url, self.raw.label_url] @property def calib_urls(self): return [self.calib.image_url, self.calib.label_url] @property def all_urls(self): return self.raw_urls + self.calib_urls @property def img_id(self): """Convert OPUS ObsID into the more known image_id.""" tokens = self.idname.split("-") return tokens[-1] @property def small_img_url(self): return self._get_img_url("small") @property def medium_img_url(self): return self._get_img_url("med") @property def thumb_img_url(self): return self._get_img_url("thumb") @property def full_img_url(self): return self._get_img_url("full") def get_meta_data(self): return MetaData(self.img_id) def __repr__(self): s = "Raw:\n{}\nCalibrated:\n{}".format(self.raw, self.calib) return s class OPUS(object): """Manage OPUS API requests. """ def __init__(self, silent=False): self.silent = silent def query_image_id(self, image_id): """Query OPUS via the image_id. This is a query using the 'primaryfilespec' field of the OPUS database. It returns a list of URLS into the `obsids` attribute. This example queries for an image of Titan: >>> opus = opusapi.OPUS() >>> opus.query_image_id('N1695760475_1') After this, one can call `download_results()` to retrieve the found data into the standard locations into the database_path as defined in `.pyciss.yaml` (the config file), """ myquery = {"primaryfilespec": image_id} self.create_files_request(myquery, fmt="json") self.unpack_json_response() return self.obsids def get_metadata(self, obsid, fmt="html", get_response=False): return MetaData(obsid.img_id) def create_request_with_query(self, kind, query, size="thumb", fmt="json"): """api/data.[fmt], api/images/[size].[fmt] api/files.[fmt] kind = ['data', 'images', 'files'] """ if kind == "data" or kind == "files": url = "{}/{}.{}".format(base_url, kind, fmt) elif kind == "images": url = "{}/images/{}.{}".format(base_url, size, fmt) self.url = url self.r = requests.get(url, params=unquote(urlencode(query))) def create_files_request(self, query, fmt="json"): self.create_request_with_query("files", query, fmt=fmt) def create_images_request(self, query, size="thumb", fmt="html"): self.create_request_with_query("images", query, size=size, fmt=fmt) def get_volume_id(self, ring_obsid): url = "{}/{}.json".format(metadata_url, ring_obsid) query = {"cols": "volumeidlist"} r = requests.get(url, params=unquote(urlencode(query))) return r.json()[0]["volume_id_list"] # def create_data_request(self, query, fmt='json'): # myquery = query.copy() # myquery.update(query) # self.create_request_with_query('data', myquery, fmt=fmt) @property def response(self): return self.r.json()["data"] def unpack_json_response(self): if self.r.status_code == 500: if not self.silent: print("No data found.") self.obsids = [] return obsids = [] for obsid_data in self.response.items(): obsids.append(OPUSObsID(obsid_data)) self.obsids = obsids if not self.silent: print("Found {} obsids.".format(len(obsids))) if len(obsids) == 1000: print( "List is 1000 entries long, which is the pre-set limit, hence" " the real number of results might be longe." 
) def get_radial_res_query(self, res1, res2): myquery = dict( target="S+RINGS", instrumentid="Cassini+ISS", projectedradialresolution1=res1, projectedradialresolution2=res2, limit=1000, ) return myquery def _get_time_query(self, t1, t2): myquery = dict(instrumentid="Cassini+ISS", timesec1=t1, timesec2=t2) return myquery def get_between_times(self, t1, t2, target=None): """ Query for OPUS data between times t1 and t2. Parameters ---------- t1, t2 : datetime.datetime, strings Start and end time for the query. If type is datetime, will be converted to isoformat string. If type is string already, it needs to be in an accepted international format for time strings. target : str Potential target for the observation query. Most likely will reduce the amount of data matching the query a lot. Returns ------- None, but set's state of the object to have new query results stored in self.obsids. """ try: # checking if times have isoformat() method (datetimes have) t1 = t1.isoformat() t2 = t2.isoformat() except AttributeError: # if not, should already be a string, so do nothing. pass myquery = self._get_time_query(t1, t2) if target is not None: myquery["target"] = target self.create_files_request(myquery, fmt="json") self.unpack_json_response() def get_between_resolutions(self, res1="", res2="0.5"): myquery = self.get_radial_res_query(res1, res2) self.create_files_request(myquery, fmt="json") self.unpack_json_response() def show_images(self, size="small"): """Shows preview images using the Jupyter notebook HTML display. Parameters ========== size : {'small', 'med', 'thumb', 'full'} Determines the size of the preview image to be shown. """ d = dict(small=256, med=512, thumb=100, full=1024) try: width = d[size] except KeyError: print("Allowed keys:", d.keys()) return img_urls = [i._get_img_url(size) for i in self.obsids] imagesList = "".join( [ "<img style='width: {0}px; margin: 0px; float: " "left; border: 1px solid black;' " "src='{1}' />".format(width, s) for s in img_urls ] ) display(HTML(imagesList)) def download_results(self, savedir=None, raw=True, calib=False, index=None): """Download the previously found and stored Opus obsids. Parameters ========== savedir: str or pathlib.Path, optional If the database root folder as defined by the config.ini should not be used, provide a different savedir here. It will be handed to PathManager. """ obsids = self.obsids if index is None else [self.obsids[index]] for obsid in obsids: pm = io.PathManager(obsid.img_id, savedir=savedir) pm.basepath.mkdir(exist_ok=True) to_download = [] if raw is True: to_download.extend(obsid.raw_urls) if calib is True: to_download.extend(obsid.calib_urls) for url in to_download: basename = Path(url).name print("Downloading", basename) store_path = str(pm.basepath / basename) try: urlretrieve(url, store_path) except Exception as e: urlretrieve(url.replace("https", "http"), store_path) return str(pm.basepath) def download_previews(self, savedir=None): """Download preview files for the previously found and stored Opus obsids. Parameters ========== savedir: str or pathlib.Path, optional If the database root folder as defined by the config.ini should not be used, provide a different savedir here. It will be handed to PathManager. """ for obsid in self.obsids: pm = io.PathManager(obsid.img_id, savedir=savedir) pm.basepath.mkdir(exist_ok=True) basename = Path(obsid.medium_img_url).name print("Downloading", basename) urlretrieve(obsid.medium_img_url, str(pm.basepath / basename))
isc
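A usage sketch assembled from the docstrings in opusapi.py above; it assumes pyciss is installed, network access to the OPUS service, and a configured database path, and reuses the image id from the query_image_id docstring example:

from pyciss.opusapi import OPUS

opus = OPUS()
obsids = opus.query_image_id("N1695760475_1")   # Titan image from the docstring

if obsids:
    first = obsids[0]
    print(first.img_id)        # image id recovered from the OPUS observation id
    print(first.raw_urls)      # raw .IMG and .LBL download URLs
    opus.download_previews()   # medium-size preview JPEGs into the database path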
shikhardb/scikit-learn
examples/cross_decomposition/plot_compare_cross_decomposition.py
142
4761
""" =================================== Compare cross decomposition methods =================================== Simple usage of various cross decomposition algorithms: - PLSCanonical - PLSRegression, with multivariate response, a.k.a. PLS2 - PLSRegression, with univariate response, a.k.a. PLS1 - CCA Given 2 multivariate covarying two-dimensional datasets, X, and Y, PLS extracts the 'directions of covariance', i.e. the components of each datasets that explain the most shared variance between both datasets. This is apparent on the **scatterplot matrix** display: components 1 in dataset X and dataset Y are maximally correlated (points lie around the first diagonal). This is also true for components 2 in both dataset, however, the correlation across datasets for different components is weak: the point cloud is very spherical. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA ############################################################################### # Dataset based latent variables model n = 500 # 2 latents vars: l1 = np.random.normal(size=n) l2 = np.random.normal(size=n) latents = np.array([l1, l1, l2, l2]).T X = latents + np.random.normal(size=4 * n).reshape((n, 4)) Y = latents + np.random.normal(size=4 * n).reshape((n, 4)) X_train = X[:n / 2] Y_train = Y[:n / 2] X_test = X[n / 2:] Y_test = Y[n / 2:] print("Corr(X)") print(np.round(np.corrcoef(X.T), 2)) print("Corr(Y)") print(np.round(np.corrcoef(Y.T), 2)) ############################################################################### # Canonical (symmetric) PLS # Transform data # ~~~~~~~~~~~~~~ plsca = PLSCanonical(n_components=2) plsca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test) # Scatter plot of scores # ~~~~~~~~~~~~~~~~~~~~~~ # 1) On diagonal plot X vs Y scores on each components plt.figure(figsize=(12, 8)) plt.subplot(221) plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train") plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test") plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 1: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") plt.subplot(224) plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train") plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test") plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 2: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") # 2) Off diagonal plot components 1 vs 2 for X and Y plt.subplot(222) plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train") plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test") plt.xlabel("X comp. 1") plt.ylabel("X comp. 2") plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.subplot(223) plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train") plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test") plt.xlabel("Y comp. 1") plt.ylabel("Y comp. 2") plt.title('Y comp. 1 vs Y comp. 
2 , (test corr = %.2f)' % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.show() ############################################################################### # PLS regression, with multivariate response, a.k.a. PLS2 n = 1000 q = 3 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) B = np.array([[1, 2] + [0] * (p - 2)] * q).T # each Yj = 1*X1 + 2*X2 + noize Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5 pls2 = PLSRegression(n_components=3) pls2.fit(X, Y) print("True B (such that: Y = XB + Err)") print(B) # compare pls2.coefs with B print("Estimated B") print(np.round(pls2.coefs, 1)) pls2.predict(X) ############################################################################### # PLS regression, with univariate response, a.k.a. PLS1 n = 1000 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5 pls1 = PLSRegression(n_components=3) pls1.fit(X, y) # note that the number of compements exceeds 1 (the dimension of y) print("Estimated betas") print(np.round(pls1.coefs, 1)) ############################################################################### # CCA (PLS mode B with symmetric deflation) cca = CCA(n_components=2) cca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
bsd-3-clause
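A condensed sketch of the canonical-PLS portion of the example above; note the integer floor division in the train/test split so the slice indices are also valid under Python 3:

import numpy as np
from sklearn.cross_decomposition import PLSCanonical

rng = np.random.RandomState(0)
n = 500
l1, l2 = rng.normal(size=n), rng.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + rng.normal(size=(n, 4))
Y = latents + rng.normal(size=(n, 4))

# Integer division keeps the slice index an int under Python 3.
X_train, Y_train = X[:n // 2], Y[:n // 2]

plsca = PLSCanonical(n_components=2).fit(X_train, Y_train)
X_r, Y_r = plsca.transform(X_train, Y_train)
print(np.corrcoef(X_r[:, 0], Y_r[:, 0])[0, 1])  # component 1 scores are strongly correlated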
roxyboy/scikit-learn
sklearn/feature_selection/tests/test_rfe.py
209
11733
""" Testing Recursive feature elimination """ import warnings import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal from nose.tools import assert_equal, assert_true from scipy import sparse from sklearn.feature_selection.rfe import RFE, RFECV from sklearn.datasets import load_iris, make_friedman1 from sklearn.metrics import zero_one_loss from sklearn.svm import SVC, SVR from sklearn.ensemble import RandomForestClassifier from sklearn.cross_validation import cross_val_score from sklearn.utils import check_random_state from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_greater from sklearn.metrics import make_scorer from sklearn.metrics import get_scorer class MockClassifier(object): """ Dummy classifier to test recursive feature ellimination """ def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert_true(len(X) == len(Y)) self.coef_ = np.ones(X.shape[1], dtype=np.float64) return self def predict(self, T): return T.shape[0] predict_proba = predict decision_function = predict transform = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. return score def get_params(self, deep=True): return {'foo_param': self.foo_param} def set_params(self, **params): return self def test_rfe_set_params(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target clf = SVC(kernel="linear") rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) y_pred = rfe.fit(X, y).predict(X) clf = SVC() with warnings.catch_warnings(record=True): # estimator_params is deprecated rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1, estimator_params={'kernel': 'linear'}) y_pred2 = rfe.fit(X, y).predict(X) assert_array_equal(y_pred, y_pred2) def test_rfe_features_importance(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2) rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) assert_equal(len(rfe.ranking_), X.shape[1]) clf_svc = SVC(kernel="linear") rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1) rfe_svc.fit(X, y) # Check if the supports are equal assert_array_equal(rfe.get_support(), rfe_svc.get_support()) def test_rfe_deprecation_estimator_params(): deprecation_message = ("The parameter 'estimator_params' is deprecated as " "of version 0.16 and will be removed in 0.18. 
The " "parameter is no longer necessary because the " "value is set via the estimator initialisation or " "set_params method.") generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target assert_warns_message(DeprecationWarning, deprecation_message, RFE(estimator=SVC(), n_features_to_select=4, step=0.1, estimator_params={'kernel': 'linear'}).fit, X=X, y=y) assert_warns_message(DeprecationWarning, deprecation_message, RFECV(estimator=SVC(), step=1, cv=5, estimator_params={'kernel': 'linear'}).fit, X=X, y=y) def test_rfe(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] X_sparse = sparse.csr_matrix(X) y = iris.target # dense model clf = SVC(kernel="linear") rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) X_r = rfe.transform(X) clf.fit(X_r, y) assert_equal(len(rfe.ranking_), X.shape[1]) # sparse model clf_sparse = SVC(kernel="linear") rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1) rfe_sparse.fit(X_sparse, y) X_r_sparse = rfe_sparse.transform(X_sparse) assert_equal(X_r.shape, iris.data.shape) assert_array_almost_equal(X_r[:10], iris.data[:10]) assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data)) assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target)) assert_array_almost_equal(X_r, X_r_sparse.toarray()) def test_rfe_mockclassifier(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target # dense model clf = MockClassifier() rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) X_r = rfe.transform(X) clf.fit(X_r, y) assert_equal(len(rfe.ranking_), X.shape[1]) assert_equal(X_r.shape, iris.data.shape) def test_rfecv(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Test using the score function rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5) rfecv.fit(X, y) # non-regression test for missing worst feature: assert_equal(len(rfecv.grid_scores_), X.shape[1]) assert_equal(len(rfecv.ranking_), X.shape[1]) X_r = rfecv.transform(X) # All the noisy variable were filtered out assert_array_equal(X_r, iris.data) # same in sparse rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5) X_sparse = sparse.csr_matrix(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) # Test using a customized loss function scoring = make_scorer(zero_one_loss, greater_is_better=False) rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=scoring) ignore_warnings(rfecv.fit)(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test using a scorer scorer = get_scorer('accuracy') rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=scorer) rfecv.fit(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test fix on grid_scores def test_scorer(estimator, X, y): return 1.0 rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=test_scorer) rfecv.fit(X, y) assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_))) # Same as the first two tests, but with step=2 rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5) rfecv.fit(X, y) assert_equal(len(rfecv.grid_scores_), 6) assert_equal(len(rfecv.ranking_), 
X.shape[1]) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5) X_sparse = sparse.csr_matrix(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) def test_rfecv_mockclassifier(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Test using the score function rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5) rfecv.fit(X, y) # non-regression test for missing worst feature: assert_equal(len(rfecv.grid_scores_), X.shape[1]) assert_equal(len(rfecv.ranking_), X.shape[1]) def test_rfe_estimator_tags(): rfe = RFE(SVC(kernel='linear')) assert_equal(rfe._estimator_type, "classifier") # make sure that cross-validation is stratified iris = load_iris() score = cross_val_score(rfe, iris.data, iris.target) assert_greater(score.min(), .7) def test_rfe_min_step(): n_features = 10 X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0) n_samples, n_features = X.shape estimator = SVR(kernel="linear") # Test when floor(step * n_features) <= 0 selector = RFE(estimator, step=0.01) sel = selector.fit(X, y) assert_equal(sel.support_.sum(), n_features // 2) # Test when step is between (0,1) and floor(step * n_features) > 0 selector = RFE(estimator, step=0.20) sel = selector.fit(X, y) assert_equal(sel.support_.sum(), n_features // 2) # Test when step is an integer selector = RFE(estimator, step=5) sel = selector.fit(X, y) assert_equal(sel.support_.sum(), n_features // 2) def test_number_of_subsets_of_features(): # In RFE, 'number_of_subsets_of_features' # = the number of iterations in '_fit' # = max(ranking_) # = 1 + (n_features + step - n_features_to_select - 1) // step # After optimization #4534, this number # = 1 + np.ceil((n_features - n_features_to_select) / float(step)) # This test case is to test their equivalence, refer to #4534 and #3824 def formula1(n_features, n_features_to_select, step): return 1 + ((n_features + step - n_features_to_select - 1) // step) def formula2(n_features, n_features_to_select, step): return 1 + np.ceil((n_features - n_features_to_select) / float(step)) # RFE # Case 1, n_features - n_features_to_select is divisible by step # Case 2, n_features - n_features_to_select is not divisible by step n_features_list = [11, 11] n_features_to_select_list = [3, 3] step_list = [2, 3] for n_features, n_features_to_select, step in zip( n_features_list, n_features_to_select_list, step_list): generator = check_random_state(43) X = generator.normal(size=(100, n_features)) y = generator.rand(100).round() rfe = RFE(estimator=SVC(kernel="linear"), n_features_to_select=n_features_to_select, step=step) rfe.fit(X, y) # this number also equals to the maximum of ranking_ assert_equal(np.max(rfe.ranking_), formula1(n_features, n_features_to_select, step)) assert_equal(np.max(rfe.ranking_), formula2(n_features, n_features_to_select, step)) # In RFECV, 'fit' calls 'RFE._fit' # 'number_of_subsets_of_features' of RFE # = the size of 'grid_scores' of RFECV # = the number of iterations of the for loop before optimization #4534 # RFECV, n_features_to_select = 1 # Case 1, n_features - 1 is divisible by step # Case 2, n_features - 1 is not divisible by step n_features_to_select = 1 n_features_list = [11, 10] step_list = [2, 2] for n_features, step in zip(n_features_list, step_list): generator = 
check_random_state(43) X = generator.normal(size=(100, n_features)) y = generator.rand(100).round() rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5) rfecv.fit(X, y) assert_equal(rfecv.grid_scores_.shape[0], formula1(n_features, n_features_to_select, step)) assert_equal(rfecv.grid_scores_.shape[0], formula2(n_features, n_features_to_select, step))
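
# A minimal usage sketch (not part of the original test module): how the
# RFE / RFECV behaviour exercised above looks from user code, assuming the
# same public scikit-learn API these tests import. The demo function name and
# print output are illustrative only.
def _demo_rfe_usage():
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.feature_selection import RFE, RFECV
    from sklearn.svm import SVC

    iris = load_iris()
    rng = np.random.RandomState(0)
    # Append 6 noise columns, mirroring the fixtures used in the tests above.
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target

    # Keep 4 features, eliminating ~10% of the remaining features per step.
    rfe = RFE(estimator=SVC(kernel="linear"), n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    print("RFE ranking: %s" % rfe.ranking_)   # rank 1 marks selected features

    # Let cross-validation choose the number of features instead.
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    print("RFECV kept %d features" % rfecv.n_features_)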
bsd-3-clause
bitemyapp/ggplot
ggplot/utils/color.py
13
72058
""" Python module for color functions. """ from __future__ import division from __future__ import print_function from __future__ import with_statement import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors import types from functools import partial from collections import Iterable from matplotlib.cbook import is_string_like from matplotlib.cm import get_cmap from matplotlib.colors import LinearSegmentedColormap def display_color(c): """ Utility for displaying a color. display_color will make a plot with a circle that is the input parameter. Parameters ========== c - string a color; can be RGB, hex, name, whatever """ dot = plt.Circle((.5,.5), .4, color=c) fig = plt.gcf() fig.gca().add_artist(dot) plt.show() def display_colors(cs): n = len(cs) fig = plt.gcf() print("Colors:") for i, c in enumerate(cs): print(i, c, (i, 0.5), 1./n) fig.gca().add_artist(plt.Circle((i/n, 0.5), 1./n, color=c)) plt.show() class SMeta(type): """ Usage: __metaclass__ = SMeta """ def __call__(*args): cls = args[0] key = args[1:] try: cache = cls._cache except: cache = dict() cls._cache = cache try: obj = cache[key] except: obj = type.__call__(*args) cache[key] = obj return obj class ColorModel(object): """ Color Model base class. Note that this is generated as "singleton" - only one object of each class. """ __metaclass__ = SMeta limits = np.tile(np.array([0.,1.]),(3,1)) range = limits.copy() @classmethod def _inverse(self): raise NotImplementedError() @classmethod def inverse(cls, *args, **kwargs): """ Return inverse color transform. Subclasses to define method _inverse to return instance of inverse object. """ if len(args) > 0 or len(kwargs) > 0: return cls._inverse()(*args, **kwargs) return cls._inverse() def __call__(self, *agrs, **kwargs): """ Accepts and return [x,3] array. Optionally deal with 3 vectors, 3 scalars. Treatment of 3x3 is ambiguous and will be interpreted as [x,3]. 
""" raise NotImplementedError() # a set of conversion routines to be used by derived classes @staticmethod def _args_to_vectors(args): """ TODO - need to add auto-convert to gray """ assert len(args) in (1,3) if len(args) == 3: if not isinstance(args[0], np.ndarray): mode = 0 p0 = np.array([args[0]]) p1 = np.array([args[1]]) p2 = np.array([args[2]]) else: mode = 1 p0, p1, p2 = args else: arg = args[0] if isinstance(arg, Iterable) and not isinstance(arg, np.ndarray): arg = np.array(arg, dtype = np.float64) assert isinstance(arg, np.ndarray) if len(arg.shape) == 2: if arg.shape[1] == 3: mode = 2 p0, p1, p2 = arg.transpose() else: mode = 3 p0, p1, p2 = arg else: assert arg.shape == (3,) mode = 4 p0, p1, p2 = arg[:,np.newaxis] return p0, p1, p2, mode @staticmethod def _args_to_array(args): """ TODO - need to add auto-convert to gray """ assert len(args) in (1,3) if len(args) == 3: if not isinstance(args[0], np.ndarray): mode = 0 a = np.array([args]) else: mode = 1 a = np.array(args).transpose() else: arg = args[0] if isinstance(arg, Iterable) and not isinstance(arg, np.ndarray): arg = np.array(arg, dtype = np.float64) assert isinstance(arg, np.ndarray) if len(arg.shape) == 2: if arg.shape[1] == 3: mode = 2 a = arg else: mode = 3 a = arg.transpose() else: assert arg.shape == (3,) mode = 4 a = np.array(args) return a, mode @staticmethod def _vectors_to_return(p0, p1, p2, mode): if mode == 0: return p0[0], p1[0], p2[0] if mode == 1: return p0, p1, p2 if mode == 2: return np.vstack((p0, p1, p2)).transpose() if mode == 3: return np.vstack((p0, p1, p2)) return np.hstack((p0, p1, p2)) @staticmethod def _array_to_return(a, mode): if mode == 0: return a[0][0], a[0][1], a[0][2] if mode == 1: return a[:,0], a[:,1], a[:,2] if mode == 2: return a if mode == 3: return a.transpose() return a[0] # deal with gray values @staticmethod def _gray_args_to_vector(*args): assert len(args) == 1 arg = args[0] if isinstance(arg, Iterable) and not isinstance(arg, np.ndarray): arg = np.array(arg, dtype = np.float64) if not isinstance(arg, np.ndarray): a = np.array(args) mode = 0 else: if arg.shape == (): a = arg[np.newaxis] mode = 4 elif len(arg.shape) == 1: mode = 2 a = arg else: assert len(arg.shape) == 2 if arg.shape[0] == 1: mode = 2 a = arg[0,:] else: assert arg.shape[1] == 1 mode = 3 a = arg[:,0] return a, mode @staticmethod def _gray_array_to_return(a, mode): if mode == 0: return a[0][0], a[0][1], a[0][2] if mode == 1: return a[:,0], a[:,1], a[:,2] if mode == 2: return a if mode == 3: return a.transpose() return a[0] @classmethod def gray(cls, *args): """ Return gray value for given scalar in the source color space. Return should be: scalar --> tuple np_array --> np_array: [1,x] --> [3,x] for x > 1 else: [x,3] This is implemented in cls._gray_array_to_return Here we provide as default the method for RGB as this is used in all of the 'inverse' transforms. If a class provides arrays _gray_index and _gray_value then additionally we set in, e.g., [x, 3] [x,_gray_index] = [_gray_value] Typical use is a 2-vector, e.g., _gray_index = [1,2] _gray_value = [0,1] for use in color circle values like HSV. """ v, mode = cls._gray_args_to_vector(*args) a = np.tile(v,(3,1)).transpose() try: a[:,np.array(cls._gray_index)] = np.array(cls._gray_value)[np.newaxis,:] except: pass return cls._array_to_return(a, mode) @classmethod def is_normal(cls, limits): """ Check whether range is valid or should be normalized. This just covers a set of default checks from my old IDL routines. 
""" for i in xrange(3): if cls.limits[i,1] > 1 and limits[i,1] <= 1: return False if cls.limits[i,1] <= 1 and limits[i,1] > cls.limits[i,1]: return False if cls.limits[i,0] > 1 and limits[i,0] < cls.limits[i,0]: return False return True @classmethod def normalize(cls, *args): """ By default we just scale 0...1 range to limits no matter what the values. """ a, mode = cls._args_to_array(*args) m = cls.limits[:,0][np.newaxis,:] M = cls.limits[:,1][np.newaxis,:] a = m + a*(M-m) print("why do we normalize?") return cls._array_to_return(a, mode) class ColorModelMatrix(ColorModel): """ Prototype for matric color classes. provides __call__ method requires _matrix class attribute """ @classmethod def _transform(cls, a): return np.inner(a, cls._matrix) @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ a, mode = cls._args_to_array(args) np.clip(a, (cls.limits[:,0])[np.newaxis,:], (cls.limits[:,1])[np.newaxis,:], out = a) a = cls._transform(a) np.clip(a, (cls.range[:,0])[np.newaxis,:], (cls.range[:,1])[np.newaxis,:], out = a) return cls._array_to_return(a, mode) ####################################################################### # define specific color models class ColorRGB(ColorModel): """ RGB is essentially just identity. """ @classmethod def _inverse(cls): return cls() @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ return cls._array_to_return(*cls._args_to_array(*args)) #----------------------------------------------------------------------- class ColorCMY(ColorRGB): """ Convert CMY to RGB or inverse. """ @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ cmy, mode = cls._args_to_array(*args) rgb = 1 - cmy return cls._array_to_return(rgb, mode) #----------------------------------------------------------------------- class ColorHSV(ColorModel): """ HSV color model. hue = [0, 360] saturation = [0, 1] value =[0, 1] """ limits = np.array([[0., 360.],[0., 1.],[0., 1.]]) _perm = np.array([[0,1,2],[1,0,2],[2,0,1],[2,1,0],[1,2,0],[0,2,1]]) @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ h, s, v, mode = cls._args_to_vectors(args) np.mod(h, 360., out = h) np.clip(s,0,1, out = s) np.clip(v,0,1, out = v) c = v * s p = h / 60. x = c * (1 - np.abs(np.mod(p, 2.) - 1.)) m = v - c z = np.zeros_like(x) col = np.vstack((c,x,z)).transpose() ip = np.int64(p) rgb = col[np.tile(np.arange(len(x)),(3,1)).transpose(),cls._perm[ip]] rgb += m[:,np.newaxis] np.clip(rgb,0,1, out = rgb) return cls._array_to_return(rgb, mode) @classmethod def _inverse(cls, *args, **kwargs): return ColorHSVInverse() _gray_index = [0,1] _gray_value = [90,0] class ColorHSVInverse(ColorModel): """ Convert RGB to HSV. """ range = ColorHSV.limits @classmethod def __call__(cls, *args): """ Convert colors. Return: hue = [0, 360] saturation = [0, 1] value =[0, 1] """ r, g, b, mode = cls._args_to_vectors(args) M = np.maximum(r,np.maximum(g,b)) m = np.minimum(r,np.minimum(g,b)) C = M - m h = np.zeros_like(C) i = M == r h[i] = np.mod((g[i]-b[i])/C[i],6) i = M == g h[i] = (b[i]-r[i])/C[i] + 2 i = M == b h[i] = (r[i]-g[i])/C[i] + 4 H = h * 60 V = M S = np.zeros_like(C) i = C != 0 S[i] = C[i]/V[i] return cls._vectors_to_return(H,S,V, mode) @staticmethod def _inverse(): return ColorHSV() #----------------------------------------------------------------------- class ColorHSL(ColorModel): """ HSL color model. 
    hue        = [0, 360]
    saturation = [0, 1]
    lightness  = [0, 1]
    """
    limits = np.array([[0., 360.], [0., 1.], [0., 1.]])
    _perm = np.array([[0,1,2],[1,0,2],[2,0,1],[2,1,0],[1,2,0],[0,2,1]])

    @classmethod
    def __call__(cls, *args):
        __doc__ = ColorModel.__call__.__doc__
        h, s, l, mode = cls._args_to_vectors(args)
        np.mod(h, 360., out = h)
        np.clip(s, 0, 1, out = s)
        np.clip(l, 0, 1, out = l)
        c = (1 - np.abs(2 * l - 1)) * s
        p = h / 60.
        x = c * (1 - np.abs(np.mod(p, 2.) - 1.))
        m = l - 0.5 * c
        z = np.zeros_like(x)
        col = np.vstack((c, x, z)).transpose()
        ip = np.int64(p)
        rgb = col[np.tile(np.arange(len(x)), (3,1)).transpose(), cls._perm[ip]]
        rgb += m[:, np.newaxis]
        np.clip(rgb, 0, 1, out = rgb)
        return cls._array_to_return(rgb, mode)

    @staticmethod
    def _inverse():
        return ColorHSLInverse()

    _gray_index = [0, 1]
    _gray_value = [90, 0.]

class ColorHSLInverse(ColorModel):
    """
    Convert RGB to HSL.
    """
    range = ColorHSL.limits

    @classmethod
    def __call__(cls, *args):
        """
        Convert colors.

        Return:
          hue        = [0, 360]
          saturation = [0, 1]
          lightness  = [0, 1]
        """
        r, g, b, mode = cls._args_to_vectors(args)
        M = np.maximum(r, np.maximum(g, b))
        m = np.minimum(r, np.minimum(g, b))
        C = M - m
        h = np.zeros_like(C)
        i = M == r
        h[i] = np.mod((g[i] - b[i]) / C[i], 6)
        i = M == g
        h[i] = (b[i] - r[i]) / C[i] + 2
        i = M == b
        h[i] = (r[i] - g[i]) / C[i] + 4
        H = h * 60
        L = 0.5 * (M + m)
        S = np.zeros_like(C)
        i = C != 0
        # fixed: the original read "C[i] / V(1 - ...)" with a stray, undefined
        # "V"; HSL saturation is chroma divided by (1 - |2L - 1|)
        S[i] = C[i] / (1 - np.abs(2 * L[i] - 1))
        return cls._vectors_to_return(H, S, L, mode)

    @staticmethod
    def _inverse():
        return ColorHSL()

#-----------------------------------------------------------------------

class ColorHSI(ColorModel):
    """
    HSI color model.

    hue        = [0, 360]
    saturation = [0, 1]
    intensity  = [0, 1]
    """
    limits = np.array([[0., 360.], [0., 1.], [0., 1.]])
    _perm = np.array([[0,1,2],[2,0,1],[1,2,0]])

    @classmethod
    def __call__(cls, *args):
        __doc__ = ColorModel.__call__.__doc__
        h, s, i, mode = cls._args_to_vectors(args)
        np.mod(h, 360., out = h)
        np.clip(i, 0, 1, out = i)
        np.clip(s, 0, 1, out = s)
        p = h / 60.
        f = 0.5 * np.mod(p, 2.)
        c = s * i * 3
        x = c * (1 - f)
        y = c * f
        z = np.zeros_like(x)
        m = i - i * s
        col = np.vstack((x, y, z)).transpose()
        ip = np.int64(p / 2)
        rgb = col[np.tile(np.arange(len(x)), (3,1)).transpose(), cls._perm[ip]]
        rgb += m[:, np.newaxis]
        np.clip(rgb, 0, 1, out = rgb)
        return cls._array_to_return(rgb, mode)

    @staticmethod
    def _inverse():
        return ColorHSIInverse()

    _gray_index = [0, 1]
    _gray_value = [90, 0.]

class ColorHSIInverse(ColorModel):
    """
    Convert RGB to HSI.
    """
    range = ColorHSI.limits

    @classmethod
    def __call__(cls, *args):
        """
        Convert colors.

        Return:
          hue        = [0, 360]
          saturation = [0, 1]
          intensity  = [0, 1]
        """
        r, g, b, mode = cls._args_to_vectors(args)
        # fixed: the original called a bare "clip(r, 1, 0)"; use np.clip with
        # the (min, max) arguments in the correct order
        r = np.clip(r, 0, 1)
        g = np.clip(g, 0, 1)
        b = np.clip(b, 0, 1)
        M = np.maximum(r, np.maximum(g, b))
        m = np.minimum(r, np.minimum(g, b))
        C = M - m
        h = np.zeros_like(C)
        i = M == r
        h[i] = np.mod((g[i] - b[i]) / C[i], 6)
        i = M == g
        h[i] = (b[i] - r[i]) / C[i] + 2
        i = M == b
        h[i] = (r[i] - g[i]) / C[i] + 4
        H = h * 60
        I = (r + g + b) / 3.
S = np.zeros_like(C) i = C != 0 S[i] = 1 - m[i] / I[i] return cls._vectors_to_return(H,S,I, mode) @staticmethod def _inverse(): return ColorHSI() #----------------------------------------------------------------------- class ColorHCL(ColorModel): """ HCL color model 'luma/chroma/hue' (renamed for consitency) hue = [0, 360] chroma = [0, 1] luma =[0, 1] Use Y'_601 = 0.30*R + 0.59*G + 0.11*B http://en.wikipedia.org/wiki/HSL_and_HSV#Color-making_attributes """ limits = np.array([[0., 360.],[0., 1.],[0., 1.]]) _perm = np.array([[0,1,2],[1,0,2],[2,0,1],[2,1,0],[1,2,0],[0,2,1]]) _luma_vec = np.array([0.30, 0.59, 0.11]) @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ h, c, y, mode = cls._args_to_vectors(args) h = np.mod(h, 360.) c = np.clip(c,0,1, out = c) y = np.clip(y,0,1, out = y) p = h / 60. x = c * (1 - np.abs(np.mod(p, 2.) - 1.)) z = np.zeros_like(x) ip = np.int64(p) col = np.vstack((c,x,z)).transpose() rgb = col[np.tile(np.arange(len(x)),(3,1)).transpose(),cls._perm[ip]] m = y - np.dot(rgb, cls._luma_vec) rgb += m[:,np.newaxis] rgb = np.clip(rgb,0,1, out = rgb) return cls._array_to_return(rgb, mode) @staticmethod def _inverse(): return ColorHCLInverse() @classmethod def gray(cls, *args): __doc__ = ColorModel.gray.__doc__ return cls.inverse(ColorRGB().gray(*args)) class ColorHCLInverse(ColorModel): """ Convert RGB to HCL. Return: hue = [0, 360] chroma = [0, 1] luma =[0, 1] http://en.wikipedia.org/wiki/HSL_and_HSV#Color-making_attributes """ range = ColorHCL.limits _luma_vec = ColorHCL._luma_vec @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ r, g, b, mode = cls._args_to_vectors(args) r = clip(r,1,0) g = clip(g,1,0) b = clip(b,1,0) M = np.maximum(r,np.maximum(g,b)) m = np.minimum(r,np.minimum(g,b)) C = M - m h = np.zeros_like(C) i = np.logical_and(M == r, C > 0) h[i] = np.mod((g[i]-b[i])/C[i],6) i = np.logical_and(M == g, C > 0) h[i] = (b[i]-r[i])/C[i] + 2 i = np.logical_and(M == b, C > 0) h[i] = (r[i]-g[i])/C[i] + 4 H = h * 60 y = np.dot(np.array([r, g, b]).transpose(), cls._luma_vec) return cls._vectors_to_return(H,C,y, mode) @staticmethod def _inverse(): return ColorHCL() #----------------------------------------------------------------------- class ColorHCL2(ColorHCL): """ HCL color model 'luma/chroma/hue' (renamed for consitency) Input: hue = [0, 360] chroma = [0, 1] luma =[0, 1] Use Y'709 = 0.21*R + 0.72*G + 0.07*B http://en.wikipedia.org/wiki/HSL_and_HSV#Color-making_attributes """ limits = np.array([[0., 360.],[0., 1.],[0., 1.]]) _perm = np.array([[0,1,2],[1,0,2],[2,0,1],[2,1,0],[1,2,0],[0,2,1]]) _luma_vec = np.array([0.21, 0.72, 0.07]) @staticmethod def _inverse(): return ColorHCL2Inverse() class ColorHCL2Inverse(ColorHCLInverse): """ Convert RGB to HCL. Return: hue = [0, 360] chroma = [0, 1] luma =[0, 1] Use Y'709 = 0.21*R + 0.72*G + 0.07*B http://en.wikipedia.org/wiki/HSL_and_HSV#Color-making_attributes """ range = ColorHCL.limits _luma_vec = ColorHCL2._luma_vec @staticmethod def _inverse(): return ColorHCL2() #----------------------------------------------------------------------- class ColorYIQ(ColorModelMatrix): """ YIQ color model. 
y = [0, 1] |i| <= 0.596 |q| <= 0.523 'gray' value: I = Q = 0 """ limits = np.array([[0., 1.],[-0.596, +0.596],[-0.523, +0.523]]) _matrixI = np.matrix( [[0.299, 0.587, 0.114], [0.596,-0.275,-0.321], [0.212,-0.523, 0.311]]) _matrix = _matrixI.getI() _gray_index = [1,2] _gray_value = [0,0] @staticmethod def _inverse(): return ColorYIQInverse() class ColorYIQInverse(ColorModelMatrix): """ Convert RGB to YIQ. Return: y = [0, 1] |i| <= 0.596 |q| <= 0.523 """ range = ColorYIQ.limits _matrix = ColorYIQ._matrixI @staticmethod def _inverse(): return ColorYIQ() #----------------------------------------------------------------------- class ColorYUV(ColorModelMatrix): """ YUV color model. Input: y = [0, 1] |u| <= 0.436 |v| <= 0.615 Rec. 601 http://en.wikipedia.org/wiki/YUV """ limits = np.array([[0., 1.],[-0.436, +0.436],[-0.615, +0.615]]) _matrix = np.matrix( [[ 1, 0 , 1.13983], [ 1,-0.39465,-0.58060], [ 1, 2.03211, 0 ]]) _gray_index = [1,2] _gray_value = [0,0] @staticmethod def _inverse(): return ColorYUVInverse() class ColorYUVInverse(ColorModelMatrix): """ Convert RGB to YUV. Return: y = [0, 1] |u| <= 0.436 |v| <= 0.615 Rec. 601 http://en.wikipedia.org/wiki/YUV """ range = ColorYUV.limits _matrix = np.matrix( [[ 0.299 , 0.587 , 0.114 ], [-0.14713,-0.28886 , 0.463 ], [ 0.615 ,-0.551499,-0.10001]]) @staticmethod def _inverse(): return ColorYUV() #----------------------------------------------------------------------- class ColorYUV2(ColorModelMatrix): """ YUV color model. Input: y = [0, 1] |u| <= 0.436 (?) |v| <= 0.615 (?) Rec. 709 http://en.wikipedia.org/wiki/YUV """ limits = np.array([[0., 1.],[-0.436, +0.436],[-0.615, +0.615]]) _matrix = np.matrix( [[ 1, 0 , 1.28033], [ 1,-0.21482,-0.38059], [ 1, 2.12798, 0 ]]) _gray_index = [1,2] _gray_value = [0,0] @staticmethod def _inverse(): return ColorYUV2Inverse() class ColorYUV2Inverse(ColorModelMatrix): """ Convert RGB to YUV. Return: |u| <= 0.436 (?) |v| <= 0.615 (?) Rec. 709 http://en.wikipedia.org/wiki/YUV """ range = ColorYUV.limits _matrix = np.matrix( [[ 1, 0 , 1.28033], [ 1,-0.21482,-0.38059], [ 1, 2.12798, 0 ]]) @staticmethod def _inverse(): return ColorYUV2() #----------------------------------------------------------------------- class ColorYCbCr(ColorModelMatrix): """ YCrCb color model. Input: y = floating-point value between 16 and 235 Cb, Cr: floating-point values between 16 and 240 """ limits = np.array([[16., 235.],[16., 240.],[16., 240.]]) _matrix = np.matrix( [[ 1, 0 , 1.402 ], [ 1,-0.344,-0.714 ], [ 1,+1.772, 0 ]]) @staticmethod def _inverse(): return ColorYCbCrInverse() @classmethod def _transform(cls, a): return (np.inner(a, cls._matrix) - np.array([0.,128.,128.])[np.newaxis,:]) / 256. @classmethod def gray(cls, *args): __doc__ = ColorModel.gray.__doc__ return cls.inverse(ColorRGB().gray(*args)) class ColorYCbCrInverse(ColorModelMatrix): """ Convert RGB to YCbCr. Return: y = floating-point value between 16 and 235 Cb, Cr: floating-point values between 16 and 240 """ range = ColorYCbCr.limits _matrix = np.matrix( [[ 0.299 , 0.587 , 0.114 ], [-0.169 ,-0.331 , 0.499 ], [ 0.499 ,-0.418 ,-0.0813 ]]) @classmethod def _transform(cls, a): return np.inner(a * 256 + np.array([0.,128.,128.])[np.newaxis,:], cls._matrix) @staticmethod def _inverse(): return ColorYCrCb() #----------------------------------------------------------------------- class ColorYDbDr(ColorModelMatrix): """ YDrDb color model. 
Input: y = [0, 1] Db, Dr: [-1.333, +1.333] http://en.wikipedia.org/wiki/YDbDr """ limits = np.array([[0., 1.],[-1.333,1.333],[-1.333,1.333]]) _matrixI = np.matrix( [[ 0.299 , 0.587 , 0.114 ], [-0.450 ,-0.883 ,+1.333 ], [-1.333 , 1.116 , 0.217 ]]) _matrix = _matrixI.getI() @staticmethod def _inverse(): return ColorYDbDrInverse() @classmethod def gray(cls, *args): __doc__ = ColorModel.gray.__doc__ return cls.inverse(ColorRGB().gray(*args)) class ColorYDbDrInverse(ColorModelMatrix): """ Convert RGB to YDbDr. Return: y = [0, 1] Db, Dr: [-1.333, +1.333] http://en.wikipedia.org/wiki/YDbDr """ range = ColorYDbDr.limits _matrix = ColorYDbDr._matrixI @staticmethod def _inverse(): return ColorYDrDb() #----------------------------------------------------------------------- class ColorYPbPr(ColorModelMatrix): """ YPbPr color model. Input: y = [0, 1] Pb,Pr = [-0.5, 0.5] """ limits = np.array([[0., 1.],[-0.5, 0.5],[-0.5, 0.5]]) _R = 0.2126 _G = 0.7152 _B = 0.0722 _matrixI = np.matrix( [[ _R, _G, _B], [ -_R, -_G,1-_B], [1-_R, -_G, -_B]]) _matrix = _matrixI.getI() _gray_index = [1,2] _gray_value = [0,0] @staticmethod def _inverse(): return ColorYPbPrInverse() class ColorYPbPrInverse(ColorModelMatrix): """ Convert RGB to YPbPr. Return: y = [0, 1] Pb,Pr = [-0.5, 0.5] """ range = ColorYPbPr.limits _matrix = ColorYPbPr._matrixI @staticmethod def _inverse(): return ColorYPbPr() #----------------------------------------------------------------------- class ColorXYZ(ColorModelMatrix): """ CIE XYZ color model. Input: X, Y, Z """ _scale = 1. #/0.17697 limits = np.array([[0., 1.],[0., 1.],[0., 1.]]) * _scale _matrixI = np.matrix( [[0.49 ,0.31 ,0.20 ], [0.17697,0.81240,0.01063], [0.00 ,0.01 ,0.99 ]]) * _scale _matrix = _matrixI.getI() @staticmethod def _inverse(): return ColorXYZInverse() @classmethod def gray(cls, *args): __doc__ = ColorModel.gray.__doc__ return cls.inverse(ColorRGB().gray(*args)) class ColorXYZInverse(ColorModelMatrix): """ Convert RGB to XYZ. Return: X, Y, Z """ range = ColorXYZ.limits _matrix = ColorXYZ._matrixI @staticmethod def _inverse(): return ColorXYZ() #----------------------------------------------------------------------- class ColorLMS(ColorModelMatrix): """ CIE CAT02 LMS color model. Input: L, M, S """ _MCAT02 = np.matrix( [[ 0.7328, 0.4296,-0.1624], [-0.7036, 1.6975, 0.0061], [ 0.0030, 0.0136, 0.9834]]) _matrixI = np.matrix(np.inner(_MCAT02, ColorXYZ._matrixI.transpose())) limits = np.inner(_MCAT02, ColorXYZ.limits.transpose()) _matrix = _matrixI.getI() @staticmethod def _inverse(): return ColorLMSInverse() @classmethod def gray(cls, *args): __doc__ = ColorModel.gray.__doc__ return cls.inverse(ColorRGB().gray(*args)) class ColorLMSInverse(ColorModelMatrix): """ Convert RGB to LMS. CIE CAT02 LMS color model. Return: L, M, S """ range = ColorLMS.limits _matrix = ColorLMS._matrixI @staticmethod def _inverse(): return ColorLMS() #----------------------------------------------------------------------- class ColorxyY(ColorModel): """ CIE xyY color model. Input: x, y, Y http://en.wikipedia.org/wiki/Chromaticity_coordinate """ limits = np.array([[-1., 1.],[-1., 1.],ColorXYZ.limits[1]]) @staticmethod def _inverse(): return ColorxyYInverse() @classmethod def gray(cls, *args): __doc__ = ColorModel.gray.__doc__ return cls.inverse(ColorRGB().gray(*args)) @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ x,y,Y, mode = cls._args_to_vectors(args) Yy = np.ones_like(y) ind = y != 0 Yy[ind] = Y[ind]/y[ind] X = Yy * x Z = Yy * (1. 
- x - y) rgb = ColorXYZ()(np.vstack((X,Y,Z)).transpose()) return cls._array_to_return(rgb, mode) class ColorxyYInverse(ColorModel): """ Convert RGB to xyY. Return: x, y, Y """ range = ColorxyY.limits @staticmethod def _inverse(): return ColorxyY() @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ r,g,b, mode = cls._args_to_vectors(args) X,Y,Z = ColorXYZInverse()(r,g,b) s = X + Y + Z ind = s != 0 si = 1./s x = np.zeros_like(X) y = np.zeros_like(Y) x[ind] = X * si y[ind] = Y * si return cls._vectors_to_return(x,y,Y, mode) #----------------------------------------------------------------------- class ColorLab(ColorModel): """ CIE L*a*b* color model use D65 (6504 K) X=95.047, Y=100.00, Z=108.883 http://en.wikipedia.org/wiki/CIE_Standard_Illuminant_D65 http://en.wikipedia.org/wiki/Lab_color_space """ _Xn = 95.047 _Yn = 100.00 _Zn = 108.883 limits = np.array([[-1,1],[-1,1],[-1,1.]])*np.inf @staticmethod def _inverse(): return ColorLabInverse() @classmethod def gray(cls, *args): __doc__ = ColorModel.gray.__doc__ return cls.inverse(ColorRGB().gray(*args)) def _fn(x): ind = x > 6./29. y = x.copy() y[ind] = x[ind]**3 ind = np.logical_not(ind) y[ind] = 3.*(6/29.)**2*(x-4/29.) return y @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ L,a,b, mode = cls._args_to_vectors(args) Y = cls._Yn * cls._fn((L+16.)/116.) X = cls._Xn * cls._fn((L+16.)/116. + a/500.) Z = cls._Zn * cls._fn((L+16.)/116. - b/200.) rgb = ColorXYZ()(np.vstack((X,Y,Z)).transpose()) return cls._array_to_return(rgb, mode) class ColorLabInverse(ColorModel): """ CIE L*a*b* color model use D65 (6504 K) X=95.047, Y=100.00, Z=108.883 http://en.wikipedia.org/wiki/CIE_Standard_Illuminant_D65 http://en.wikipedia.org/wiki/Lab_color_space """ range = ColorLab.limits _Xn = ColorLab._Xn _Yn = ColorLab._Yn _Zn = ColorLab._Zn @staticmethod def _inverse(): return ColorLab() @staticmethod def _f(x): y = x.copy() ind = x > (6./29.)**3 y[ind] = x[ind]**(1./3.) ind = np.logical_not(ind) y[ind] = 1./3.*(29./6.)**2 * x + 4./29. return y @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ r,g,b, mode = cls._args_to_vectors(args) X,Y,Z = ColorXYZInverse()(r,g,b) L = 116. * cls._f(Y / cls._Yn) - 16. a = 500. * (cls._f(X / cls._Xn) - cls._f(Y / cls._Yn)) b = 200. * (cls._f(Y / cls._Yn) - cls._f(Z / cls._Zn)) return cls._vectors_to_return(L,a,b, mode) #----------------------------------------------------------------------- class ColorLab2(ColorModel): """ Hunter/Adams Lab color model use D65 (6504 K) X=95.047, Y=100.00, Z=108.883 http://en.wikipedia.org/wiki/CIE_Standard_Illuminant_D65 http://en.wikipedia.org/wiki/Lab_color_space """ _Xn = 95.047 _Yn = 100.00 _Zn = 108.883 _Ka = 175./198.04 * (_Xn + _Yn) _Kb = 70./218.11 * (_Yn + _Zn) _K = _Ka / 100. _ke = _Ka / _Kb limits = np.array([[-1,1],[-1,1],[-1,1.]])*np.inf @staticmethod def _inverse(): return ColorLab2Inverse() @classmethod def gray(cls, *args): __doc__ = ColorModel.gray.__doc__ return cls.inverse(ColorRGB().gray(*args)) @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ L,a,b, mode = cls._args_to_vectors(args) ind = L != 0 ca = np.ones_like(L) cb = np.ones_like(L) Y = (L / 100.)**2 * cls._Yn ca[ind] = a[ind] / (cls._K * L[ind]) cb[ind] = b[ind] / (cls._K * L[ind]) X = (ca + 1.) * (Y / cls._Yn) * cls._Xn Z = (1. 
- (cb / cls._ke)) * (Y / cls._Yn) * cls._Zn rgb = ColorXYZ()(np.vstack((X,Y,Z)).transpose()) return cls._array_to_return(rgb, mode) class ColorLab2Inverse(ColorModel): """ Hunter/Adams Lab color model use D65 (6504 K) X=95.047, Y=100.00, Z=108.883 http://en.wikipedia.org/wiki/CIE_Standard_Illuminant_D65 http://en.wikipedia.org/wiki/Lab_color_space """ range = ColorLab.limits _Xn = ColorLab2._Xn _Yn = ColorLab2._Yn _Zn = ColorLab2._Zn _Ka = ColorLab2._Ka _Kb = ColorLab2._Kb _K = ColorLab2._K _ke = ColorLab2._ke @staticmethod def _inverse(): return ColorLab2() @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ r,g,b, mode = cls._args_to_vectors(args) X,Y,Z = ColorXYZInverse()(r,g,b) ind = Y != 0 ca = np.ones_like(Y) cb = np.ones_like(Y) L = 100. * np.sqrt(Y / cls._Yn) ca[ind] = (X[ind] / cls._Xn) / (Y[ind] / cls._Yn) - 1. cb[ind] = cls._ke * (1. - (Z[ind] / cls._Zn) / (Y[ind] / cls._Yn)) a = cls._K * L * ca b = cls._K * L * cb return cls._vectors_to_return(L,a,b, mode) #----------------------------------------------------------------------- # there seems to be an issue that LMS can return negative values. class ColorCAM(ColorModel): """ CIECAM02 http://en.wikipedia.org/wiki/CIECAM02 """ limits = np.array([[-1,1],[-1,1],[-1,1.]])*np.inf _LW = 100. # cd/m^2 _Yb = 20. # luminace of background _Yw = 100. # luminace of reference white _LA = _LW * _Yb / _Yw # suppsed to be LW/5 for 'gray' _F = 1. # factor determining degree of adaptation _D = _F * (1. - 1./3.6 * np.exp((-_LA + 42.)/92.)) # The degree of adaptation # _D = 0 # no adaptation # reference white _Lwr = _Mwr = _Swr = _Ywr = 100 # illuminant white _Lw = _Mw = _Sw = _Yw = 100 _fL = (1. + (_Yw * _Lwr / (_Ywr * _Lw) - 1.)* _D) _fM = (1. + (_Yw * _Mwr / (_Ywr * _Mw) - 1.)* _D) _fS = (1. + (_Yw * _Swr / (_Ywr * _Sw) - 1.)* _D) _MCAT02 = ColorLMS._MCAT02 _MH = np.matrix( [[ 0.38971, 0.68898,-0.07868], [-0.22981, 1.18340, 0.04641], [ 0.00000, 0.00000, 1.00000]]) _ML = np.inner(_MCAT02, _MH.getI().transpose()) _k = 1./(5.* _LA + 1.) _FL = 1./5. * _k**4*(5.*_LA) + 1./10. * (1. - _k**4)**2 * (5.*_LA)**(1./3.) @staticmethod def _inverse(): return ColorCAMInverse() @classmethod def gray(cls, *args): __doc__ = ColorModel.gray.__doc__ return cls.inverse(ColorRGB().gray(*args)) @classmethod def _fn(cls, y): y1 = y - 0.1 xp = y1 * 27.13 / (400. - y1) x = xp **(1./0.42) * 100. / cls._FL return x @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ Lap,Map,Sap, mode = cls._args_to_vectors(args) Lp = cls._fn(Lap) Mp = cls._fn(Map) Sp = cls._fn(Sap) Lc, Mc, Sc = np.inner(cls._ML, np.array([Lp, Mp, Sp]).transpose()) L = Lc / cls._fL M = Mc / cls._fM S = Sc / cls._fS r,g,b = ColorLMS()(L,M,S) return cls._vectors_to_return(r,g,b, mode) class ColorCAMInverse(ColorModel): """ CIE CAM02 http://en.wikipedia.org/wiki/CIECAM02 """ range = ColorCAM.limits _LW = 100. # cd/m^2 _Yb = 20. # luminace of background _Yw = 100. # luminace of reference white _LA = _LW * _Yb / _Yw # suppsed to be LW/5 for 'gray' _F = 1. # factor determining degree of adaptation _D = _F * (1. - 1./3.6 * np.exp((-_LA + 42.)/92.)) # The degree of adaptation # _D = 0 # no adaptation # reference white _Lwr = _Mwr = _Swr = _Ywr = 100 # illuminant white _Lw = _Mw = _Sw = _Yw = 100 _fL = (1. + (_Yw * _Lwr / (_Ywr * _Lw) - 1.)* _D) _fM = (1. + (_Yw * _Mwr / (_Ywr * _Mw) - 1.)* _D) _fS = (1. 
+ (_Yw * _Swr / (_Ywr * _Sw) - 1.)* _D) _MH = np.matrix( [[ 0.38971, 0.68898,-0.07868], [-0.22981, 1.18340, 0.04641], [ 0.00000, 0.00000, 1.00000]]) _MCAT02 = ColorLMS._MCAT02 _ML = np.inner(_MH, _MCAT02.getI().transpose()) _k = 1./(5.* _LA + 1.) _FL = 1./5. * _k**4*(5.*_LA) + 1./10. * (1. - _k**4)**2 * (5.*_LA)**(1./3.) @staticmethod def _inverse(): return ColorCAM() @classmethod def _f(cls, x): xp = (cls._FL * x / 100.)**0.42 y = 400.* xp / (27.13 + xp) + 0.1 return y @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ r,g,b, mode = cls._args_to_vectors(args) L,M,S = ColorLMSInverse()(r,g,b) Lc = cls._fL * L Mc = cls._fM * M Sc = cls._fS * S Lp, Mp, Sp = np.clip(np.inner(cls._ML, np.array([Lc, Mc, Sc]).transpose()),0,np.inf) Lap = cls._f(Lp) Map = cls._f(Mp) Sap = cls._f(Sp) return cls._vectors_to_return(Lap,Map,Sap, mode) #----------------------------------------------------------------------- class ColorsRGB(ColorModel): """ sRGB color model ICE 61966-2-1 """ @staticmethod def _s(x): mask = x > 0.04045 x[mask] = ((x[mask] + 0.055)/1.055)**2.4 mask = np.logical_not(mask) x[mask] = x[mask]/12.92 return np.clip(x, 0, 1, out = x) @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ sRGB, mode = cls._args_to_array(args) rgb = np.apply_along_axis(cls._s, -1, sRGB) return cls._array_to_return(rgb, mode) # sR, sG, sB, mode = cls._args_to_vectors(args) # r = cls._s(sR) # g = cls._s(sG) # b = cls._s(sB) # return cls._vectors_to_return(r,g,b, mode) @staticmethod def _inverse(): return ColorsRGBInverse() class ColorsRGBInverse(ColorModel): """ Convert RGB to sRGB. """ @staticmethod def _s(x): mask = x > 0.04045/12.92 x[mask] = x[mask]**(1/2.4) * 1.055 - 0.055 mask = np.logical_not(mask) x[mask] = x[mask] * 12.92 return np.clip(x, 0, 1, out = x) @classmethod def __call__(cls, *args): __doc__ = ColorModel.__call__.__doc__ rgb, mode = cls._args_to_array(args) sRGB = np.apply_along_axis(cls._s, -1, rgb) return cls._array_to_return(sRGB, mode) @staticmethod def _inverse(): return ColorsRGB() #----------------------------------------------------------------------- #----------------------------------------------------------------------- # register color models _color_models = dict() def register_color_model(name, model): assert isinstance(name, str) assert issubclass(type(model), ColorModel) _color_models[name] = model def color_models(): return _color_models def color_model(model = 'RGB'): return _color_models[model] #register_color_model('RGB', ColorRGB()) #register_color_model('CMY', ColorCMY()) #register_color_model('HSV', ColorHSV()) #register_color_model('HSL', ColorHSL()) #register_color_model('HSI', ColorHSI()) #register_color_model('HCL', ColorHCL()) # Rev 601, seems also go as 'yCH' #register_color_model('HCL2', ColorHCL()) # Rev 709, seems also go as 'yCH' #register_color_model('YIQ', ColorYIQ()) #register_color_model('YUV', ColorYUV()) # Rev 601 #register_color_model('YUV2', ColorYUV2()) # Rev 709 #register_color_model('YCbCr', ColorYCbCr()) #register_color_model('YDbDr', ColorYDbDr()) #register_color_model('YPbPr', ColorYPbPr()) #register_color_model('XYZ', ColorXYZ()) # CIE XYZ #register_color_model('LMS', ColorLMS()) # CIE CAM 02 LMS #register_color_model('xyY', ColorxyY()) # CIE xyY #register_color_model('Lab', ColorLab()) # CIE L*a*b*, 6504 K #register_color_model('Lab2', ColorLab2()) # Hunter Lab, 6504 K #register_color_model('CAM', ColorCAM()) # CIE CAM 02 #register_color_model('sRGB', ColorsRGB()) # IEC 61966-2-1 
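
# Minimal sketch of how the registry above is intended to be used (the
# register_color_model() calls ship commented out in this module, so a demo
# has to register the models it needs first). The function name and the
# sample HSV triple are illustrative only.
def _demo_color_model_registry():
    if 'HSV' not in _color_models:
        register_color_model('RGB', ColorRGB())
        register_color_model('HSV', ColorHSV())
    hsv = color_model('HSV')           # ColorHSV instance from the registry
    r, g, b = hsv(30., 1., 1.)         # HSV -> RGB for a single colour
    h, s, v = hsv.inverse(r, g, b)     # and back again via the inverse model
    return (r, g, b), (h, s, v)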
####################################################################### ####################################################################### # Define Color[Function] as a replacement for Colormap def colormap(name, **kwargs): if isinstance(name, ColorMap): return name c = get_cfunc(name) if c is None: try: c = ColorMap.from_Colormap(name, **kwargs) except: pass return c class Color(colors.Colormap): """ Base class to provide color map functionallity but on 'color function' basis. Users may overwrite the __init__ routing to set up coloring functions. This should usually call the base method provided here. The main coloring is done in the function _function. The values, normalized to 0..1, will be passed as a 1D array for simplicity. If the class attribute _alpha is False (default) it is assumed _function will return a numpy array of RGB values (:,3) and an array of RGBA values (:.4) otherwise, also normalized to the range 0...1. Much of the remaining functionallity should be very similar to color maps. """ bytes = False # default for bytes _alpha = False # tell whether function computes alpha def __init__(self, *args, **kwargs): self.bytes = kwargs.get('bytes', False) self.name = kwargs.get('name', self.__class__.__name__) self.alpha = kwargs.get('alpha', 1.) self._rgba_bad = np.array([0.0, 0.0, 0.0, 0.0]) # If bad, don't paint anything. self._rgba_under = None self._rgba_over = None def __call__(self, data, *args, **kwargs): """ Process data and return RGBA value. """ kw = dict(kwargs) alpha = kw.setdefault('alpha', None) normalize = kw.setdefault('normalize', None) bytes = kw.setdefault('bytes', self.bytes) data, shape = self._input_to_data(data, normalize) if alpha is not None: alpha = np.clip(alpha, 0., 1.) mask_bad = np.logical_not(np.isfinite(data)) mask_under = data < 0 mask_over = data > 1 mask = np.logical_not( np.logical_or( np.logical_or( mask_bad, mask_under), mask_over)) out = self._get_out(data, mask, *args, **kw) # SIMPLE TREATMENT FOR INVALID/LOW/HIGH DATA if self._rgba_under is None: self._rgba_under = self._default_color(0., *args, **kw) if self._rgba_over is None: self._rgba_over = self._default_color(1., *args, **kw) out[mask_under,:] = self._rgba_under out[mask_over ,:] = self._rgba_over if alpha is not None: out[:,-1] = alpha out[mask_bad ,:] = self._rgba_bad return self._return(out, shape, bytes) def _default_color(self, x, alpha, *arg, **kw): return self._get_out(np.array([x]), alpha, *arg, **kw)[0] def _get_out(self, data, mask = None, *args, **kw): out = np.ndarray((data.size,4)) if mask is None: mask = np.tile(True, data.size) if self._alpha: out[mask,:] = self._function(data[mask], *args, **kw) else: out[mask,:3] = self._function(data[mask], *args, **kw) out[: , 3] = self.alpha return out def _function(data, *args, **kwargs): """ prototype conversion # if self._alpha: # out = np.tile(data, 4).reshape(-1,4) # else: # out = np.tile(data, 3).reshape(-1,3) """ raise NotImplementedError() def _update_alpha(self, alpha): """ compute output array """ if not self._alpha: if alpha is None: alpha = 1. else: out[:,3] = alpha return alpha @staticmethod def _input_to_data(data, normalize): """ Normalize and get shape """ # May need to allow other formats as well. if not isinstance(data, np.ndarray): data = np.asarray(data) if normalize in (None, True): M = data.max() m = data.min() if normalize is None: normalize = m < 0. or M > 1. if normalize: d = M - m if d == 0: d = 1. 
data = (data - m) / d shape = data.shape data = data.reshape(-1) return data, shape def _return(self, out, shape, bytes): """ output conversion """ out = out.reshape(list(shape) + [-1]) if bytes is None: bytes = self.bytes if bytes: out = np.array(np.minimum(out*256,255), dtype = np.ubyte) return out def Colormap(self, N = 256, name = None): """ Return matplotlib.colors.Colormap object with N levels. """ x = np.linspace(0,1,N) if name is None: name = self.name cm = colors.ListedColormap( self.__call__(x), name = name, N = N) cm.set_bad(self._rgba_bad) cm.set_under(self._rgba_under) cm.set_over(self._rgba_over) return cm def set_bad(self, color='k', alpha=None): ''' Set color to be used for masked values. ''' if alpha is None: alpha = self.alpha self._rgba_bad = colorConverter.to_rgba(color, alpha) def set_under(self, color='k', alpha=None): ''' Set color to be used for low out-of-range values. Requires norm.clip = False ''' if alpha is None: alpha = self.alpha self._rgba_under = colorConverter.to_rgba(color, alpha) def set_over(self, color='k', alpha=None): ''' Set color to be used for high out-of-range values. Requires norm.clip = False ''' if alpha is None: alpha = self.alpha self._rgba_over = colorConverter.to_rgba(color, alpha) def _set_extremes(self): pass def _init(self): raise NotImplementedError("Color Function") def is_gray(self): """ Return whether color is gray. """ # Subclasses may overwrite this. return False _N = 1024 def get_N(self): return self._N N = property(get_N) _colors = dict() def register_color(name, color): assert isinstance(name, str) assert isinstance(color, Color) assert not _colors.has_key(name) _colors[name] = color def get_cfunc(name): """ return color_function object of given name """ return _colors.get(name, None) class ColorMap(Color): _alpha = True def __init__(self, map = None, layout = None, model = None, color = None, models = None, normalize = None, gamma = 1., gamma_func = None, **kwargs): """ Set up Color Map. This is based on Color[functions]. The power is that the methed alloes arbitraty smooth/fine interpolation. model: color model to use models: list of models for numeric values color: use this color if not given in layout alpha: alpha value layout: [X][C|CCC][A][G|GG|GGG|GGGG][M][N] X: coordinate, non-zero values normalized to [0,1] XXXX: gamma for each color and alpha C: grayscale CCC: three color values A: alpha G: gamma, same for all GG: gamma for color and alpha GGG: gamma for three colors but not alpha GGGG: gamma for each color and alpha M: model N: normalize (see below) map: use <0 for invalid data? bytes: - set default normalize: normalize valuse to valid range None| -1: auto-determine True| +1: yes False| 0: no Normalization is based on [0,1] range is given, translate to valid range for parameters. NOTES: X coordinates < 0: [bad [, lower, upper]] The interval generally is scaled lineraly so that X[-1] becomes 1. In each interval each component is interpolated from the begiing to end value according to the a function normalized to 0...1 for the interval. The scaling itself is detemined by the 'gamma' parameter at the end of the interval, so the first values is ignored, and so are the gamma for negiative indices (see above). gamma can be a scalar, then it is interpreted as a power for interpolation in an interval. """ alpha = kwargs.get('alpha', None) super(ColorMap, self).__init__(**kwargs) self._gamma = kwargs.get('gamma', 1.) 
assert layout is not None layout = layout.upper() ncoord = layout.count('X') assert ncoord in (0,1,4), "can 0,1, or 4 coordinates" ipos = layout.find('X') if ncoord == 0 : if map.ndim == 1: n = map.size else: n = map.shape[0] coord = np.arange(n).reshape((1, ncoord)) ncoord = 1 else: n = map.shape[0] coord = np.ndarray((n, ncoord)) for i in xrange(ncoord): coord[:,i] = map[:,ipos] ipos = layout.find('X', ipos + 1) if coord.dtype is not np.float64: coord = np.array(coord, dtype = np.float64) for j in xrange(ncoord): ii = coord[:,j] >= 0 i, = np.where(ii) coord[ii,j] -= coord[i[0],j] coord[ii,j] /= coord[ii,j].max() assert layout.count('A') < 2, "can have only one alpha value" ipos = layout.find('A') if ipos >= 0: alpha = map[:,ipos] else: if alpha == None: alpha = 1. alpha = np.tile(alpha, n) assert layout.count('N') < 2, "can have only one normalization value" ipos = layout.find('N') if ipos >= 0: normal = map[:,ipos] else: if normalize == -1: normalize = None normal = np.tile(normalize, n) norm = np.empty_like(normal, dtype = np.object) for i,x in enumerate(normal): if x == 1: x == True elif x == 0: x = False else: x = None norm[i] = x normal = norm assert layout.count('M') < 2, "can have only one model value" ipos = layout.find('M') if ipos >= 0: model = map[:,ipos] else: if model == None: model = 0 model = np.tile(model, n) # models is converted to array of color objects if models == None: models = ['RGB', 'HSV', 'HSL', 'HSI'] models = np.array(models).reshape(-1) m = np.empty_like(model, dtype = np.object) for i,x in enumerate(model): if not isinstance(x, str): x = models[x] m[i] = _color_models[x.upper()] model = m nc = layout.count('C') assert nc in (1,3), "Color has to be C or CCC" if nc == 0: if color == None: color = 0. if len(color) == 1: color = np.array([mx.gray(color) for mx in model]) elif len(color) == 3: color = np.tile(color, (3,1)) else: raise AttributeError("Wrong format in 'color' keyword.") if nc == 1: ipos = layout.find('C') c = map[:,ipos] color = np.array([mx.gray(cx) for (mx, cx) in zip(model, c)]) else: color = np.ndarray((n,3)) ipos = -1 for i in xrange(3): ipos = layout.find('C', ipos + 1) color[:,i] = map[:,ipos] # normalize # 1) auto-detect d = dict() for i in xrange(n): if normal[i] is None: m = model[i] c = color[i] try: limits = d[m] limits[:,0] = np.minimum(limit[:,0],c) limits[:,1] = np.maximum(limit[:,1],c) except: limits = np.tile(c,(2,1)).transpose() d[m] = limits for m,l in d.items(): d[m] = not m.is_normal(l) # 2) do normalization for i in xrange(n): m = model[i] if normal[i] is None: normal[i] = d[m] if normal[i]: color[i,:] = m.normalize(color[i,:]) # combine color and alpha color = np.hstack((color, alpha.reshape(-1,1))) # ADD/TREAT 'invalid' colors [bad [, lower, upper]] ii = coord[:, 0] < 0 im,= np.where(ii) assert im.size in (0,1,3), "Only [bad [, lower, upper]] allowed for 'invalid' colors" if im.size > 0: i = im[0] self._rgba_bad = np.hstack((model[i](color[i,0:3]),color[i,3])) if im.size > 1: i = im[1] self._rgba_lower = np.hstack((model[i](color[i,0:3]),color[i,3])) i = im[2] self._rgba_upper = np.hstack((model[i](color[i,0:3]),color[i,3])) jj = np.logical_not(ii) map = map [:,jj] color = color[:,jj] model = model[:,jj] coord = coord[:,jj] # convert to N x 4 array for gamma ng = layout.count('G') assert nc in (1,3), "Gamma has to be G, GG, GGG, or GGGG" if ng == 0: gamma = np.tile(1., (n,4)) if ng == 1: ipos = layout.find('G') g = map[:,ipos] gamma = np.tile(g,(4,1)).transpose() if ng == 2: ipos = layout.find('G') g = map[:,ipos] gamma = 
np.tile(g,(4,1)).transpose() ipos = layout.find('G', ipos+1) g = map[:,ipos] gamma[:,3] = g if ng == 3: gamma = np.ndarray((n,4), dtype = map.dtype) ipos = -1 for i in xrange(3): ipos = layout.find('G', ipos + 1) gamma[:,i] = map[:,ipos] gamma[:,3] = np.tile(1., n) if ng == 4: gamma = np.ndarray((n,4), dtype = map.dtype) ipos = -1 for i in xrange(4): ipos = layout.find('G', ipos + 1) gamma[:,i] = map[:,ipos] # translate to functions if gamma_func == None: gamma_func = lambda x, gamma: np.power(x, gamma) assert isinstance(gamma_func, types.FunctionType), ( "gamma_func needs to be a function") if gamma.dtype == np.object: g = gamma else: g = np.empty_like(gamma, dtype = object) identiy = lambda x: x for i,f in enumerate(gamma.flat): if not isinstance(f, types.FunctionType): if f is None: g.flat[i] = identity else: g.flat[i] = partial(gamma_func, gamma = f) gamma = g # save setup self.n = n self.gamma = gamma self.color = color self.model = model self.coord = coord self.ncoord = ncoord def _function(self, data, *args, **kwargs): out = np.ndarray((data.size, 4)) coord = self.coord ** (1 / self._gamma) # gamma from LinearSegmentedColormap assert self.ncoord in (1,4), "require consisient set of gamma" if self.ncoord == 1: # use np.piecwise instead? color0 = self.color[0,:] coord0 = coord[0,0] for i in xrange(1, self.n): if self.model[i-1] != self.model[i]: color0[0:3] = self.model[i].inverse()(self.model[i-1](color0[0:3])) color1 = self.color[i,:] coord1 = coord[i,0] if coord0 < coord1: # allow discontinuous maps ind = np.logical_and(data >= coord[i-1], data <= coord[i]) if np.count_nonzero(ind): dcolor = color1 - color0 dcoord = coord1 - coord0 colcoord = (data[ind] - coord0) / dcoord for j in xrange(4): out[ind,j] = color0[j] + self.gamma[i,j](colcoord)*dcolor[j] if self.model[i] != _color_models['RGB']: out[ind,0:3] = self.model[i](out[ind,0:3]) color0 = color1 coord0 = coord1 else: assert np.all(self.model[0] == self.model[:]),'All color models need to be equal if using independent coordinates' for j in xrange(4): coord0 = coord[0, j] color0 = self.color[0,j] for i in xrange(1, self.n): color1 = self.color[i,j] coord1 = coord[i, j] if coord0 < coord1: # allow discontinuous maps ind = np.logical_and(data >= coord0, data <= coord1) if np.count_nonzero(ind): dcolor = color1 - color0 dcoord = coord1 - coord0 colcoord = (data[ind] - coord0) / dcoord out[ind, j] = color0 + self.gamma[i,j](colcoord)*dcolor color0 = color1 coord0 = coord1 if self.model[0] != _color_models['RGB']: # transform only valid data ind = np.logical_and(data >= 0, data <= 1) out[ind,0:3] = self.model[i](out[ind,0:3]) return out @staticmethod def from_Colormap_spec(colors, **kwargs): if not ('red' in colors): assert isinstance(colors, Iterable) if (isinstance(colors[0], Iterable) and len(colors[0]) == 2 and not is_string_like(colors[0])): # List of value, color pairs vals, colors = zip(*colors) else: vals = np.linspace(0., 1., len(colors)) map = np.ndarray([[val] + list(colorConverter.to_rgba(color)) for val, color in zip(vals, colors)]) return ColorMap(map, layout = 'XCCCA', **kwargs) if callable(colors['red']): map = np.array([np.zeros(9), np.ones(9)], dtype = object) map[1,5] = lambda x: np.clip(colors['red'](x),0,1) map[1,6] = lambda x: np.clip(colors['green'](x),0,1) map[1,7] = lambda x: np.clip(colors['blue'](x),0,1) if 'alpha' in colors: map[1,8] = lambda x: np.clip(colors['alpha'],0,1) else: map[0,4] = 1. 
return ColorMap(map, layout = 'XCCCAGGGG', **kwargs) xmap = [] for c in ('red', 'green', 'blue', 'alpha'): color = colors.get(c, None) if color == None: if c == 'alpha': color = ((0,0,1.), (1,1,1.)) else: color = ((0,0,0.), (1,1,0.)) color = np.array(color) shape = color.shape assert len(shape) == 2 assert shape[1] == 3 # copied from matplotlib.color.py x = color[:, 0] y0 = color[:, 1] y1 = color[:, 2] if x[0] != 0. or x[-1] != 1.: raise ValueError("data mapping points must start with x=0. and end with x=1") if np.sometrue(np.sort(x) - x): raise ValueError("data mapping points must have x in increasing order") # end copy xc = [[x[0], y1[0]]] for i in xrange(1,shape[0]-1): xc += [[x[i], y0[i]]] if y0[i] != y1[i]: xc += [[x[i], y1[i]]] i = shape[0]-1 xc += [[x[i], y0[i]]] xmap += [np.array(xc)] nn = np.array([len(xc) for xc in xmap]) n = np.max(nn) map = np.ones((n,8)) for i,xc in enumerate(xmap): map[0:nn[i],i::4] = xc if np.all(map[:,0:4] == map[:,0][:,np.newaxis]): map = map[:,3:] layout = 'XCCCA' else: layout = 'XXXXCCCA' return ColorMap(map, layout = layout, **kwargs) @staticmethod def from_Colormap(map, name = None, gamma = None, **kwargs): if isinstance(map, ColorMap): return map if isinstance(map, str): name = map map = get_cmap(name) if isinstance(map, LinearSegmentedColormap): segmentdata = map._segmentdata if gamma is not None: gamma = map._gamma rgba_bad = map._rgba_bad rgba_under = map._rgba_under rgba_over = map._rgba_over if name is not None: name = map.name f = ColorMap.from_Colormap_spec( segmentdata, name = name, gamma = gamma, **kwargs) if rgba_under is not None: f.set_under(rgba_under) if rgba_over is not None: f.set_over(rgba_over) else: if gamma is None: gamma = 1.0 f = ColorMap.from_Colormap_spec( map, name = name, gamma = gamma, **kwargs) return f ####################################################################### # Some specific color maps & examples class ColorMapGal(ColorMap): maps = { 0: np.array( [[0,0,0,1,0], [5,0,0,0,2], [7,1,0,0,0.5]]), 1: np.array( [[0,1,1,1,0], [2,0,1,0,2], [3,0,0.75,0.75,1], [4,0,0,1,1], [5,0,0,0,1], [6,1,0,0,1], [7,1,1,0,0.75]]), 2: np.array( [[0,1,1,1,0], [2,0,1,0,2], [3,0,0.75,0.75,1], [4,0,0,1,1], [6,1,0,0,1], [7,1,1,0,0.75]]), 3: np.array( [[0,1,1,1,0], [2,0,1,0,2], [3,0,1,1,1], [4,0,0,1,1], [5,1,0,1,1], [6,1,0,0,1], [6.25,1,.75,0,2]]), 4: np.array( [[0,1,1,1,0], [1,.75,.75,.75,2], [2,0,1,0,2], [3,0,1,1,1], [4,0,0,1,1], [5,1,0,1,1], [6,1,0,0,1], [6.25,1,.75,0,2]]) } _len = len(maps) def __init__(self, mode = 1): try: map = self.maps[mode] except: raise AttributeError('Invalid mode') super(ColorMapGal, self).__init__( map = map, layout = 'XCCCG') #for i in xrange(ColorMapGal._len): # register_color('GalMap{:d}'.format(i), ColorMapGal(i)) class ColorMapGray(ColorMap): maps = { 0: np.array( [[0,0,0], [1,1,1]]), 1: np.array( [[0,0,0], [1,1,lambda x: 0.5*(1-np.cos(x*np.pi))]]), } _len = len(maps) def __init__(self, mode = 0): try: map = self.maps[mode] except: raise AttributeError('Invalid mode.') super(ColorMapGray, self).__init__( map = map, layout = 'XCG') @staticmethod def is_gray(): return True #for i in xrange(ColorMapGray._len): # register_color('GrayMap{:d}'.format(i), ColorMapGray(i)) class ColorRGBWaves(Color): """ red-green-blue+waves """ _alpha = False def __init__(self, nwaves = 200, **kwargs): super(ColorRGBWaves, self).__init__(**kwargs) self.waves = nwaves*np.pi self._N = np.max((np.round(12*nwaves), 1024)) def _function(self,x, *args, **kwargs): return _color_models['HSV'](np.array([ 300 * x, x**0.5, 1 - 0.25 * 
(np.sin(x*self.waves)**2) ]).transpose()) #register_color('RGBWaves200', ColorRGBWaves(200)) class ColorRKB(Color): """ red-black-blue """ _alpha = False def _function(self,x, *args, **kwargs): def theta(x): y = np.zeros_like(x) y[x<0] = -1 y[x>0] = +1 return y return _color_models['HSV'](np.array([ 30+180*x+30*theta(x-0.5), np.ones_like(x), np.minimum(5*np.abs(x - 0.5),1) ]).transpose()) #register_color('RKB', ColorRKB()) class ColorBWR(ColorMap): """ blue white red with adjustable white at paramter value """ def __init__(self, white = 0.5, gamma = 2.0, **kwargs): assert 0 <= white <= 1 assert gamma > 0 map = np.array( [[0 ,0,0,1,0.0], [white,1,1,1,gamma], [1 ,1,0,0,1./gamma]]) super(ColorBWR, self).__init__( map = map, layout = 'XCCCG', **kwargs) #register_color('BWR', ColorBWR()) class ColorBWGRY(ColorMap): """ red white blue with adjustable white at paramter value """ def __init__(self, p = 0.5, **kwargs): assert 0 <= p <= 1 p13 = (1-p)/3 map = np.array( [[0 ,0,0,1,0.00], [p ,1,1,1,2.00], [1-2*p13,0,1,0,1.00], [1-1*p13,1,0,0,0.85], [1 ,1,1,0,1]]) super(ColorBWGRY, self).__init__( map = map, layout = 'XCCCG', **kwargs) #register_color('BWGRY', ColorBWGRY()) class ColorKRGB(Color): """ red-green-blue+waves """ _alpha = False def __init__(self, **kwargs): super(ColorKRGB, self).__init__(**kwargs) def _function(self,x, *args, **kwargs): return _color_models['HSV'](np.array([ x*270, np.ones_like(x), np.minimum(10*x,1) ]).transpose()) #register_color('KRGB', ColorKRGB()) class ColorBWC(ColorMap): """ grey-white-color """ def __init__(self, p = 0.5, mode = 0, **kwargs): assert 0 <= p <= 1 p2 = (1-p)/3 + p if mode == 0: map = np.array( [[0 , 0,0,0,0.], [p , 120,0,1,1.], [p2, 120,1,1,2.], [1 ,-120,1,1,1.]]) elif mode == 1: map = np.array( [[0 , 0,0,0,0.], [p ,120,0,1,1.], [p2,120,1,1,2.], [1 ,420,1,1,1.]]) elif mode == 2: f = lambda x: np.sin(0.5*x*np.pi) map = np.array( [[0 , 0,0,0,0.], [p ,240,0,1,1.], [p2,240,1,1,f], [1 ,-30,1,1,1.]]) else: map = np.array( [[0 , 0,0,0,0.], [p ,240,0,1,1.], [p2,240,1,1,1.], [1 ,-30,1,1,1.]]) super(ColorBWC, self).__init__( map = map, layout = 'XCCCG', model = 'HSV', normalize = False, **kwargs) #register_color('BWC', ColorBWC()) class ColorMapFunction(ColorMap): """ generate color function form color map by linear interpolation """ def __init__(self, name): pass ####################################################################### from matplotlib.colors import rgb2hex def isocolors(n, start=0, stop=360): h = np.linspace(start, stop, n, endpoint = False) return np.array([rgb2hex(color_model('HSV')(hi,1,1)) for hi in h ]) def isogray(n, start=0, stop=1, endpoint=False): h = np.linspace(start, stop, n, endpoint = endpoint) return np.array([rgb2hex(g) for g in color_model('RGB').gray(h)]) def isoshadecolor(n, start=0, stop=1, hue = 0, endpoint = False): h = np.linspace(start, stop, n, endpoint = endpoint) return np.array([rgb2hex(color_model('HSV')(hue,1-hi, 1)) for hi in h[::-1] ]) ####################################################################### ####################################################################### ####################################################################### def test(): N = 1000 x = np.exp(np.linspace(-2.0, 3.0, N)) y = np.exp(np.linspace(-2.0, 2.0, N)) x = (np.linspace(-2.0, 3.0, N)) y = (np.linspace(-2.0, 2.0, N)) X, Y = np.meshgrid(x, y) X1 = 0.5*(X[:-1,:-1] + X[1:,1:]) Y1 = 0.5*(Y[:-1,:-1] + Y[1:,1:]) from matplotlib.mlab import bivariate_normal Z1 = bivariate_normal(X1, Y1, 0.1, 0.2, 1.27, 1.11) + 
100.*bivariate_normal(X1, Y1, 1.0, 1.0, 0.23, 0.72) ZR = bivariate_normal(X1, Y1, 0.1, 0.2, 1.27, 1.11) + 100.*bivariate_normal(X1, Y1, 1.0, 1.0, 0.23, 0.72) ZG = bivariate_normal(X1, Y1, 0.1, 0.2, 2.27, 0.11) + 100.*bivariate_normal(X1, Y1, 1.0, 1.0, 0.43, 0.52) ZB = bivariate_normal(X1, Y1, 0.1, 0.2, 0.27, 2.11) + 100.*bivariate_normal(X1, Y1, 1.0, 1.0, 0.53, 0.92) ZA = bivariate_normal(X1, Y1, 0.1, 0.2, 3.27,-1.11) + 100.*bivariate_normal(X1, Y1, 1.0, 1.0, 0.23, 0.82) Z = np.ndarray(ZR.shape + (4,)) Z[...,0] = ZR /ZR.max() Z[...,1] = ZG /ZG.max() Z[...,2] = ZB /ZB.max() Z[...,3] = np.exp(-ZA /ZA.max()) Z = (Z*255).astype(np.uint8) fig = plt.figure() Z1[Z1>0.9*np.max(Z1)] = +np.inf ax = fig.add_subplot(1,1,1) # col = plt.get_cmap('terrain_r') # col = get_cfunc('RGBWaves200') col = plt.get_cmap('gnuplot2') col = colormap('cubehelix') col = colormap('RGBWaves200') col = ColorBWC(mode=0) i = ax.pcolorfast(x, y, Z1, cmap = col) # i = ax.pcolorfast(x, y, Z) # from matplotlib.image import PcolorImage # i = PcolorImage(ax, x, y, Z) # ax.images.append(i) # xl, xr, yb, yt = x[0], x[-1], y[0], y[-1] # ax.update_datalim(np.array([[xl, yb], [xr, yt]])) # ax.autoscale_view(tight=True) plt.colorbar(i) plt.show() return i from matplotlib.image import PcolorImage def pcolorimage(ax, x, y, Z): """ Make a PcolorImage based on Z = (x, z, 4) [byte] array This is to fix an omission ('bug') in the current (as of this writing) version of MatPlotLib. I may become superfluous in the future. """ img = PcolorImage(ax, x, y, Z) ax.images.append(img) xl, xr, yb, yt = x[0], x[-1], y[0], y[-1] ax.update_datalim(np.array([[xl, yb], [xr, yt]])) ax.autoscale_view(tight=True)
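
# Usage sketch for the Color / ColorMap machinery above, modelled on the
# module's own test() routine: a Color subclass can be handed to matplotlib
# wherever a Colormap is expected because it derives from
# matplotlib.colors.Colormap. Whether this renders exactly as intended depends
# on the (Python 2 era) matplotlib this module was written against; the data
# and figure layout below are illustrative only.
def _demo_colorbwr():
    data = np.linspace(-1., 1., 256).reshape(16, 16)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    img = ax.imshow(data, cmap=ColorBWR(white=0.5), interpolation='nearest')
    plt.colorbar(img)
    plt.show()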
bsd-2-clause
polltooh/traffic_video_analysis
data_process_script/plot_density.py
1
1780
import numpy as np
import cv2
import numpy.matlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D


def plot_conf_mat(densmap_name):
    fig = plt.figure(figsize=(20, 20))
    plt.clf()
    ax = fig.add_subplot(111)
    #ax.set_aspect(1)
    densmap = np.fromfile(densmap_name, np.float32)
    densmap = densmap.reshape(227, 227)
    densmap *= 100
    densmap[densmap > 1] = 1
    res = ax.imshow(densmap, cmap=plt.cm.jet, interpolation='nearest')
    plt.savefig('density.jpg')
    img = cv2.imread("density.jpg")
    img = cv2.resize(img, (227, 227))
    cv2.imshow("i", img)
    cv2.waitKey(0)
    #plt.show()


def plot_3d(densmap_name):
    densmap = np.fromfile(densmap_name, np.float32) * 100
    #densmap = densmap.reshape(227, 227)
    x = np.arange(0, 227, 1)
    y = np.arange(0, 227, 1)
    xx, yy = np.meshgrid(x, y)
    xx = xx.reshape(-1)
    yy = yy.reshape(-1)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot_trisurf(xx, yy, densmap, cmap=plt.cm.jet, linewidth=0.2)
    plt.show()


def norm_image(img):
    img = (img - np.min(img)) / (np.max(img) - np.min(img))
    return img


def opencv_plot(densmap_name):
    # fixed: the parameter was originally named "des_name" while the body read
    # the module-level "densmap_name"; use the argument that is passed in
    densmap = np.fromfile(densmap_name, np.float32)
    densmap = np.reshape(densmap, (227, 227))
    #densmap = norm_image(densmap) * 100
    densmap *= 100.0
    densmap[densmap > 1] = 1
    densmap = norm_image(densmap) * 255
    densmap = densmap.astype(np.uint8)
    im_color = cv2.applyColorMap(densmap, cv2.COLORMAP_JET)
    cv2.imshow("im", im_color)
    cv2.waitKey(0)


densmap_name = "/home/mscvadmin/traffic_video_analysis/nn_script/resdeconv_results/[Cam253]-2016_5_3_18h_200f_000102_resize.npy"
opencv_plot(densmap_name)
#plot_conf_mat(densmap_name)
#plot_3d(densmap_name)
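
# Usage sketch (illustrative only): the helpers above all expect a raw float32
# dump of a 227x227 density map read back with np.fromfile(). The synthetic
# Gaussian map and the temporary file path below are made up for demonstration.
def demo_plot_density():
    import os
    import tempfile
    yy, xx = np.mgrid[0:227, 0:227]
    dens = np.exp(-((xx - 113) ** 2 + (yy - 113) ** 2) / (2.0 * 30.0 ** 2))
    dens = (dens / dens.sum()).astype(np.float32)
    demo_path = os.path.join(tempfile.gettempdir(), "demo_density.npy")
    dens.tofile(demo_path)   # raw dump, matching the np.fromfile() readers
    plot_conf_mat(demo_path)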
apache-2.0
kjung/scikit-learn
sklearn/metrics/base.py
9
4649
""" Common code for all metrics """ # Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Olivier Grisel <[email protected]> # Arnaud Joly <[email protected]> # Jochen Wersdorfer <[email protected]> # Lars Buitinck <[email protected]> # Joel Nothman <[email protected]> # Noel Dawe <[email protected]> # License: BSD 3 clause from __future__ import division import numpy as np from ..utils import check_array, check_consistent_length from ..utils.multiclass import type_of_target from ..exceptions import UndefinedMetricWarning as _UndefinedMetricWarning from ..utils import deprecated @deprecated("UndefinedMetricWarning has been moved into the sklearn.exceptions" " module. It will not be available here from version 0.19") class UndefinedMetricWarning(_UndefinedMetricWarning): pass def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None): """Average a binary metric for multilabel classification Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] True binary labels in binary label indicators. y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. sample_weight : array-like of shape = [n_samples], optional Sample weights. binary_metric : callable, returns shape [n_classes] The binary metric function to use. Returns ------- score : float or array of shape [n_classes] If not ``None``, average the score, else return the score for each classes. 
""" average_options = (None, 'micro', 'macro', 'weighted', 'samples') if average not in average_options: raise ValueError('average has to be one of {0}' ''.format(average_options)) y_type = type_of_target(y_true) if y_type not in ("binary", "multilabel-indicator"): raise ValueError("{0} format is not supported".format(y_type)) if y_type == "binary": return binary_metric(y_true, y_score, sample_weight=sample_weight) check_consistent_length(y_true, y_score, sample_weight) y_true = check_array(y_true) y_score = check_array(y_score) not_average_axis = 1 score_weight = sample_weight average_weight = None if average == "micro": if score_weight is not None: score_weight = np.repeat(score_weight, y_true.shape[1]) y_true = y_true.ravel() y_score = y_score.ravel() elif average == 'weighted': if score_weight is not None: average_weight = np.sum(np.multiply( y_true, np.reshape(score_weight, (-1, 1))), axis=0) else: average_weight = np.sum(y_true, axis=0) if average_weight.sum() == 0: return 0 elif average == 'samples': # swap average_weight <-> score_weight average_weight = score_weight score_weight = None not_average_axis = 0 if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_score.ndim == 1: y_score = y_score.reshape((-1, 1)) n_classes = y_score.shape[not_average_axis] score = np.zeros((n_classes,)) for c in range(n_classes): y_true_c = y_true.take([c], axis=not_average_axis).ravel() y_score_c = y_score.take([c], axis=not_average_axis).ravel() score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight) # Average the results if average is not None: return np.average(score, weights=average_weight) else: return score
bsd-3-clause
marshallmcdonnell/interactive_plotting
TraitsUI/matplotlib/traitsui_mpl_qt_test.py
1
2981
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Pierre Haessig — March 2014
"""
Qt adaptation of Gael Varoquaux's tutorial to integrate Matplotlib
http://docs.enthought.com/traitsui/tutorials/traits_ui_scientific_app.html#extending-traitsui-adding-a-matplotlib-figure-to-our-application

based on Qt-based code shared by Didrik Pinte, May 2012
http://markmail.org/message/z3hnoqruk56g2bje

adapted and tested to work with PySide from Anaconda in March 2014
"""

from pyface.qt import QtGui, QtCore

import matplotlib
# We want matplotlib to use a QT backend
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.figure import Figure

from traits.api import Any, Instance
from traitsui.qt4.editor import Editor
from traitsui.qt4.basic_editor_factory import BasicEditorFactory


class _MPLFigureEditor(Editor):

    scrollable = True

    def init(self, parent):
        self.control = self._create_canvas(parent)
        self.set_tooltip()

    def update_editor(self):
        pass

    def _create_canvas(self, parent):
        """ Create the MPL canvas. """
        # matplotlib commands to create a canvas
        frame = QtGui.QWidget()
        mpl_canvas = FigureCanvas(self.value)
        mpl_canvas.setParent(frame)
        mpl_toolbar = NavigationToolbar2QT(mpl_canvas, frame)

        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(mpl_canvas)
        vbox.addWidget(mpl_toolbar)
        frame.setLayout(vbox)

        return frame


class MPLFigureEditor(BasicEditorFactory):

    klass = _MPLFigureEditor


if __name__ == "__main__":
    # Create a window to demo the editor
    from traits.api import HasTraits, Int, Float, on_trait_change
    from traitsui.api import View, Item
    from numpy import sin, cos, linspace, pi

    class Test(HasTraits):

        figure = Instance(Figure, ())
        n = Int(11)
        a = Float(0.5)

        view = View(Item('figure', editor=MPLFigureEditor(),
                         show_label=False),
                    Item('n'),
                    Item('a'),
                    width=400,
                    height=300,
                    resizable=True)

        def __init__(self):
            super(Test, self).__init__()
            axes = self.figure.add_subplot(111)
            self._t = linspace(0, 2*pi, 200)
            self.plot()

        @on_trait_change('n,a')
        def plot(self):
            t = self._t
            a = self.a
            n = self.n
            axes = self.figure.axes[0]
            if not axes.lines:
                axes.plot(sin(t)*(1 + a*cos(n*t)), cos(t)*(1 + a*cos(n*t)))
            else:
                l = axes.lines[0]
                l.set_xdata(sin(t)*(1 + a*cos(n*t)))
                l.set_ydata(cos(t)*(1 + a*cos(n*t)))
            canvas = self.figure.canvas
            if canvas is not None:
                canvas.draw()

    t = Test()
    t.configure_traits()
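A minimal sketch of reusing the MPLFigureEditor defined above for a different plot; the Histogram class and its contents are hypothetical, added only to show the editor/Figure wiring.

from traits.api import HasTraits, Instance
from traitsui.api import View, Item
from matplotlib.figure import Figure
import numpy as np

class Histogram(HasTraits):          # hypothetical demo class
    figure = Instance(Figure, ())
    view = View(Item('figure', editor=MPLFigureEditor(), show_label=False),
                width=400, height=300, resizable=True)

    def __init__(self):
        super(Histogram, self).__init__()
        ax = self.figure.add_subplot(111)
        ax.hist(np.random.randn(1000), bins=30)

# Histogram().configure_traits()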
mit
sanja7s/SR_Twitter
src_taxonomy/pie_plot_taxonomies_CODA.py
1
30790
#!/usr/bin/env python # -*- coding: UTF-8 -*- ''' plot the pie plots of the top concepts, entities, taxonomies etc. per community and for the whole dataset also output the stats for the top keywords, concepts, entities and for the sentiment per community (and the whole dataset) ''' from __future__ import unicode_literals # import codecs from collections import defaultdict, OrderedDict import json import glob, os import math import numpy as np import matplotlib.pyplot as plt import sys #ARG = "SR" ARG = "mention/CODA2" #ARG = "ment_SR" f_in = "tweets_taxonomy_clean.JSON" f_in_user_ids = "user_IDs.dat" IN_DIR = "../../../DATA/taxonomy_stats/" #spec_users = ARG + "/communitiesMent.txt" spec_users = ARG + "/rescmtyvv.in.txt" #spec_users = ARG + "/rescmtyvv.out.txt" #spec_users = ARG + "/communitiesMent_SR.txt" #TOP_GROUP = "reciprocal/" #TOP_GROUP = "hubs_SR_0.9/" #DIR_top_users = "TOP_users/" + str(TOP_GROUP) #PREFIX = "100_top_" ################################################## # read in a map for the twitter username --> id ################################################## def read_user_IDs(): user_ids = defaultdict(str) with codecs.open(f_in_user_ids,'r', encoding='utf8') as f: for line in f: line = line.split() user_id = line[0] user = line[1] user_ids[user] = user_id return user_ids ################################################## # read in the users (top in something) ################################################## def read_TOP_users(): user_ids = defaultdict(str) for top_users_file in os.listdir(DIR_top_users): if not top_users_file.startswith(PREFIX): continue with codecs.open(os.path.join(DIR_top_users, top_users_file),'r', encoding='utf8') as f: user_ids[top_users_file] = defaultdict(int) for line in f: line = line.split() user_id = line[0] user_ids[top_users_file][user_id] = 1 return user_ids #################################################################################################### # return top communities larger than sizeN, as many as there are of that size # in a form of a dictionary: {community_id: defaultdict{id_usr1:1, id_usr2:1, ...}} # and also return another dict, as a map (res3) to tell us the community id of a user # and finally the whole set of communities (not limited in size) # and a similar map in res4 #################################################################################################### def read_in_communities(sizeN=300): res = defaultdict(int) res7s = defaultdict(int) res3 = defaultdict(int) res3 = defaultdict(lambda: -1, res3) #res4 = defaultdict(int) #res4 = defaultdict(lambda: -1, res4) f = open(spec_users, "r") i = 0 for line in f: com_id = i res[com_id] = defaultdict(int) users = line.split() for user_id in users: res[com_id][user_id] = 1 i += 1 for com in res: if len(res[com]) >= sizeN: res7s[com] = res[com] for usr in res[com]: res4[usr] = com for com in res7s: for usr in res7s[com]: res3[usr] = com return res7s, res3, res, res4 #################################################################################################### # in order to calculate TF-IDF for concepts, keywords and entities, we treat communities as documents # here we extract document frequency for each of them (a one pass through the taxonomy dataset) #################################################################################################### def community_IDFs(user_com): # resulting dictionaries in which the needed counts for com_IDFs are collected keywords_sum = defaultdict(int) entities_sum = defaultdict(int) concepts_sum = defaultdict(int) # 
taxonomies_sum = defaultdict(int) # holds all the user ids (username --> user_id, map) user_ids = read_user_IDs() cnt = 0 with codecs.open(f_in,'r', encoding='utf8') as input_file: for line7s in input_file: try: line = json.loads(line7s) user_name = line["_id"] user_id = user_ids[user_name] # so here we assign the COM of that user COM = user_com[user_id] taxonomy_all = line["taxonomy"] keywords = taxonomy_all["keywords"] entities = taxonomy_all["entities"] concepts = taxonomy_all["concepts"] # taxonomy = taxonomy_all["taxonomy"] # this counts how many user we have analyzed cnt += 1 except KeyError: #print line7s # we don't print since it is tested, there some 10% users for whom # the taxonomy was not successfuly downloaded and they would be listed here continue for el in keywords: category = el["text"] category = category.lower() # if we first time encounter this keyword, add a list for it in the result if not category in keywords_sum: keywords_sum[category] = [] # we just put in a list all the recorded communities where this is found keywords_sum[category].append(COM) for el in entities: entity = el["text"] if entity in ['#', '#MentionTo', 'twitter', 'Twitter']: continue entity = entity.lower() # if we first time encounter this entity, add a list for it in the result if not entity in entities_sum: entities_sum[entity] = [] # we just put in a list all the recorded communities where this is found entities_sum[entity].append(COM) for el in concepts: concept = el["text"] concept = conc.lower() # if we first time encounter this concept, add a list for it in the result if not concept in concepts_sum: concepts_sum[concept] = [] # we just put in a list all the recorded communities where this is found concepts_sum[concept].append(COM) for el in taxonomy: taxonomy_tree = el["label"] taxon = taxonomy_tree # if we first time encounter this taxon, add a list for it in the result if not taxon in taxonomies_sum: taxonomies_sum[taxon] = [] taxonomies_sum[taxon].append(COM) # now we count for each keyword, entitiy or concept in how many distinct communities they were recorded keywords_res = defaultdict(int) entities_res = defaultdict(int) concepts_res = defaultdict(int) # taxonomies_res = defaultdict(int) for el in keywords_sum: keywords_res[el] = len(set(keywords_sum[el])) for el in entities_sum: entities_res[el] = len(set(entities_sum[el])) for el in concepts_sum: concepts_res[el] = len(set(concepts_sum[el])) for el in taxonomies_sum: taxonomies_res[el] = len(set(taxonomies_sum[el])) return keywords_res, entities_res, concepts_res, taxonomies_res ################################################## # the core function ################################################## """ here, the options are to visualize the taxonomy for the whole dataset (COM="ALL") and to visualize for different communities (COM="COM") that are read in through read_in_communities() in the case of communities, this functions is invoked once per each community -- user_list holds the ids of the users in one community -- TOP_N holds the number of top concepts, keywords and entities that we want to visualize and record -- user_com holds a map for user_id --> com_id -- N_COM holds the total number of communities found (changes depending on the community detection algorithm) """ def visualize_taxonomy_pies(COM="ALL", user_list=None, TOP_N=10, user_com=None, N_COM=0): # resulting dictionaries in which the counts and tfidf relevance are collected keywords_sum = defaultdict(int) entities_sum = defaultdict(int) concepts_sum = defaultdict(int) 
taxonomies_sum = defaultdict(int) # docSentiment_sum = defaultdict(int) # holds all the user ids user_ids = read_user_IDs() cnt = 0 with codecs.open(f_in,'r', encoding='utf8') as input_file: for line7s in input_file: try: line = json.loads(line7s) # if dealing with a community, check the user membership if COM <> "ALL": user_name = line["_id"] user_id = user_ids[user_name] if user_list[user_id] == 0: continue # if dealing with ALL, take all the users taxonomy_all = line["taxonomy"] keywords = taxonomy_all["keywords"] entities = taxonomy_all["entities"] concepts = taxonomy_all["concepts"] taxonomy = taxonomy_all["taxonomy"] # docSentiment = taxonomy_all["docSentiment"] # this counts how many user we have analyzed cnt += 1 except KeyError: #print line7s # we don't print since it is tested, there some 10% users for whom # the taxonomy was not successfuly downloaded and they would be listed here continue for el in keywords: category = el["text"] category = category.lower() # if we first time encounter this keyword, add a dict for it in the result if not category in keywords_sum: keywords_sum[category] = defaultdict(int) # we use this not so well coded part because tuples do not allow assignment old_relev = keywords_sum[category][0] old_cnt = keywords_sum[category][1] new_relev = old_relev + float(el["relevance"]) new_cnt = old_cnt + 1 keywords_sum[category] = (new_relev, new_cnt) for el in entities: entity = el["text"] if entity in ['#', '#MentionTo', 'twitter', 'Twitter']: continue entity = entity.lower() # if we first time encounter this entity, add a dict for it in the result if not entity in entities_sum: entities_sum[entity] = defaultdict(int) # we use this not so well coded part because tuples do not allow assignment old_relev = entities_sum[entity][0] old_cnt = entities_sum[entity][1] new_relev = old_relev + float(el["relevance"]) new_cnt = old_cnt + 1 entities_sum[entity] = (new_relev, new_cnt, el["type"]) for el in concepts: concept = el["text"] concept = concept.lower() # if we first time encounter this concept, add a dict for it in the result if not concept in concepts_sum: concepts_sum[concept] = defaultdict(int) # we use this not so well coded part because tuples do not allow assignment old_relev = concepts_sum[concept][0] old_cnt = concepts_sum[concept][1] new_relev = old_relev + float(el["relevance"]) new_cnt = old_cnt + 1 concepts_sum[concept] = (new_relev, new_cnt) # a bit different procedure for extracting the sentiment sentiment = docSentiment["type"] if sentiment == "neutral": docSentiment_sum[sentiment] += 1 else: if not sentiment in docSentiment_sum: docSentiment_sum[sentiment] = defaultdict(int) old_score = docSentiment_sum[sentiment][0] old_cnt = docSentiment_sum[sentiment][1] old_mixed_cnt = docSentiment_sum[sentiment][2] try: new_score = old_score + float(docSentiment["score"]) except KeyError: continue new_cnt = old_cnt + 1 try: new_mixed_cnt = old_mixed_cnt + int(docSentiment["mixed"]) except KeyError: continue docSentiment_sum[sentiment] = (new_score, new_cnt, new_mixed_cnt) for el in taxonomy: taxonomy_tree = el["label"] taxon = taxonomy_tree if not taxon in taxonomies_sum: taxonomies_sum[taxon] = defaultdict(int) old_score = taxonomies_sum[taxon][0] old_cnt = taxonomies_sum[taxon][1] new_score = old_score + float(el["score"]) new_cnt = old_cnt + 1 taxonomies_sum[taxon] = (new_score, new_cnt) com_size = cnt # THIS IS A CONSTANT, because we know how many users there are in total after we did one ALL run N = 27665 print "*** The community %s ***" % COM print 
"Analyzed %d users out of total %d users " % (com_size, N) pos_users = docSentiment_sum["positive"][1] neg_users = docSentiment_sum["negative"][1] try: neu_users = docSentiment_sum["neutral"] except TypeError: neu_users = 0 pos_score = docSentiment_sum["positive"][0] neg_score = docSentiment_sum["negative"][0] print "___________________" print "Sentiment stats: positive %d users; negative %d users; and neutral %d " % (pos_users, neg_users, neu_users) print "Sentiment score: positive %f ; negative %f; and the sum sentiment %f " % (pos_score, neg_score, pos_score + neg_score) print "Overall positive sentiment pct is %f " % (float(pos_users)/com_size) print "___________________" print "Total keywords found ", len(keywords_sum) print "Total entities found ", len(entities_sum) print "Total concepts found ", len(concepts_sum) print "Total taxonomies on different levels found ", len(taxonomies_sum) print "___________________" # if we deal with communities, then the number of documents is the total number of communities # and IDF values are found with help of the function community_IDFs if COM <> 'ALL': keywords_res, entities_res, concepts_res, taxonomies_res = community_IDFs(user_com) N = N_COM ##################### ## STARTS plotting ## ##################### if COM == 'ALL': os.chdir('ALL/pie_plots') else: os.chdir(ARG + '/pie_plots_in') #os.chdir(ARG + '/pie_plots_0.2') ##################### ## KEYWORDS ## ##################### for kw in keywords_sum: tot_relev = keywords_sum[kw][0] tot_cnt = keywords_sum[kw][1] # v1 for ALL if COM == 'ALL': inv_kw_fq = 0 if tot_cnt == 0 else N/float(tot_cnt) tfidf = float(tot_relev * math.log(1.0 + inv_kw_fq)) else: # v2 THIS ONE IS USED for COMMUNITIES com_N = keywords_res[kw] inv_fq = 0 if com_N == 0 else N/float(com_N) tfidf = float(tot_cnt * math.log(1.0 + inv_fq)) keywords_sum[kw] = (tot_relev, tot_cnt, tfidf) print print "Keywords (ordered by TF-IDF): [relevance, count, TF-IDF]" ord_keywords_sum2 = OrderedDict(sorted(keywords_sum.items(), key=lambda x: x[1][2], reverse = True)) labels = np.empty([TOP_N], dtype="<U26") sizes = np.empty([TOP_N], dtype=float) sizes_tot = np.empty([TOP_N], dtype=float) i = 0 for el in ord_keywords_sum2: print el, ord_keywords_sum2[el] labels[i] = el sizes[i] = float(ord_keywords_sum2[el][2]) sizes_tot[i] = float(ord_keywords_sum2[el][0]) i += 1 if i == TOP_N: break plot_pie(labels, sizes, "kw_tfid_com_" + str(COM) + "_top_" + str(TOP_N) + ".png") plt.clf() plot_pie(labels, sizes_tot, "kw_com_" + str(COM) + "_top_" + str(TOP_N) + ".png") plt.clf() print ##################### ## ENTITIES ## ##################### for en in entities_sum: tot_relev = entities_sum[en][0] tot_cnt = entities_sum[en][1] # v1 for ALL if COM == 'ALL': inv_ent_fq = 0 if tot_cnt == 0 else N/float(tot_cnt) tfidf = tot_relev * math.log(1.0 + inv_ent_fq) else: # v2 THIS IS USED for COMMUNITIES com_N = entities_res[en] inv_fq = 0 if com_N == 0 else N/float(com_N) tfidf = float(tot_cnt * math.log(1.0 + inv_fq)) entities_sum[en] = (tot_relev, tot_cnt, tfidf) print "Entities (sorted by TF-IDF): [relevance, count, TF-IDF]" ord_entities_sum2 = OrderedDict(sorted(entities_sum.items(), key=lambda x: x[1][2], reverse = True)) labels = np.empty([TOP_N], dtype="<U26") sizes = np.empty([TOP_N], dtype=float) sizes_tot = np.empty([TOP_N], dtype=float) i = 0 for el in ord_entities_sum2: print el, ord_entities_sum2[el] labels[i] = el #print labels sizes_tot[i] = float(ord_entities_sum2[el][0]) sizes[i] = float(ord_entities_sum2[el][2]) i += 1 if i == TOP_N: break 
plot_pie(labels, sizes, "ent_tfidf_com_" + str(COM) + "_top_" + str(TOP_N) + ".png") plt.clf() plot_pie(labels, sizes_tot, "ent_com_" + str(COM) + "_top_" + str(TOP_N) + ".png") plt.clf() print ##################### ## CONCEPTS ## ##################### for conc in concepts_sum: tot_relev = concepts_sum[conc][0] tot_cnt = concepts_sum[conc][1] # v1 COM = ALL if COM == 'ALL': inv_fq = 0 if tot_cnt == 0 else N/float(tot_cnt) tfidf = float(tot_relev * math.log(1.0 + inv_fq)) else: # v2 THIS IS USED for COMMUNITIES com_N = concepts_res[conc] inv_fq = 0 if com_N == 0 else N/float(com_N) tfidf = float(tot_cnt * math.log(1.0 + inv_fq)) concepts_sum[conc] = (tot_relev, tot_cnt, tfidf) print "Concepts (sorted by TF-IDF): [relevance, count, TF-IDF]" ord_concepts_sum = OrderedDict(sorted(concepts_sum.items(), key=lambda x: x[1][2], reverse = True)) labels = np.empty([TOP_N], dtype="<U26") sizes = np.empty([TOP_N], dtype=float) sizes_tot = np.empty([TOP_N], dtype=float) i = 0 for el in ord_concepts_sum: print el, ord_concepts_sum[el] labels[i] = el sizes[i] = float(ord_concepts_sum[el][2]) sizes_tot[i] = float(ord_concepts_sum[el][0]) i += 1 if i == TOP_N: break plot_pie(labels, sizes, "concept_tfidf_com_" + str(COM) + "_top_" + str(TOP_N) + ".png") plt.clf() plot_pie(labels, sizes_tot, "concept_com_" + str(COM) + "_top_" + str(TOP_N) + ".png") plt.clf() print ##################### ## TAXONOMIES ## ##################### for taxon in taxonomies_sum: tot_score = taxonomies_sum[taxon][0] tot_cnt = taxonomies_sum[taxon][1] # v1 COM = ALL if COM == 'ALL': inv_fq = 0 if tot_cnt == 0 else N/float(tot_cnt) tfidf = float(tot_score * math.log(1.0 + inv_fq)) else: # v2 THIS IS USED for COMMUNITIES com_N = taxonomies_res[taxon] inv_fq = 0 if com_N == 0 else N/float(com_N) tfidf = float(tot_cnt * math.log(1.0 + inv_fq)) taxonomies_sum[taxon] = (tot_score, tot_cnt, tfidf) print "Taxonomies (sorted by TF-IDF): [relevance, count, TF-IDF]" ord_taxonomies_sum = OrderedDict(sorted(taxonomies_sum.items(), key=lambda x: x[1][2], reverse = True)) labels = np.empty([TOP_N], dtype="<U26") sizes = np.empty([TOP_N], dtype=float) sizes_tot = np.empty([TOP_N], dtype=float) i = 0 for el in ord_taxonomies_sum: print el, ord_taxonomies_sum[el] labels[i] = el sizes[i] = float(ord_taxonomies_sum[el][2]) sizes_tot[i] = float(ord_taxonomies_sum[el][0]) i += 1 if i == TOP_N: break plot_pie(labels, sizes, "taxon_tfidf_com_" + str(COM) + "_top_" + str(TOP_N) + ".png") plt.clf() plot_pie(labels, sizes_tot, "taxon_com_" + str(COM) + "_top_" + str(TOP_N) + ".png") plt.clf() print os.chdir("../../../") ################################################## # the core function for the user lists (TOP etc.) 
################################################## """ here, the options are to visualize the taxonomy for the whole dataset (COM="ALL") and to visualize for different communities (COM="COM") that are read in through read_in_communities() in the case of communities, this functions is invoked once per each community -- user_list holds the ids of the users in one community -- TOP_N holds the number of top concepts, keywords and entities that we want to visualize and record -- user_com holds a map for user_id --> com_id -- N_COM holds the total number of communities found (changes depending on the community detection algorithm) """ def visualize_taxonomy_pies_user_list(user_ids, COM, user_list=None, TOP_N=20): # resulting dictionaries in which the counts and tfidf relevance are collected keywords_sum = defaultdict(int) entities_sum = defaultdict(int) concepts_sum = defaultdict(int) taxonomies_sum = defaultdict(int) # docSentiment_sum = defaultdict(int) cnt = 0 with codecs.open(f_in,'r', encoding='utf8') as input_file: for line7s in input_file: try: line = json.loads(line7s) user_name = line["_id"] user_id = user_ids[user_name] if user_list[user_id] == 0: continue # if dealing with ALL, take all the users taxonomy_all = line["taxonomy"] keywords = taxonomy_all["keywords"] entities = taxonomy_all["entities"] concepts = taxonomy_all["concepts"] taxonomy = taxonomy_all["taxonomy"] # docSentiment = taxonomy_all["docSentiment"] # this counts how many user we have analyzed cnt += 1 except KeyError: #print line7s # we don't print since it is tested, there some 10% users for whom # the taxonomy was not successfuly downloaded and they would be listed here continue for el in keywords: category = el["text"] # if we first time encounter this keyword, add a dict for it in the result if not category in keywords_sum: keywords_sum[category] = defaultdict(int) # we use this not so well coded part because tuples do not allow assignment old_relev = keywords_sum[category][0] old_cnt = keywords_sum[category][1] new_relev = old_relev + float(el["relevance"]) new_cnt = old_cnt + 1 keywords_sum[category] = (new_relev, new_cnt) for el in entities: entity = el["text"] # if we first time encounter this entity, add a dict for it in the result if not entity in entities_sum: entities_sum[entity] = defaultdict(int) # we use this not so well coded part because tuples do not allow assignment old_relev = entities_sum[entity][0] old_cnt = entities_sum[entity][1] new_relev = old_relev + float(el["relevance"]) new_cnt = old_cnt + 1 entities_sum[entity] = (new_relev, new_cnt, el["type"]) for el in concepts: concept = el["text"] # if we first time encounter this concept, add a dict for it in the result if not concept in concepts_sum: concepts_sum[concept] = defaultdict(int) # we use this not so well coded part because tuples do not allow assignment old_relev = concepts_sum[concept][0] old_cnt = concepts_sum[concept][1] new_relev = old_relev + float(el["relevance"]) new_cnt = old_cnt + 1 concepts_sum[concept] = (new_relev, new_cnt) # a bit different procedure for extracting the sentiment sentiment = docSentiment["type"] if sentiment == "neutral": docSentiment_sum[sentiment] += 1 else: if not sentiment in docSentiment_sum: docSentiment_sum[sentiment] = defaultdict(int) old_score = docSentiment_sum[sentiment][0] old_cnt = docSentiment_sum[sentiment][1] old_mixed_cnt = docSentiment_sum[sentiment][2] try: new_score = old_score + float(docSentiment["score"]) except KeyError: continue new_cnt = old_cnt + 1 try: new_mixed_cnt = 
old_mixed_cnt + int(docSentiment["mixed"]) except KeyError: continue docSentiment_sum[sentiment] = (new_score, new_cnt, new_mixed_cnt) for el in taxonomy: taxonomy_tree = el["label"] taxon = taxonomy_tree if not taxon in taxonomies_sum: taxonomies_sum[taxon] = defaultdict(int) old_score = taxonomies_sum[taxon][0] old_cnt = taxonomies_sum[taxon][1] new_score = old_score + float(el["score"]) new_cnt = old_cnt + 1 taxonomies_sum[taxon] = (new_score, new_cnt) com_size = cnt # THIS IS A CONSTANT, because we know how many users there are in total after we did one ALL run N = 27665 print "*** The user list %s ***" % COM print "Analyzed %d users out of total %d users " % (com_size, N) try: pos_users = docSentiment_sum["positive"][1] pos_score = docSentiment_sum["positive"][0] except TypeError: pos_users = 0 pos_score = 0 try: neg_users = docSentiment_sum["negative"][1] neg_score = docSentiment_sum["negative"][0] except TypeError: neg_users = 0 neg_score = 0 try: neu_users = docSentiment_sum["neutral"] except TypeError: neu_users = 0 print "___________________" print "Sentiment stats: positive %d users; negative %d users; and neutral %d " % (pos_users, neg_users, neu_users) print "Sentiment score: positive %f ; negative %f; and the sum sentiment %f " % (pos_score, neg_score, pos_score + neg_score) print "Overall positive sentiment pct is %f " % (float(pos_users)/com_size) print "___________________" print "Total keywords found ", len(keywords_sum) print "Total entities found ", len(entities_sum) print "Total concepts found ", len(concepts_sum) print "Total taxonomies on different levels found ", len(taxonomies_sum) print "___________________" ##################### ## STARTS plotting ## ##################### os.chdir(DIR_top_users) ##################### ## KEYWORDS ## ##################### for kw in keywords_sum: tot_relev = keywords_sum[kw][0] tot_cnt = keywords_sum[kw][1] inv_kw_fq = 0 if tot_cnt == 0 else N/float(tot_cnt) tfidf = float(tot_relev * math.log(1.0 + inv_kw_fq)) keywords_sum[kw] = (tot_relev, tot_cnt, tfidf) print print "Keywords (ordered by TF-IDF): [relevance, count, TF-IDF]" ord_keywords_sum2 = OrderedDict(sorted(keywords_sum.items(), key=lambda x: x[1][0], reverse = True)) labels = np.empty([TOP_N], dtype="<U26") sizes = np.empty([TOP_N], dtype=float) sizes_tot = np.empty([TOP_N], dtype=float) i = 0 for el in ord_keywords_sum2: print el, ord_keywords_sum2[el] labels[i] = el sizes[i] = float(ord_keywords_sum2[el][2]) sizes_tot[i] = float(ord_keywords_sum2[el][0]) i += 1 if i == TOP_N: break #plot_pie(labels, sizes, "kw_tfid_com_" + str(COM) + ".png") #plt.clf() plot_pie(labels, sizes_tot, "kw_com_" + str(COM) + ".png") plt.clf() print ##################### ## ENTITIES ## ##################### for en in entities_sum: tot_relev = entities_sum[en][0] tot_cnt = entities_sum[en][1] inv_ent_fq = 0 if tot_cnt == 0 else N/float(tot_cnt) tfidf = tot_relev * math.log(1.0 + inv_ent_fq) entities_sum[en] = (tot_relev, tot_cnt, tfidf) print "Entities (sorted by TF-IDF): [relevance, count, TF-IDF]" ord_entities_sum2 = OrderedDict(sorted(entities_sum.items(), key=lambda x: x[1][0], reverse = True)) labels = np.empty([TOP_N], dtype="<U26") sizes = np.empty([TOP_N], dtype=float) sizes_tot = np.empty([TOP_N], dtype=float) i = 0 for el in ord_entities_sum2: print el, ord_entities_sum2[el] labels[i] = el #print labels sizes_tot[i] = float(ord_entities_sum2[el][0]) sizes[i] = float(ord_entities_sum2[el][2]) i += 1 if i == TOP_N: break #plot_pie(labels, sizes, "ent_tfidf_com_" + str(COM) + ".png") 
#plt.clf() plot_pie(labels, sizes_tot, "ent_com_" + str(COM) + ".png") plt.clf() print ##################### ## CONCEPTS ## ##################### for conc in concepts_sum: tot_relev = concepts_sum[conc][0] tot_cnt = concepts_sum[conc][1] inv_fq = 0 if tot_cnt == 0 else N/float(tot_cnt) tfidf = float(tot_relev * math.log(1.0 + inv_fq)) concepts_sum[conc] = (tot_relev, tot_cnt, tfidf) print "Concepts (sorted by TF-IDF): [relevance, count, TF-IDF]" ord_concepts_sum = OrderedDict(sorted(concepts_sum.items(), key=lambda x: x[1][0], reverse = True)) labels = np.empty([TOP_N], dtype="<U26") sizes = np.empty([TOP_N], dtype=float) sizes_tot = np.empty([TOP_N], dtype=float) i = 0 for el in ord_concepts_sum: print el, ord_concepts_sum[el] labels[i] = el sizes[i] = float(ord_concepts_sum[el][2]) sizes_tot[i] = float(ord_concepts_sum[el][0]) i += 1 if i == TOP_N: break #plot_pie(labels, sizes, "concept_tfidf_" + str(COM) + ".png") #plt.clf() plot_pie(labels, sizes_tot, "concept_" + str(COM) + ".png") plt.clf() print ##################### ## TAXONOMIES ## ##################### for taxon in taxonomies_sum: tot_score = taxonomies_sum[taxon][0] tot_cnt = taxonomies_sum[taxon][1] inv_fq = 0 if tot_cnt == 0 else N/float(tot_cnt) tfidf = float(tot_score * math.log(1.0 + inv_fq)) taxonomies_sum[taxon] = (tot_score, tot_cnt, tfidf) print "Taxonomies (sorted by TF-IDF): [relevance, count, TF-IDF]" ord_taxonomies_sum = OrderedDict(sorted(taxonomies_sum.items(), key=lambda x: x[1][0], reverse = True)) labels = np.empty([TOP_N], dtype="<U26") sizes = np.empty([TOP_N], dtype=float) sizes_tot = np.empty([TOP_N], dtype=float) i = 0 for el in ord_taxonomies_sum: print el, ord_taxonomies_sum[el] labels[i] = el sizes[i] = float(ord_taxonomies_sum[el][2]) sizes_tot[i] = float(ord_taxonomies_sum[el][0]) i += 1 if i == TOP_N: break #plot_pie(labels, sizes, "taxon_tfidf_" + str(COM) + ".png") #plt.clf() plot_pie(labels, sizes_tot, "taxon_" + str(COM) + ".png") plt.clf() print os.chdir("../../") def plot_pie(labels, sizes, f_pie_name): #colors = ['yellowgreen', 'mediumpurple', 'lightskyblue', 'lightcoral'] #explode = np.zeros(sizes.shape[0]) # proportion with which to offset each wedge plt.pie(sizes, # data #explode=explode, # offset parameters labels=labels, # slice labels #colors=colors, # array of colours autopct='%1.1f%%', # print the values inside the wedges shadow=True, # enable shadow startangle=70 # starting angle ) #plt.axis('equal') plt.savefig(f_pie_name) def main_TOP_users(): os.chdir(IN_DIR) user_ids = read_user_IDs() TOP_users_lists = read_TOP_users() for top_list in TOP_users_lists: sys.stdout = open(DIR_top_users + "STATS_" + top_list, 'w') visualize_taxonomy_pies_user_list(user_ids, top_list, user_list=TOP_users_lists[top_list]) def main(COM='ALL'): os.chdir(IN_DIR) if COM == "ALL": sys.stdout = open('ALL/top_20_stats', 'w') visualize_taxonomy_pies("ALL") else: sys.stdout = open(ARG + '/com_CODA_stats_in', 'w') sizeN = 500 top_communities, com_id_map, all_communities, all_com_id_map = read_in_communities(sizeN) print len(top_communities), "top communities found of size ", str(sizeN) NALL = len(all_communities) print NALL, "all communities found" for community in top_communities: visualize_taxonomy_pies(str(community), user_list=top_communities[community], TOP_N=20, user_com=all_com_id_map, N_COM=NALL) # other possible argument is 'COM' or any other string to print pies for the communities # 'ALL' prints the pie stats for the whole dataset # cannot call these at the same time, but in two python script calls 
############################################################################### #main('ALL') # runme as # python pie_plot_taxonomies.py > "/home/sscepano/Projects7s/Twitter-workspace/DATA/taxonomy_stats/ALL/pie_plots/ALL_stats.txt" ############################################################################### ############################################################################### main('COM') # runme as # python pie_plot_taxonomies.py > "/home/sscepano/Projects7s/Twitter-workspace/DATA/taxonomy_stats/SR/pie_plots_0.6/com_20_stats.txt" # or # python pie_plot_taxonomies.py > "/home/sscepano/Projects7s/Twitter-workspace/DATA/taxonomy_stats/SR_that_mention/pie_plots/com_20_stats.txt" ############################################################################### ############################################################################### # I am a bit different ############################################################################### #main_TOP_users() ###############################################################################
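For reference, a standalone sketch of the TF-IDF weighting used throughout the script above, tfidf = count * log(1 + N/df), where N is the number of communities ("documents") and df the number of communities containing the term; the numbers are made up.

import math

N = 40                      # total number of communities
term_count = 12             # occurrences of a term inside one community
communities_with_term = 5   # document frequency of the term

inv_fq = 0 if communities_with_term == 0 else N / float(communities_with_term)
tfidf = term_count * math.log(1.0 + inv_fq)
print(tfidf)                # ~26.4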
mit
mhdella/scikit-learn
examples/manifold/plot_lle_digits.py
181
8510
""" ============================================================================= Manifold learning on handwritten digits: Locally Linear Embedding, Isomap... ============================================================================= An illustration of various embeddings on the digits dataset. The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not technically a manifold embedding method, as it learn a high-dimensional representation on which we apply a dimensionality reduction method. However, it is often useful to cast a dataset into a representation in which the classes are linearly-separable. t-SNE will be initialized with the embedding that is generated by PCA in this example, which is not the default setting. It ensures global stability of the embedding, i.e., the embedding does not depend on random initialization. """ # Authors: Fabian Pedregosa <[email protected]> # Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Gael Varoquaux # License: BSD 3 clause (C) INRIA 2011 print(__doc__) from time import time import numpy as np import matplotlib.pyplot as plt from matplotlib import offsetbox from sklearn import (manifold, datasets, decomposition, ensemble, lda, random_projection) digits = datasets.load_digits(n_class=6) X = digits.data y = digits.target n_samples, n_features = X.shape n_neighbors = 30 #---------------------------------------------------------------------- # Scale and visualize the embedding vectors def plot_embedding(X, title=None): x_min, x_max = np.min(X, 0), np.max(X, 0) X = (X - x_min) / (x_max - x_min) plt.figure() ax = plt.subplot(111) for i in range(X.shape[0]): plt.text(X[i, 0], X[i, 1], str(digits.target[i]), color=plt.cm.Set1(y[i] / 10.), fontdict={'weight': 'bold', 'size': 9}) if hasattr(offsetbox, 'AnnotationBbox'): # only print thumbnails with matplotlib > 1.0 shown_images = np.array([[1., 1.]]) # just something big for i in range(digits.data.shape[0]): dist = np.sum((X[i] - shown_images) ** 2, 1) if np.min(dist) < 4e-3: # don't show points that are too close continue shown_images = np.r_[shown_images, [X[i]]] imagebox = offsetbox.AnnotationBbox( offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r), X[i]) ax.add_artist(imagebox) plt.xticks([]), plt.yticks([]) if title is not None: plt.title(title) #---------------------------------------------------------------------- # Plot images of the digits n_img_per_row = 20 img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row)) for i in range(n_img_per_row): ix = 10 * i + 1 for j in range(n_img_per_row): iy = 10 * j + 1 img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8)) plt.imshow(img, cmap=plt.cm.binary) plt.xticks([]) plt.yticks([]) plt.title('A selection from the 64-dimensional digits dataset') #---------------------------------------------------------------------- # Random 2D projection using a random unitary matrix print("Computing random projection") rp = random_projection.SparseRandomProjection(n_components=2, random_state=42) X_projected = rp.fit_transform(X) plot_embedding(X_projected, "Random Projection of the digits") #---------------------------------------------------------------------- # Projection on to the first 2 principal components print("Computing PCA projection") t0 = time() X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X) plot_embedding(X_pca, "Principal Components projection of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Projection 
on to the first 2 linear discriminant components print("Computing LDA projection") X2 = X.copy() X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible t0 = time() X_lda = lda.LDA(n_components=2).fit_transform(X2, y) plot_embedding(X_lda, "Linear Discriminant projection of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Isomap projection of the digits dataset print("Computing Isomap embedding") t0 = time() X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X) print("Done.") plot_embedding(X_iso, "Isomap projection of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Locally linear embedding of the digits dataset print("Computing LLE embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='standard') t0 = time() X_lle = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_lle, "Locally Linear Embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Modified Locally linear embedding of the digits dataset print("Computing modified LLE embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='modified') t0 = time() X_mlle = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_mlle, "Modified Locally Linear Embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # HLLE embedding of the digits dataset print("Computing Hessian LLE embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='hessian') t0 = time() X_hlle = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_hlle, "Hessian Locally Linear Embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # LTSA embedding of the digits dataset print("Computing LTSA embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='ltsa') t0 = time() X_ltsa = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_ltsa, "Local Tangent Space Alignment of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # MDS embedding of the digits dataset print("Computing MDS embedding") clf = manifold.MDS(n_components=2, n_init=1, max_iter=100) t0 = time() X_mds = clf.fit_transform(X) print("Done. 
Stress: %f" % clf.stress_) plot_embedding(X_mds, "MDS embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Random Trees embedding of the digits dataset print("Computing Totally Random Trees embedding") hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0, max_depth=5) t0 = time() X_transformed = hasher.fit_transform(X) pca = decomposition.TruncatedSVD(n_components=2) X_reduced = pca.fit_transform(X_transformed) plot_embedding(X_reduced, "Random forest embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Spectral embedding of the digits dataset print("Computing Spectral embedding") embedder = manifold.SpectralEmbedding(n_components=2, random_state=0, eigen_solver="arpack") t0 = time() X_se = embedder.fit_transform(X) plot_embedding(X_se, "Spectral embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # t-SNE embedding of the digits dataset print("Computing t-SNE embedding") tsne = manifold.TSNE(n_components=2, init='pca', random_state=0) t0 = time() X_tsne = tsne.fit_transform(X) plot_embedding(X_tsne, "t-SNE embedding of the digits (time %.2fs)" % (time() - t0)) plt.show()
bsd-3-clause
keflavich/APEX_CMZ_H2CO
plot_codes/figure_ratio_maps.py
2
11405
import numpy as np from astropy import units as u from paths import h2copath,figurepath,hpath import copy import os import aplpy import pylab as pl import matplotlib from astropy.io import fits from masked_cubes import cube303m, cube321m, cube303msm, cube321msm, sncube, sncubesm from ratio_cubes import ratiocube_303321, ratiocubesm_303321 from piecewise_rtotem import pwtem from astropy import log import paths matplotlib.rc_file(paths.pcpath('pubfiguresrc')) pl.ioff() pl.close(1) pl.close(2) cm = matplotlib.cm.RdYlBu_r cm.set_bad('#888888') figsize = (20,10) small_recen = dict(x=0.3, y=-0.03,width=1.05,height=0.27) big_recen = dict(x=0.55, y=-0.075,width=2.3,height=0.40) max_ratio = 0.51 # Old version: integrated ratio maps (this is still used in publication) for weighted in ("","_weighted","_masked_weighted"): for bl in ("_bl",""): for smooth in ("","_smooth",):#"_vsmooth"): ratio1 = 'H2CO_321220_to_303202{0}{1}_integ{2}.fits'.format(smooth,bl,weighted) ratio2 = 'H2CO_322221_to_303202{0}{1}_integ{2}.fits'.format(smooth,bl,weighted) for ii,ratio in enumerate((ratio1, ratio2)): log.info(ratio) fig = pl.figure(ii+1, figsize=figsize) fig.clf() F = aplpy.FITSFigure(os.path.join(h2copath, ratio), convention='calabretta', figure=fig) F.show_colorscale(cmap=cm, vmin=0, vmax=max_ratio) F.add_colorbar() F.tick_labels.set_xformat('d.dd') F.tick_labels.set_yformat('d.dd') F.recenter(**small_recen) F.save(os.path.join(figurepath, 'big_maps', ratio.replace(".fits",".pdf"))) F.recenter(**big_recen) F.save(os.path.join(figurepath, 'big_maps', "big_"+ratio.replace(".fits",".pdf"))) dustcolumn = '/Users/adam/work/gc/gcmosaic_column_conv36.fits' F.show_contour(dustcolumn, levels=[5], colors=[(0,0,0,0.5)], zorder=15, alpha=0.5, linewidths=[0.5], layer='dustcontour') F.save(os.path.join(figurepath, 'big_maps', "big_"+ratio.replace(".fits","_withcontours.pdf"))) # Dendrogram mean ratios for weighted in ("","weighted"): for smooth in ("","_smooth",):#"_vsmooth"): ratio = 'RatioCube_DendrogramObjects{0}_Piecewise_{1}mean.fits'.format(smooth,weighted) log.info(ratio) fig = pl.figure(1, figsize=figsize) fig.clf() F = aplpy.FITSFigure(os.path.join(h2copath, ratio), convention='calabretta', figure=fig) F.show_colorscale(cmap=cm, vmin=0, vmax=max_ratio) F.add_colorbar() F.tick_labels.set_xformat('d.dd') F.tick_labels.set_yformat('d.dd') F.recenter(**small_recen) F.save(os.path.join(figurepath, 'big_maps', ratio.replace(".fits",".pdf"))) F.recenter(**big_recen) F.save(os.path.join(figurepath, 'big_maps', "big_"+ratio.replace(".fits",".pdf"))) # "new" version: 20 km/s slices # (used for analysis, not for publication) vcuts = np.arange(-60,141,20) fig = pl.figure(1, figsize=figsize) for cube,sn,smooth in zip((ratiocube_303321, ratiocubesm_303321), (sncube, sncubesm), ("","_smooth",)):#"_vsmooth"): for vrange in zip(vcuts[:-1], vcuts[1:]): proj = cube.spectral_slab(*(vrange*u.km/u.s)).mean(axis=0) fig.clf() F = aplpy.FITSFigure(proj.hdu, convention='calabretta', figure=fig) F.show_colorscale(cmap=cm, vmin=0.1, vmax=0.65) F.add_colorbar() F.tick_labels.set_xformat('d.dd') F.tick_labels.set_yformat('d.dd') #F.recenter(**small_recen) #F.save(os.path.join(figurepath, ratio.replace(".fits",".pdf"))) F.recenter(**big_recen) F.add_label(1.60, -0.22, "$v=[{0}, {1}]$ km s$^{{-1}}$".format(vrange[0],vrange[1]), color='w', size=14, zorder=20, horizontalalignment='left') F.save(os.path.join(figurepath, 'big_maps', "big_H2CO_321220_to_303202{0}_bl_{1}to{2}.pdf" .format(smooth,int(vrange[0]),int(vrange[1])) ) ) fig = pl.figure(1, 
figsize=figsize) for (cubehi,cubelo),sn,smooth in zip(((cube303m,cube321m), (cube303msm,cube321msm)), (sncube, sncubesm), ("","_smooth",)):#"_vsmooth"): for vrange in zip(vcuts[:-1], vcuts[1:]): projhi = cubehi.spectral_slab(*(vrange*u.km/u.s)).mean(axis=0) projlo = cubelo.spectral_slab(*(vrange*u.km/u.s)).mean(axis=0) proj = projlo/projhi hdu = fits.PrimaryHDU(data=proj.decompose().value, header=projlo.hdu.header) fig.clf() F = aplpy.FITSFigure(hdu, convention='calabretta', figure=fig) cm = matplotlib.cm.RdYlBu_r cm.set_bad('#888888') F.show_colorscale(cmap=cm, vmin=0.1, vmax=0.65) F.add_colorbar() F.tick_labels.set_xformat('d.dd') F.tick_labels.set_yformat('d.dd') #F.recenter(**small_recen) #F.save(os.path.join(figurepath, ratio.replace(".fits",".pdf"))) F.recenter(**big_recen) F.add_label(1.60, -0.22, "$v=[{0}, {1}]$ km s$^{{-1}}$".format(vrange[0],vrange[1]), color='w', size=14, zorder=20, horizontalalignment='left') F.save(os.path.join(figurepath, 'big_maps', "big_H2CO_321220_to_303202{0}_bl_{1}to{2}_slabratio.pdf" .format(smooth,int(vrange[0]),int(vrange[1])) ) ) tproj = np.copy(proj) tproj[np.isfinite(proj)] = pwtem(proj[np.isfinite(proj)].value) hdu = fits.PrimaryHDU(tproj, projhi.hdu.header) fig.clf() F = aplpy.FITSFigure(hdu, convention='calabretta', figure=fig) #cm = copy.copy(pl.cm.rainbow) cm = copy.copy(pl.cm.RdYlBu_r) cm.set_bad('#888888') #cm.set_bad((0.5,0.5,0.5,0.5)) F.show_colorscale(cmap=cm,vmin=15,vmax=200) F.set_tick_labels_format('d.dd','d.dd') peaksn = sn.spectral_slab(*(vrange*u.km/u.s)).max(axis=0) peaksn[(peaksn<0) | np.isnan(peaksn)] = 0 color = (0.5,)*3 # should be same as background #888 nlevs = 50 F.show_contour(peaksn.hdu, levels=[0]+np.logspace(0.20,1,nlevs).tolist(), colors=[(0.5,0.5,0.5,1)] + [color + (alpha,) for alpha in np.exp(-(np.logspace(0.20,1,nlevs)-10**0.2)**2/(1.0**2*2.))], filled=True, zorder=10, convention='calabretta') F.add_colorbar() F.add_label(1.60, -0.22, "$v=[{0}, {1}]$ km s$^{{-1}}$".format(vrange[0],vrange[1]), color='w', size=14, horizontalalignment='left', zorder=20) F.colorbar.set_axis_label_text('T (K)') F.recenter(**big_recen) F.save(os.path.join(figurepath, 'big_maps', 'big_lores{0}_tmap_greyed_{1}to{2}_slabratio.png'.format(smooth, int(vrange[0]), int(vrange[1])))) log.info(os.path.join(figurepath, 'big_maps', 'big_lores{0}_tmap_greyed_{1}to{2}_slabratio.png'.format(smooth, int(vrange[0]), int(vrange[1])))) fig = pl.figure(1, figsize=figsize) for cube,sn,smooth in zip((ratiocube_303321, ratiocubesm_303321), (sncube, sncubesm), ("","_smooth",)):#"_vsmooth"): for vrange in zip(vcuts[:-1], vcuts[1:]): fig.clf() proj = cube.spectral_slab(*(vrange*u.km/u.s)).mean(axis=0) tproj = np.copy(proj) tproj[np.isfinite(proj)] = pwtem(proj[np.isfinite(proj)].value) hdu = fits.PrimaryHDU(tproj, proj.hdu.header) hdu.writeto(hpath("tmap{0}_{1}to{2}".format(smooth, int(vrange[0]), int(vrange[1]))), clobber=True) F = aplpy.FITSFigure(hdu, convention='calabretta', figure=fig) #cm = copy.copy(pl.cm.rainbow) #cm.set_bad((0.5,0.5,0.5,0.5)) cm = copy.copy(pl.cm.RdYlBu_r) cm.set_bad('#888888') F.show_colorscale(cmap=cm,vmin=15,vmax=200) F.set_tick_labels_format('d.dd','d.dd') peaksn = sn.spectral_slab(*(vrange*u.km/u.s)).max(axis=0) peaksn[(peaksn<0) | np.isnan(peaksn)] = 0 color = (0.5,)*3 # should be same as background #888 nlevs = 50 F.show_contour(peaksn.hdu, levels=[0]+np.logspace(0.20,1,nlevs).tolist(), colors=[(0.5,0.5,0.5,1)] + [color + (alpha,) for alpha in np.exp(-(np.logspace(0.20,1,nlevs)-10**0.2)**2/(1.0**2*2.))], filled=True, zorder=10, 
convention='calabretta') F.add_colorbar() F.add_label(1.60, -0.22, "$v=[{0}, {1}]$ km s$^{{-1}}$".format(vrange[0],vrange[1]), color='w', size=14, zorder=20, horizontalalignment='left') F.colorbar.set_axis_label_text('T (K)') F.recenter(**big_recen) F.save(os.path.join(figurepath, 'big_maps', 'big_lores{0}_tmap_greyed_{1}to{2}.png'.format(smooth, int(vrange[0]), int(vrange[1])))) log.info(os.path.join(figurepath, 'big_maps', 'big_lores{0}_tmap_greyed_{1}to{2}.png'.format(smooth, int(vrange[0]), int(vrange[1]))))
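A minimal sketch of the aplpy display pattern repeated throughout the script above; the FITS filename is a placeholder, and the colour limits and recenter values echo max_ratio and small_recen from the script.

import aplpy
import matplotlib

cm = matplotlib.cm.RdYlBu_r
cm.set_bad('#888888')

F = aplpy.FITSFigure('ratio_map.fits', convention='calabretta')  # placeholder file
F.show_colorscale(cmap=cm, vmin=0, vmax=0.51)
F.add_colorbar()
F.tick_labels.set_xformat('d.dd')
F.tick_labels.set_yformat('d.dd')
F.recenter(x=0.3, y=-0.03, width=1.05, height=0.27)
F.save('ratio_map.pdf')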
bsd-3-clause
alexsavio/scikit-learn
sklearn/cluster/tests/test_k_means.py
20
31445
"""Testing for K-means""" import sys import numpy as np from scipy import sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_warns from sklearn.utils.testing import if_safe_multiprocessing_with_blas from sklearn.utils.testing import assert_raise_message from sklearn.utils.extmath import row_norms from sklearn.metrics.cluster import v_measure_score from sklearn.cluster import KMeans, k_means from sklearn.cluster import MiniBatchKMeans from sklearn.cluster.k_means_ import _labels_inertia from sklearn.cluster.k_means_ import _mini_batch_step from sklearn.datasets.samples_generator import make_blobs from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.exceptions import DataConversionWarning from sklearn.metrics.cluster import homogeneity_score # non centered, sparse centers to check the centers = np.array([ [0.0, 5.0, 0.0, 0.0, 0.0], [1.0, 1.0, 4.0, 0.0, 0.0], [1.0, 0.0, 0.0, 5.0, 1.0], ]) n_samples = 100 n_clusters, n_features = centers.shape X, true_labels = make_blobs(n_samples=n_samples, centers=centers, cluster_std=1., random_state=42) X_csr = sp.csr_matrix(X) def test_elkan_results(): rnd = np.random.RandomState(0) X_normal = rnd.normal(size=(50, 10)) X_blobs, _ = make_blobs(random_state=0) km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1) km_elkan = KMeans(algorithm='elkan', n_clusters=5, random_state=0, n_init=1) for X in [X_normal, X_blobs]: km_full.fit(X) km_elkan.fit(X) assert_array_almost_equal(km_elkan.cluster_centers_, km_full.cluster_centers_) assert_array_equal(km_elkan.labels_, km_full.labels_) def test_labels_assignment_and_inertia(): # pure numpy implementation as easily auditable reference gold # implementation rng = np.random.RandomState(42) noisy_centers = centers + rng.normal(size=centers.shape) labels_gold = - np.ones(n_samples, dtype=np.int) mindist = np.empty(n_samples) mindist.fill(np.infty) for center_id in range(n_clusters): dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1) labels_gold[dist < mindist] = center_id mindist = np.minimum(dist, mindist) inertia_gold = mindist.sum() assert_true((mindist >= 0.0).all()) assert_true((labels_gold != -1).all()) # perform label assignment using the dense array input x_squared_norms = (X ** 2).sum(axis=1) labels_array, inertia_array = _labels_inertia( X, x_squared_norms, noisy_centers) assert_array_almost_equal(inertia_array, inertia_gold) assert_array_equal(labels_array, labels_gold) # perform label assignment using the sparse CSR input x_squared_norms_from_csr = row_norms(X_csr, squared=True) labels_csr, inertia_csr = _labels_inertia( X_csr, x_squared_norms_from_csr, noisy_centers) assert_array_almost_equal(inertia_csr, inertia_gold) assert_array_equal(labels_csr, labels_gold) def test_minibatch_update_consistency(): # Check that dense and sparse minibatch update give the same results rng = np.random.RandomState(42) old_centers = centers + rng.normal(size=centers.shape) new_centers = old_centers.copy() new_centers_csr = old_centers.copy() counts = np.zeros(new_centers.shape[0], 
dtype=np.int32) counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32) x_squared_norms = (X ** 2).sum(axis=1) x_squared_norms_csr = row_norms(X_csr, squared=True) buffer = np.zeros(centers.shape[1], dtype=np.double) buffer_csr = np.zeros(centers.shape[1], dtype=np.double) # extract a small minibatch X_mb = X[:10] X_mb_csr = X_csr[:10] x_mb_squared_norms = x_squared_norms[:10] x_mb_squared_norms_csr = x_squared_norms_csr[:10] # step 1: compute the dense minibatch update old_inertia, incremental_diff = _mini_batch_step( X_mb, x_mb_squared_norms, new_centers, counts, buffer, 1, None, random_reassign=False) assert_greater(old_inertia, 0.0) # compute the new inertia on the same batch to check that it decreased labels, new_inertia = _labels_inertia( X_mb, x_mb_squared_norms, new_centers) assert_greater(new_inertia, 0.0) assert_less(new_inertia, old_inertia) # check that the incremental difference computation is matching the # final observed value effective_diff = np.sum((new_centers - old_centers) ** 2) assert_almost_equal(incremental_diff, effective_diff) # step 2: compute the sparse minibatch update old_inertia_csr, incremental_diff_csr = _mini_batch_step( X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr, buffer_csr, 1, None, random_reassign=False) assert_greater(old_inertia_csr, 0.0) # compute the new inertia on the same batch to check that it decreased labels_csr, new_inertia_csr = _labels_inertia( X_mb_csr, x_mb_squared_norms_csr, new_centers_csr) assert_greater(new_inertia_csr, 0.0) assert_less(new_inertia_csr, old_inertia_csr) # check that the incremental difference computation is matching the # final observed value effective_diff = np.sum((new_centers_csr - old_centers) ** 2) assert_almost_equal(incremental_diff_csr, effective_diff) # step 3: check that sparse and dense updates lead to the same results assert_array_equal(labels, labels_csr) assert_array_almost_equal(new_centers, new_centers_csr) assert_almost_equal(incremental_diff, incremental_diff_csr) assert_almost_equal(old_inertia, old_inertia_csr) assert_almost_equal(new_inertia, new_inertia_csr) def _check_fitted_model(km): # check that the number of clusters centers and distinct labels match # the expectation centers = km.cluster_centers_ assert_equal(centers.shape, (n_clusters, n_features)) labels = km.labels_ assert_equal(np.unique(labels).shape[0], n_clusters) # check that the labels assignment are perfect (up to a permutation) assert_equal(v_measure_score(true_labels, labels), 1.0) assert_greater(km.inertia_, 0.0) # check error on dataset being too small assert_raises(ValueError, km.fit, [[0., 1.]]) def test_k_means_plus_plus_init(): km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42).fit(X) _check_fitted_model(km) def test_k_means_new_centers(): # Explore the part of the code where a new center is reassigned X = np.array([[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0]]) labels = [0, 1, 2, 1, 1, 2] bad_centers = np.array([[+0, 1, 0, 0], [.2, 0, .2, .2], [+0, 0, 0, 0]]) km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10, random_state=1) for this_X in (X, sp.coo_matrix(X)): km.fit(this_X) this_labels = km.labels_ # Reorder the labels so that the first instance is in cluster 0, # the second in cluster 1, ... 
this_labels = np.unique(this_labels, return_index=True)[1][this_labels] np.testing.assert_array_equal(this_labels, labels) @if_safe_multiprocessing_with_blas def test_k_means_plus_plus_init_2_jobs(): if sys.version_info[:2] < (3, 4): raise SkipTest( "Possible multi-process bug with some BLAS under Python < 3.4") km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2, random_state=42).fit(X) _check_fitted_model(km) def test_k_means_precompute_distances_flag(): # check that a warning is raised if the precompute_distances flag is not # supported km = KMeans(precompute_distances="wrong") assert_raises(ValueError, km.fit, X) def test_k_means_plus_plus_init_sparse(): km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42) km.fit(X_csr) _check_fitted_model(km) def test_k_means_random_init(): km = KMeans(init="random", n_clusters=n_clusters, random_state=42) km.fit(X) _check_fitted_model(km) def test_k_means_random_init_sparse(): km = KMeans(init="random", n_clusters=n_clusters, random_state=42) km.fit(X_csr) _check_fitted_model(km) def test_k_means_plus_plus_init_not_precomputed(): km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42, precompute_distances=False).fit(X) _check_fitted_model(km) def test_k_means_random_init_not_precomputed(): km = KMeans(init="random", n_clusters=n_clusters, random_state=42, precompute_distances=False).fit(X) _check_fitted_model(km) def test_k_means_perfect_init(): km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42, n_init=1) km.fit(X) _check_fitted_model(km) def test_k_means_n_init(): rnd = np.random.RandomState(0) X = rnd.normal(size=(40, 2)) # two regression tests on bad n_init argument # previous bug: n_init <= 0 threw non-informative TypeError (#3858) assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X) assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X) def test_k_means_explicit_init_shape(): # test for sensible errors when giving explicit init # with wrong number of features or clusters rnd = np.random.RandomState(0) X = rnd.normal(size=(40, 3)) for Class in [KMeans, MiniBatchKMeans]: # mismatch of number of features km = Class(n_init=1, init=X[:, :2], n_clusters=len(X)) msg = "does not match the number of features of the data" assert_raises_regex(ValueError, msg, km.fit, X) # for callable init km = Class(n_init=1, init=lambda X_, k, random_state: X_[:, :2], n_clusters=len(X)) assert_raises_regex(ValueError, msg, km.fit, X) # mismatch of number of clusters msg = "does not match the number of clusters" km = Class(n_init=1, init=X[:2, :], n_clusters=3) assert_raises_regex(ValueError, msg, km.fit, X) # for callable init km = Class(n_init=1, init=lambda X_, k, random_state: X_[:2, :], n_clusters=3) assert_raises_regex(ValueError, msg, km.fit, X) def test_k_means_fortran_aligned_data(): # Check the KMeans will work well, even if X is a fortran-aligned data. 
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]]) centers = np.array([[0, 0], [0, 1]]) labels = np.array([0, 1, 1]) km = KMeans(n_init=1, init=centers, precompute_distances=False, random_state=42, n_clusters=2) km.fit(X) assert_array_equal(km.cluster_centers_, centers) assert_array_equal(km.labels_, labels) def test_mb_k_means_plus_plus_init_dense_array(): mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters, random_state=42) mb_k_means.fit(X) _check_fitted_model(mb_k_means) def test_mb_kmeans_verbose(): mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters, random_state=42, verbose=1) old_stdout = sys.stdout sys.stdout = StringIO() try: mb_k_means.fit(X) finally: sys.stdout = old_stdout def test_mb_k_means_plus_plus_init_sparse_matrix(): mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters, random_state=42) mb_k_means.fit(X_csr) _check_fitted_model(mb_k_means) def test_minibatch_init_with_large_k(): mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20) # Check that a warning is raised, as the number clusters is larger # than the init_size assert_warns(RuntimeWarning, mb_k_means.fit, X) def test_minibatch_k_means_random_init_dense_array(): # increase n_init to make random init stable enough mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters, random_state=42, n_init=10).fit(X) _check_fitted_model(mb_k_means) def test_minibatch_k_means_random_init_sparse_csr(): # increase n_init to make random init stable enough mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters, random_state=42, n_init=10).fit(X_csr) _check_fitted_model(mb_k_means) def test_minibatch_k_means_perfect_init_dense_array(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42, n_init=1).fit(X) _check_fitted_model(mb_k_means) def test_minibatch_k_means_init_multiple_runs_with_explicit_centers(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42, n_init=10) assert_warns(RuntimeWarning, mb_k_means.fit, X) def test_minibatch_k_means_perfect_init_sparse_csr(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42, n_init=1).fit(X_csr) _check_fitted_model(mb_k_means) def test_minibatch_sensible_reassign_fit(): # check if identical initial clusters are reassigned # also a regression test for when there are more desired reassignments than # samples. 
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5, cluster_std=1., random_state=42) zeroed_X[::2, :] = 0 mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42, init="random") mb_k_means.fit(zeroed_X) # there should not be too many exact zero cluster centers assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10) # do the same with batch-size > X.shape[0] (regression test) mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201, random_state=42, init="random") mb_k_means.fit(zeroed_X) # there should not be too many exact zero cluster centers assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10) def test_minibatch_sensible_reassign_partial_fit(): zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5, cluster_std=1., random_state=42) zeroed_X[::2, :] = 0 mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random") for i in range(100): mb_k_means.partial_fit(zeroed_X) # there should not be too many exact zero cluster centers assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10) def test_minibatch_reassign(): # Give a perfect initialization, but a large reassignment_ratio, # as a result all the centers should be reassigned and the model # should not longer be good for this_X in (X, X_csr): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100, random_state=42) mb_k_means.fit(this_X) score_before = mb_k_means.score(this_X) try: old_stdout = sys.stdout sys.stdout = StringIO() # Turn on verbosity to smoke test the display code _mini_batch_step(this_X, (X ** 2).sum(axis=1), mb_k_means.cluster_centers_, mb_k_means.counts_, np.zeros(X.shape[1], np.double), False, distances=np.zeros(X.shape[0]), random_reassign=True, random_state=42, reassignment_ratio=1, verbose=True) finally: sys.stdout = old_stdout assert_greater(score_before, mb_k_means.score(this_X)) # Give a perfect initialization, with a small reassignment_ratio, # no center should be reassigned for this_X in (X, X_csr): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100, init=centers.copy(), random_state=42, n_init=1) mb_k_means.fit(this_X) clusters_before = mb_k_means.cluster_centers_ # Turn on verbosity to smoke test the display code _mini_batch_step(this_X, (X ** 2).sum(axis=1), mb_k_means.cluster_centers_, mb_k_means.counts_, np.zeros(X.shape[1], np.double), False, distances=np.zeros(X.shape[0]), random_reassign=True, random_state=42, reassignment_ratio=1e-15) assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_) def test_minibatch_with_many_reassignments(): # Test for the case that the number of clusters to reassign is bigger # than the batch_size n_samples = 550 rnd = np.random.RandomState(42) X = rnd.uniform(size=(n_samples, 10)) # Check that the fit works if n_clusters is bigger than the batch_size. 
# Run the test with 550 clusters and 550 samples, because it turned out # that this values ensure that the number of clusters to reassign # is always bigger than the batch_size n_clusters = 550 MiniBatchKMeans(n_clusters=n_clusters, batch_size=100, init_size=n_samples, random_state=42).fit(X) def test_sparse_mb_k_means_callable_init(): def test_init(X, k, random_state): return centers # Small test to check that giving the wrong number of centers # raises a meaningful error msg = "does not match the number of clusters" assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr) # Now check that the fit actually works mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init, random_state=42).fit(X_csr) _check_fitted_model(mb_k_means) def test_mini_batch_k_means_random_init_partial_fit(): km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42) # use the partial_fit API for online learning for X_minibatch in np.array_split(X, 10): km.partial_fit(X_minibatch) # compute the labeling on the complete dataset labels = km.predict(X) assert_equal(v_measure_score(true_labels, labels), 1.0) def test_minibatch_default_init_size(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, batch_size=10, random_state=42, n_init=1).fit(X) assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size) _check_fitted_model(mb_k_means) def test_minibatch_tol(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10, random_state=42, tol=.01).fit(X) _check_fitted_model(mb_k_means) def test_minibatch_set_init_size(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, init_size=666, random_state=42, n_init=1).fit(X) assert_equal(mb_k_means.init_size, 666) assert_equal(mb_k_means.init_size_, n_samples) _check_fitted_model(mb_k_means) def test_k_means_invalid_init(): km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters) assert_raises(ValueError, km.fit, X) def test_mini_match_k_means_invalid_init(): km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters) assert_raises(ValueError, km.fit, X) def test_k_means_copyx(): # Check if copy_x=False returns nearly equal X after de-centering. my_X = X.copy() km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42) km.fit(my_X) _check_fitted_model(km) # check if my_X is centered assert_array_almost_equal(my_X, X) def test_k_means_non_collapsed(): # Check k_means with a bad initialization does not yield a singleton # Starting with bad centers that are quickly ignored should not # result in a repositioning of the centers to the center of mass that # would lead to collapsed centers which in turns make the clustering # dependent of the numerical unstabilities. 
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]]) array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]]) km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1) km.fit(my_X) # centers must not been collapsed assert_equal(len(np.unique(km.labels_)), 3) centers = km.cluster_centers_ assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1) assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1) assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1) def test_predict(): km = KMeans(n_clusters=n_clusters, random_state=42) km.fit(X) # sanity check: predict centroid labels pred = km.predict(km.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # sanity check: re-predict labeling for training set samples pred = km.predict(X) assert_array_equal(pred, km.labels_) # re-predict labels for training set using fit_predict pred = km.fit_predict(X) assert_array_equal(pred, km.labels_) def test_score(): km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1) s1 = km1.fit(X).score(X) km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1) s2 = km2.fit(X).score(X) assert_greater(s2, s1) km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1, algorithm='elkan') s1 = km1.fit(X).score(X) km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1, algorithm='elkan') s2 = km2.fit(X).score(X) assert_greater(s2, s1) def test_predict_minibatch_dense_input(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X) # sanity check: predict centroid labels pred = mb_k_means.predict(mb_k_means.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # sanity check: re-predict labeling for training set samples pred = mb_k_means.predict(X) assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_) def test_predict_minibatch_kmeanspp_init_sparse_input(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++', n_init=10).fit(X_csr) # sanity check: re-predict labeling for training set samples assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_) # sanity check: predict centroid labels pred = mb_k_means.predict(mb_k_means.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # check that models trained on sparse input also works for dense input at # predict time assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_) def test_predict_minibatch_random_init_sparse_input(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=10).fit(X_csr) # sanity check: re-predict labeling for training set samples assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_) # sanity check: predict centroid labels pred = mb_k_means.predict(mb_k_means.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # check that models trained on sparse input also works for dense input at # predict time assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_) def test_int_input(): X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]] for dtype in [np.int32, np.int64]: X_int = np.array(X_list, dtype=dtype) X_int_csr = sp.csr_matrix(X_int) init_int = X_int[:2] fitted_models = [ KMeans(n_clusters=2).fit(X_int), KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int), # mini batch kmeans is very unstable on such a small dataset hence # we use many inits MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int), MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr), 
MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int, n_init=1).fit(X_int), MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int, n_init=1).fit(X_int_csr), ] for km in fitted_models: assert_equal(km.cluster_centers_.dtype, np.float64) expected_labels = [0, 1, 1, 0, 0, 1] scores = np.array([v_measure_score(expected_labels, km.labels_) for km in fitted_models]) assert_array_equal(scores, np.ones(scores.shape[0])) def test_transform(): km = KMeans(n_clusters=n_clusters) km.fit(X) X_new = km.transform(km.cluster_centers_) for c in range(n_clusters): assert_equal(X_new[c, c], 0) for c2 in range(n_clusters): if c != c2: assert_greater(X_new[c, c2], 0) def test_fit_transform(): X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X) X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X) assert_array_equal(X1, X2) def test_predict_equal_labels(): km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1, algorithm='full') km.fit(X) assert_array_equal(km.predict(X), km.labels_) km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1, algorithm='elkan') km.fit(X) assert_array_equal(km.predict(X), km.labels_) def test_full_vs_elkan(): km1 = KMeans(algorithm='full', random_state=13) km2 = KMeans(algorithm='elkan', random_state=13) km1.fit(X) km2.fit(X) homogeneity_score(km1.predict(X), km2.predict(X)) == 1.0 def test_n_init(): # Check that increasing the number of init increases the quality n_runs = 5 n_init_range = [1, 5, 10] inertia = np.zeros((len(n_init_range), n_runs)) for i, n_init in enumerate(n_init_range): for j in range(n_runs): km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, random_state=j).fit(X) inertia[i, j] = km.inertia_ inertia = inertia.mean(axis=1) failure_msg = ("Inertia %r should be decreasing" " when n_init is increasing.") % list(inertia) for i in range(len(n_init_range) - 1): assert_true(inertia[i] >= inertia[i + 1], failure_msg) def test_k_means_function(): # test calling the k_means function directly # catch output old_stdout = sys.stdout sys.stdout = StringIO() try: cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters, verbose=True) finally: sys.stdout = old_stdout centers = cluster_centers assert_equal(centers.shape, (n_clusters, n_features)) labels = labels assert_equal(np.unique(labels).shape[0], n_clusters) # check that the labels assignment are perfect (up to a permutation) assert_equal(v_measure_score(true_labels, labels), 1.0) assert_greater(inertia, 0.0) # check warning when centers are passed assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters, init=centers) # to many clusters desired assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1) def test_x_squared_norms_init_centroids(): """Test that x_squared_norms can be None in _init_centroids""" from sklearn.cluster.k_means_ import _init_centroids X_norms = np.sum(X**2, axis=1) precompute = _init_centroids( X, 3, "k-means++", random_state=0, x_squared_norms=X_norms) assert_array_equal( precompute, _init_centroids(X, 3, "k-means++", random_state=0)) def test_max_iter_error(): km = KMeans(max_iter=-1) assert_raise_message(ValueError, 'Number of iterations should be', km.fit, X) def test_float_precision(): km = KMeans(n_init=1, random_state=30) mb_km = MiniBatchKMeans(n_init=1, random_state=30) inertia = {} X_new = {} centers = {} for estimator in [km, mb_km]: for is_sparse in [False, True]: for dtype in [np.float64, np.float32]: if is_sparse: X_test = sp.csr_matrix(X_csr, dtype=dtype) else: X_test = X.astype(dtype) estimator.fit(X_test) # 
dtype of cluster centers has to be the dtype of the input # data assert_equal(estimator.cluster_centers_.dtype, dtype) inertia[dtype] = estimator.inertia_ X_new[dtype] = estimator.transform(X_test) centers[dtype] = estimator.cluster_centers_ # ensure the extracted row is a 2d array assert_equal(estimator.predict(X_test[:1]), estimator.labels_[0]) if hasattr(estimator, 'partial_fit'): estimator.partial_fit(X_test[0:3]) # dtype of cluster centers has to stay the same after # partial_fit assert_equal(estimator.cluster_centers_.dtype, dtype) # compare arrays with low precision since the difference between # 32 and 64 bit sometimes makes a difference up to the 4th decimal # place assert_array_almost_equal(inertia[np.float32], inertia[np.float64], decimal=4) assert_array_almost_equal(X_new[np.float32], X_new[np.float64], decimal=4) assert_array_almost_equal(centers[np.float32], centers[np.float64], decimal=4) def test_KMeans_init_centers(): # This test is used to check KMeans won't mutate the user provided input # array silently even if input data and init centers have the same type X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]]) init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]]) for dtype in [np.int32, np.int64, np.float32, np.float64]: X_test = dtype(X_small) init_centers_test = dtype(init_centers) assert_array_equal(init_centers, init_centers_test) km = KMeans(init=init_centers_test, n_clusters=3, n_init=1) km.fit(X_test) assert_equal(False, np.may_share_memory(km.cluster_centers_, init_centers))
bsd-3-clause
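To make the dense-versus-sparse behaviour that test_k_means_plus_plus_init and test_k_means_plus_plus_init_sparse exercise easier to try outside the test suite, here is a minimal standalone sketch. It assumes a scikit-learn installation exposing KMeans, make_blobs and v_measure_score; the blob centers and sample counts below are illustrative values chosen for this sketch, not the ones used in the test module.

import numpy as np
from scipy import sparse as sp
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import v_measure_score

# Illustrative, well-separated centers so the clustering is easy to recover.
centers = np.array([[0.0, 5.0, 0.0],
                    [4.0, 0.0, 1.0],
                    [1.0, 1.0, 5.0]])
X, true_labels = make_blobs(n_samples=100, centers=centers,
                            cluster_std=1.0, random_state=42)

# Fit the same configuration on dense and on CSR input.
km_dense = KMeans(init="k-means++", n_clusters=3, n_init=10,
                  random_state=42).fit(X)
km_sparse = KMeans(init="k-means++", n_clusters=3, n_init=10,
                   random_state=42).fit(sp.csr_matrix(X))

# Both fits should recover the generating labels up to a permutation,
# so v_measure_score should be at (or extremely close to) 1.0 for each.
print(v_measure_score(true_labels, km_dense.labels_))
print(v_measure_score(true_labels, km_sparse.labels_))

This mirrors what _check_fitted_model asserts in the test file, but as a plain script rather than a unit test.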
e-q/scipy
doc/source/tutorial/examples/optimize_global_1.py
15
1752
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize


def eggholder(x):
    return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47))))
            - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))


bounds = [(-512, 512), (-512, 512)]

x = np.arange(-512, 513)
y = np.arange(-512, 513)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack([xgrid, ygrid])

results = dict()
results['shgo'] = optimize.shgo(eggholder, bounds)
results['DA'] = optimize.dual_annealing(eggholder, bounds)
results['DE'] = optimize.differential_evolution(eggholder, bounds)
results['BH'] = optimize.basinhopping(eggholder, bounds)
results['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=200, iters=5,
                                      sampling_method='sobol')

fig = plt.figure(figsize=(4.5, 4.5))
ax = fig.add_subplot(111)
im = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',
               cmap='gray')
ax.set_xlabel('x')
ax.set_ylabel('y')


def plot_point(res, marker='o', color=None):
    ax.plot(512 + res.x[0], 512 + res.x[1], marker=marker, color=color, ms=10)


plot_point(results['BH'], color='y')  # basinhopping           - yellow
plot_point(results['DE'], color='c')  # differential_evolution - cyan
plot_point(results['DA'], color='w')  # dual_annealing         - white

# SHGO produces multiple minima, plot them all (with a smaller marker size)
plot_point(results['shgo'], color='r', marker='+')
plot_point(results['shgo_sobol'], color='r', marker='x')
for i in range(results['shgo_sobol'].xl.shape[0]):
    ax.plot(512 + results['shgo_sobol'].xl[i, 0],
            512 + results['shgo_sobol'].xl[i, 1],
            'ro', ms=2)

ax.set_xlim([-4, 514*2])
ax.set_ylim([-4, 514*2])
fig.tight_layout()
plt.show()
bsd-3-clause
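For readers who want to inspect the result objects that the plotting code above relies on (res.x for every solver, plus the xl array that shgo exposes), here is a small hedged sketch. It re-defines the eggholder objective so it runs on its own, and it assumes a SciPy release that ships shgo, dual_annealing and differential_evolution (1.2 or later); the printed labels are illustrative.

import numpy as np
from scipy import optimize


def eggholder(x):
    # Same objective as in the tutorial file above, repeated here so the
    # sketch is self-contained.
    return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47))))
            - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))


bounds = [(-512, 512), (-512, 512)]

# differential_evolution needs only the objective and the bounds; the
# returned OptimizeResult carries the best point (x) and its value (fun).
res_de = optimize.differential_evolution(eggholder, bounds)
print("DE  :", res_de.x, res_de.fun)

# shgo additionally reports every local minimum it found: res.xl holds the
# minimizer coordinates (one per row) and res.funl the matching values,
# which is what the plotting loop above iterates over.
res_shgo = optimize.shgo(eggholder, bounds, n=200, iters=5,
                         sampling_method='sobol')
print("SHGO:", res_shgo.x, res_shgo.fun)
print("local minima found:", res_shgo.xl.shape[0])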
naritta/numpy
numpy/lib/function_base.py
1
126797
from __future__ import division, absolute_import, print_function import warnings import sys import collections import operator import numpy as np import numpy.core.numeric as _nx from numpy.core import linspace, atleast_1d, atleast_2d from numpy.core.numeric import ( ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, empty_like, ndarray, around, floor, ceil, take, dot, where, intp, integer, isscalar ) from numpy.core.umath import ( pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, mod, exp, log10 ) from numpy.core.fromnumeric import ( ravel, nonzero, sort, partition, mean ) from numpy.core.numerictypes import typecodes, number from numpy.lib.twodim_base import diag from .utils import deprecate from numpy.core.multiarray import _insert, add_docstring from numpy.core.multiarray import digitize, bincount, interp as compiled_interp from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc from numpy.compat import long # Force range to be a generator, for np.delete's usage. if sys.version_info[0] < 3: range = xrange __all__ = [ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' ] def iterable(y): """ Check whether or not an object can be iterated over. Parameters ---------- y : object Input object. Returns ------- b : {0, 1} Return 1 if the object has an iterator method or is a sequence, and 0 otherwise. Examples -------- >>> np.iterable([1, 2, 3]) 1 >>> np.iterable(2) 0 """ try: iter(y) except: return 0 return 1 def histogram(a, bins=10, range=None, normed=False, weights=None, density=None): """ Compute the histogram of a set of data. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars, optional If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. normed : bool, optional This keyword is deprecated in Numpy 1.6 due to confusing/buggy behavior. It will be removed in Numpy 2.0. Use the density keyword instead. If False, the result will contain the number of samples in each bin. If True, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that this latter behavior is known to be buggy with unequal bin widths; use `density` instead. weights : array_like, optional An array of weights, of the same shape as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `normed` is True, the weights are normalized, so that the integral of the density over the range remains 1 density : bool, optional If False, the result will contain the number of samples in each bin. If True, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. 
Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the `normed` keyword if given. Returns ------- hist : array The values of the histogram. See `normed` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. See Also -------- histogramdd, bincount, searchsorted, digitize Notes ----- All but the last (righthand-most) bin is half-open. In other words, if `bins` is:: [1, 2, 3, 4] then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* 4. Examples -------- >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) (array([1, 4, 1]), array([0, 1, 2, 3])) >>> a = np.arange(5) >>> hist, bin_edges = np.histogram(a, density=True) >>> hist array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) >>> hist.sum() 2.4999999999999996 >>> np.sum(hist*np.diff(bin_edges)) 1.0 """ a = asarray(a) if weights is not None: weights = asarray(weights) if np.any(weights.shape != a.shape): raise ValueError( 'weights should have the same shape as a.') weights = weights.ravel() a = a.ravel() if (range is not None): mn, mx = range if (mn > mx): raise AttributeError( 'max must be larger than min in range parameter.') if not iterable(bins): if np.isscalar(bins) and bins < 1: raise ValueError( '`bins` should be a positive integer.') if range is None: if a.size == 0: # handle empty arrays. Can't determine range, so use 0-1. range = (0, 1) else: range = (a.min(), a.max()) mn, mx = [mi + 0.0 for mi in range] if mn == mx: mn -= 0.5 mx += 0.5 bins = linspace(mn, mx, bins + 1, endpoint=True) else: bins = asarray(bins) if (np.diff(bins) < 0).any(): raise AttributeError( 'bins must increase monotonically.') # Histogram is an integer or a float array depending on the weights. if weights is None: ntype = int else: ntype = weights.dtype n = np.zeros(bins.shape, ntype) block = 65536 if weights is None: for i in arange(0, len(a), block): sa = sort(a[i:i+block]) n += np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] else: zero = array(0, dtype=ntype) for i in arange(0, len(a), block): tmp_a = a[i:i+block] tmp_w = weights[i:i+block] sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] cw = np.concatenate(([zero, ], sw.cumsum())) bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] n += cw[bin_index] n = np.diff(n) if density is not None: if density: db = array(np.diff(bins), float) return n/db/n.sum(), bins else: return n, bins else: # deprecated, buggy behavior. Remove for Numpy 2.0 if normed: db = array(np.diff(bins), float) return n/(n*db).sum(), bins else: return n, bins def histogramdd(sample, bins=10, range=None, normed=False, weights=None): """ Compute the multidimensional histogram of some data. Parameters ---------- sample : array_like The data to be histogrammed. It must be an (N,D) array or data that can be converted to such. The rows of the resulting array are the coordinates of points in a D dimensional polytope. bins : sequence or int, optional The bin specification: * A sequence of arrays describing the bin edges along each dimension. 
* The number of bins for each dimension (nx, ny, ... =bins) * The number of bins for all dimensions (nx=ny=...=bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitly in `bins`. Defaults to the minimum and maximum values along each dimension. normed : bool, optional If False, returns the number of samples in each bin. If True, returns the bin density ``bin_count / sample_count / bin_volume``. weights : (N,) array_like, optional An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. Weights are normalized to 1 if normed is True. If normed is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray The multidimensional histogram of sample x. See normed and weights for the different possible semantics. edges : list A list of D arrays describing the bin edges for each dimension. See Also -------- histogram: 1-D histogram histogram2d: 2-D histogram Examples -------- >>> r = np.random.randn(100,3) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) """ try: # Sample is an ND-array. N, D = sample.shape except (AttributeError, ValueError): # Sample is a sequence of 1D arrays. sample = atleast_2d(sample).T N, D = sample.shape nbin = empty(D, int) edges = D*[None] dedges = D*[None] if weights is not None: weights = asarray(weights) try: M = len(bins) if M != D: raise AttributeError( 'The dimension of bins must be equal to the dimension of the ' ' sample x.') except TypeError: # bins is an integer bins = D*[bins] # Select range for each dimension # Used only if number of bins is given. if range is None: # Handle empty input. Range can't be determined in that case, use 0-1. if N == 0: smin = zeros(D) smax = ones(D) else: smin = atleast_1d(array(sample.min(0), float)) smax = atleast_1d(array(sample.max(0), float)) else: smin = zeros(D) smax = zeros(D) for i in arange(D): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in arange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # avoid rounding issues for comparisons when dealing with inexact types if np.issubdtype(sample.dtype, np.inexact): edge_dt = sample.dtype else: edge_dt = float # Create edge arrays for i in arange(D): if isscalar(bins[i]): if bins[i] < 1: raise ValueError( "Element at index %s in `bins` should be a positive " "integer." % i) nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) else: edges[i] = asarray(bins[i], edge_dt) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = diff(edges[i]) if np.any(np.asarray(dedges[i]) <= 0): raise ValueError( "Found bin edge of size <= 0. Did you specify `bins` with" "non-monotonic sequence?") nbin = asarray(nbin) # Handle empty input. if N == 0: return np.zeros(nbin-2), edges # Compute the bin number each sample falls into. Ncount = {} for i in arange(D): Ncount[i] = digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right edge to be # counted in the last bin, and not as an outlier. for i in arange(D): # Rounding precision mindiff = dedges[i].min() if not np.isinf(mindiff): decimal = int(-log10(mindiff)) + 6 # Find which points are on the rightmost edge. 
not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal)) # Shift these points one bin to the left. Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 # Flattened histogram matrix (1D) # Reshape is used so that overlarge arrays # will raise an error. hist = zeros(nbin, float).reshape(-1) # Compute the sample indices in the flattened histogram matrix. ni = nbin.argsort() xy = zeros(N, int) for i in arange(0, D-1): xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() xy += Ncount[ni[-1]] # Compute the number of repetitions in xy and assign it to the # flattened histmat. if len(xy) == 0: return zeros(nbin-2, int), edges flatcount = bincount(xy, weights) a = arange(len(flatcount)) hist[a] = flatcount # Shape into a proper matrix hist = hist.reshape(sort(nbin)) for i in arange(nbin.size): j = ni.argsort()[i] hist = hist.swapaxes(i, j) ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). core = D*[slice(1, -1)] hist = hist[core] # Normalize if normed is True if normed: s = hist.sum() for i in arange(D): shape = ones(D, int) shape[i] = nbin[i] - 2 hist = hist / dedges[i].reshape(shape) hist /= s if (hist.shape != nbin - 2).any(): raise RuntimeError( "Internal Shape Error") return hist, edges def average(a, axis=None, weights=None, returned=False): """ Compute the weighted average along the specified axis. Parameters ---------- a : array_like Array containing data to be averaged. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which to average `a`. If `None`, averaging is done over the flattened array. weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the average according to its associated weight. The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If `weights=None`, then all data in `a` are assumed to have a weight equal to one. returned : bool, optional Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) is returned, otherwise only the average is returned. If `weights=None`, `sum_of_weights` is equivalent to the number of elements over which the average is taken. Returns ------- average, [sum_of_weights] : array_type or double Return the average along the specified axis. When returned is `True`, return a tuple with the average as the first element and the sum of the weights as the second element. The return type is `Float` if `a` is of integer type, otherwise it is of the same type as `a`. `sum_of_weights` is of the same type as `average`. Raises ------ ZeroDivisionError When all weights along axis are zero. See `numpy.ma.average` for a version robust to this type of error. TypeError When the length of 1D `weights` is not the same as the shape of `a` along axis. See Also -------- mean ma.average : average for masked arrays -- useful if your data contains "missing" values Examples -------- >>> data = range(1,5) >>> data [1, 2, 3, 4] >>> np.average(data) 2.5 >>> np.average(range(1,11), weights=range(10,0,-1)) 4.0 >>> data = np.arange(6).reshape((3,2)) >>> data array([[0, 1], [2, 3], [4, 5]]) >>> np.average(data, axis=1, weights=[1./4, 3./4]) array([ 0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) Traceback (most recent call last): ... TypeError: Axis must be specified when shapes of a and weights differ. 
""" if not isinstance(a, np.matrix): a = np.asarray(a) if weights is None: avg = a.mean(axis) scl = avg.dtype.type(a.size/avg.size) else: a = a + 0.0 wgt = np.asarray(weights) # Sanity checks if a.shape != wgt.shape: if axis is None: raise TypeError( "Axis must be specified when shapes of a and weights " "differ.") if wgt.ndim != 1: raise TypeError( "1D weights expected when shapes of a and weights differ.") if wgt.shape[0] != a.shape[axis]: raise ValueError( "Length of weights not compatible with specified axis.") # setup wgt to broadcast along axis wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis) scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype)) if (scl == 0.0).any(): raise ZeroDivisionError( "Weights sum to zero, can't be normalized") avg = np.multiply(a, wgt).sum(axis)/scl if returned: scl = np.multiply(avg, 0) + scl return avg, scl else: return avg def asarray_chkfinite(a, dtype=None, order=None): """ Convert the input to an array, checking for NaNs or Infs. Parameters ---------- a : array_like Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. Success requires no NaNs or Infs. dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major ('C') or column-major ('FORTRAN') memory representation. Defaults to 'C'. Returns ------- out : ndarray Array interpretation of `a`. No copy is performed if the input is already an ndarray. If `a` is a subclass of ndarray, a base class ndarray is returned. Raises ------ ValueError Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). See Also -------- asarray : Create and array. asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. asfarray : Convert input to a floating point ndarray. asfortranarray : Convert input to an ndarray with column-major memory order. fromiter : Create an array from an iterator. fromfunction : Construct an array by executing a function on grid positions. Examples -------- Convert a list into an array. If all elements are finite ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] >>> np.asarray_chkfinite(a, dtype=float) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. >>> a = [1, 2, np.inf] >>> try: ... np.asarray_chkfinite(a) ... except ValueError: ... print 'ValueError' ... ValueError """ a = asarray(a, dtype=dtype, order=order) if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): raise ValueError( "array must not contain infs or NaNs") return a def piecewise(x, condlist, funclist, *args, **kw): """ Evaluate a piecewise-defined function. Given a set of conditions and corresponding functions, evaluate each function on the input data wherever its condition is true. Parameters ---------- x : ndarray The input domain. condlist : list of bool arrays Each boolean array corresponds to a function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as the output value. Each boolean array in `condlist` selects a piece of `x`, and should therefore be of the same shape as `x`. The length of `condlist` must correspond to that of `funclist`. If one extra function is given, i.e. if ``len(funclist) - len(condlist) == 1``, then that extra function is the default value, used wherever all conditions are false. 
funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding condition is True. It should take an array as input and give an array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. args : tuple, optional Any further arguments given to `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is called as ``f(x, 1, 'a')``. kw : dict, optional Keyword arguments used in calling `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., lambda=1)``, then each function is called as ``f(x, lambda=1)``. Returns ------- out : ndarray The output is the same shape and type as x and is found by calling the functions in `funclist` on the appropriate portions of `x`, as defined by the boolean arrays in `condlist`. Portions not covered by any condition have a default value of 0. See Also -------- choose, select, where Notes ----- This is similar to choose or select, except that functions are evaluated on elements of `x` that satisfy the corresponding condition from `condlist`. The result is:: |-- |funclist[0](x[condlist[0]]) out = |funclist[1](x[condlist[1]]) |... |funclist[n2](x[condlist[n2]]) |-- Examples -------- Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) array([-1., -1., -1., 1., 1., 1.]) Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for ``x >= 0``. >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) """ x = asanyarray(x) n2 = len(funclist) if (isscalar(condlist) or not (isinstance(condlist[0], list) or isinstance(condlist[0], ndarray))): condlist = [condlist] condlist = array(condlist, dtype=bool) n = len(condlist) # This is a hack to work around problems with NumPy's # handling of 0-d arrays and boolean indexing with # numpy.bool_ scalars zerod = False if x.ndim == 0: x = x[None] zerod = True if condlist.shape[-1] != 1: condlist = condlist.T if n == n2 - 1: # compute the "otherwise" condition. totlist = np.logical_or.reduce(condlist, axis=0) condlist = np.vstack([condlist, ~totlist]) n += 1 if (n != n2): raise ValueError( "function list and condition list must be the same") y = zeros(x.shape, x.dtype) for k in range(n): item = funclist[k] if not isinstance(item, collections.Callable): y[condlist[k]] = item else: vals = x[condlist[k]] if vals.size > 0: y[condlist[k]] = item(vals, *args, **kw) if zerod: y = y.squeeze() return y def select(condlist, choicelist, default=0): """ Return an array drawn from elements in choicelist, depending on conditions. Parameters ---------- condlist : list of bool ndarrays The list of conditions which determine from which array in `choicelist` the output elements are taken. When multiple conditions are satisfied, the first one encountered in `condlist` is used. choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. default : scalar, optional The element inserted in `output` when all conditions evaluate to False. Returns ------- output : ndarray The output at position m is the m-th element of the array in `choicelist` where the m-th element of the corresponding array in `condlist` is True. 
See Also -------- where : Return elements from one of two arrays depending on condition. take, choose, compress, diag, diagonal Examples -------- >>> x = np.arange(10) >>> condlist = [x<3, x>5] >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) """ # Check the size of condlist and choicelist are the same, or abort. if len(condlist) != len(choicelist): raise ValueError( 'list of cases must be same length as list of conditions') # Now that the dtype is known, handle the deprecated select([], []) case if len(condlist) == 0: warnings.warn("select with an empty condition list is not possible" "and will be deprecated", DeprecationWarning) return np.asarray(default)[()] choicelist = [np.asarray(choice) for choice in choicelist] choicelist.append(np.asarray(default)) # need to get the result type before broadcasting for correct scalar # behaviour dtype = np.result_type(*choicelist) # Convert conditions to arrays and broadcast conditions and choices # as the shape is needed for the result. Doing it seperatly optimizes # for example when all choices are scalars. condlist = np.broadcast_arrays(*condlist) choicelist = np.broadcast_arrays(*choicelist) # If cond array is not an ndarray in boolean format or scalar bool, abort. deprecated_ints = False for i in range(len(condlist)): cond = condlist[i] if cond.dtype.type is not np.bool_: if np.issubdtype(cond.dtype, np.integer): # A previous implementation accepted int ndarrays accidentally. # Supported here deliberately, but deprecated. condlist[i] = condlist[i].astype(bool) deprecated_ints = True else: raise ValueError( 'invalid entry in choicelist: should be boolean ndarray') if deprecated_ints: msg = "select condlists containing integer ndarrays is deprecated " \ "and will be removed in the future. Use `.astype(bool)` to " \ "convert to bools." warnings.warn(msg, DeprecationWarning) if choicelist[0].ndim == 0: # This may be common, so avoid the call. result_shape = condlist[0].shape else: result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape result = np.full(result_shape, choicelist[-1], dtype) # Use np.copyto to burn each choicelist array onto result, using the # corresponding condlist as a boolean mask. This is done in reverse # order since the first choice should take precedence. choicelist = choicelist[-2::-1] condlist = condlist[::-1] for choice, cond in zip(choicelist, condlist): np.copyto(result, choice, where=cond) return result def copy(a, order='K'): """ Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:ndarray.copy are very similar, but have different default values for their order= arguments.) Returns ------- arr : ndarray Array interpretation of `a`. Notes ----- This is equivalent to >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False """ return array(a, order=order, copy=True) # Basic operations def gradient(f, *varargs, **kwargs): """ Return the gradient of an N-dimensional array. 
The gradient is computed using second order accurate central differences in the interior and either first differences or second order accurate one-sides (forward or backwards) differences at the boundaries. The returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like An N-dimensional array containing samples of a scalar function. varargs : list of scalar, optional N scalars specifying the sample distances for each dimension, i.e. `dx`, `dy`, `dz`, ... Default distance: 1. edge_order : {1, 2}, optional Gradient is calculated using N\ :sup:`th` order accurate differences at the boundaries. Default: 1. .. versionadded:: 1.9.1 Returns ------- gradient : ndarray N arrays of the same shape as `f` giving the derivative of `f` with respect to each dimension. Examples -------- >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) >>> np.gradient(x) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(x, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] >>> x = np.array([0, 1, 2, 3, 4]) >>> dx = np.gradient(x) >>> y = x**2 >>> np.gradient(y, dx, edge_order=2) array([-0., 2., 4., 6., 8.]) """ f = np.asanyarray(f) N = len(f.shape) # number of dimensions n = len(varargs) if n == 0: dx = [1.0]*N elif n == 1: dx = [varargs[0]]*N elif n == N: dx = list(varargs) else: raise SyntaxError( "invalid number of arguments") edge_order = kwargs.pop('edge_order', 1) if kwargs: raise TypeError('"{}" are not valid keyword arguments.'.format( '", "'.join(kwargs.keys()))) if edge_order > 2: raise ValueError("'edge_order' greater than 2 not supported") # use central differences on interior and one-sided differences on the # endpoints. This preserves second order-accuracy over the full domain. outvals = [] # create slice objects --- initially all are [:, :, ..., :] slice1 = [slice(None)]*N slice2 = [slice(None)]*N slice3 = [slice(None)]*N slice4 = [slice(None)]*N otype = f.dtype.char if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: otype = 'd' # Difference of datetime64 elements results in timedelta64 if otype == 'M': # Need to use the full dtype name because it contains unit information otype = f.dtype.name.replace('datetime', 'timedelta') elif otype == 'm': # Needs to keep the specific units, can't be a general unit otype = f.dtype # Convert datetime64 data into ints. Make dummy variable `y` # that is a view of ints if the data is datetime64, otherwise # just set y equal to the the array `f`. 
if f.dtype.char in ["M", "m"]: y = f.view('int64') else: y = f for axis in range(N): if y.shape[axis] < 2: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least two elements are required.") # Numerical differentiation: 1st order edges, 2nd order interior if y.shape[axis] == 2 or edge_order == 1: # Use first order differences for time data out = np.empty_like(y, dtype=otype) slice1[axis] = slice(1, -1) slice2[axis] = slice(2, None) slice3[axis] = slice(None, -2) # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 out[slice1] = (y[slice2] - y[slice3])/2.0 slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 # 1D equivalent -- out[0] = (y[1] - y[0]) out[slice1] = (y[slice2] - y[slice3]) slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 # 1D equivalent -- out[-1] = (y[-1] - y[-2]) out[slice1] = (y[slice2] - y[slice3]) # Numerical differentiation: 2st order edges, 2nd order interior else: # Use second order differences where possible out = np.empty_like(y, dtype=otype) slice1[axis] = slice(1, -1) slice2[axis] = slice(2, None) slice3[axis] = slice(None, -2) # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 out[slice1] = (y[slice2] - y[slice3])/2.0 slice1[axis] = 0 slice2[axis] = 0 slice3[axis] = 1 slice4[axis] = 2 # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0 out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 slice4[axis] = -3 # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 # divide by step size out /= dx[axis] outvals.append(out) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) slice2[axis] = slice(None) slice3[axis] = slice(None) slice4[axis] = slice(None) if N == 1: return outvals[0] else: return outvals def diff(a, n=1, axis=-1): """ Calculate the n-th order discrete difference along given axis. The first order difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher order differences are calculated by using `diff` recursively. Parameters ---------- a : array_like Input array n : int, optional The number of times values are differenced. axis : int, optional The axis along which the difference is taken, default is the last axis. Returns ------- diff : ndarray The `n` order differences. The shape of the output is the same as `a` except along `axis` where the dimension is smaller by `n`. See Also -------- gradient, ediff1d, cumsum Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) >>> np.diff(x, n=2) array([ 1, 1, -10]) >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], [5, 1, 2]]) >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) """ if n == 0: return a if n < 0: raise ValueError( "order must be non-negative but got " + repr(n)) a = asanyarray(a) nd = len(a.shape) slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) if n > 1: return diff(a[slice1]-a[slice2], n-1, axis=axis) else: return a[slice1]-a[slice2] def interp(x, xp, fp, left=None, right=None, period=None): """ One-dimensional linear interpolation. Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data-points. Parameters ---------- x : array_like The x-coordinates of the interpolated values. 
xp : 1-D sequence of floats The x-coordinates of the data points, must be increasing if argument `period` is not specified. Otherwise, `xp` is internally sorted after normalizing the periodic boundaries with ``xp = xp % period``. fp : 1-D sequence of floats The y-coordinates of the data points, same length as `xp`. left : float, optional Value to return for `x < xp[0]`, default is `fp[0]`. right : float, optional Value to return for `x > xp[-1]`, default is `fp[-1]`. period : None or float, optional .. versionadded:: 1.10.0 A period for the x-coordinates. This parameter allows the proper interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. Returns ------- y : float or ndarray The interpolated values, same shape as `x`. Raises ------ ValueError If `xp` and `fp` have different length If `xp` or `fp` are not 1-D sequences If `period == 0` Notes ----- Does not check that the x-coordinate sequence `xp` is increasing. If `xp` is not increasing, the results are nonsense. A simple check for increasing is:: np.all(np.diff(xp) > 0) Examples -------- >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) 1.0 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) array([ 3. , 3. , 2.5 , 0.56, 0. ]) >>> UNDEF = -99.0 >>> np.interp(3.14, xp, fp, right=UNDEF) -99.0 Plot an interpolant to the sine function: >>> x = np.linspace(0, 2*np.pi, 10) >>> y = np.sin(x) >>> xvals = np.linspace(0, 2*np.pi, 50) >>> yinterp = np.interp(xvals, x, y) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(xvals, yinterp, '-x') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.show() Interpolation with periodic x-coordinates: >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] >>> xp = [190, -190, 350, -350] >>> fp = [5, 10, 3, 4] >>> np.interp(x, xp, fp, period=360) array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) """ if period is None: if isinstance(x, (float, int, number)): return compiled_interp([x], xp, fp, left, right).item() elif isinstance(x, np.ndarray) and x.ndim == 0: return compiled_interp([x], xp, fp, left, right).item() else: return compiled_interp(x, xp, fp, left, right) else: if period == 0: raise ValueError("period must be a non-zero value") period = abs(period) left = None right = None return_array = True if isinstance(x, (float, int, number)): return_array = False x = [x] x = np.asarray(x, dtype=np.float64) xp = np.asarray(xp, dtype=np.float64) fp = np.asarray(fp, dtype=np.float64) if xp.ndim != 1 or fp.ndim != 1: raise ValueError("Data points must be 1-D sequences") if xp.shape[0] != fp.shape[0]: raise ValueError("fp and xp are not of the same length") # normalizing periodic boundaries x = x % period xp = xp % period asort_xp = np.argsort(xp) xp = xp[asort_xp] fp = fp[asort_xp] xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) fp = np.concatenate((fp[-1:], fp, fp[0:1])) if return_array: return compiled_interp(x, xp, fp, left, right) else: return compiled_interp(x, xp, fp, left, right).item() def angle(z, deg=0): """ Return the angle of the complex argument. Parameters ---------- z : array_like A complex number or sequence of complex numbers. deg : bool, optional Return angle in degrees if True, radians if False (default). Returns ------- angle : ndarray or scalar The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. See Also -------- arctan2 absolute Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. 
, 1.57079633, 0.78539816]) >>> np.angle(1+1j, deg=True) # in degrees 45.0 """ if deg: fact = 180/pi else: fact = 1.0 z = asarray(z) if (issubclass(z.dtype.type, _nx.complexfloating)): zimag = z.imag zreal = z.real else: zimag = 0 zreal = z return arctan2(zimag, zreal) * fact def unwrap(p, discont=pi, axis=-1): """ Unwrap by changing deltas between values to 2*pi complement. Unwrap radian phase `p` by changing absolute jumps greater than `discont` to their 2*pi complement along the given axis. Parameters ---------- p : array_like Input array. discont : float, optional Maximum discontinuity between values, default is ``pi``. axis : int, optional Axis along which unwrap will operate, default is the last axis. Returns ------- out : ndarray Output array. See Also -------- rad2deg, deg2rad Notes ----- If the discontinuity in `p` is smaller than ``pi``, but larger than `discont`, no unwrapping is done because taking the 2*pi complement would only make the discontinuity larger. Examples -------- >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) """ p = asarray(p) nd = len(p.shape) dd = diff(p, axis=axis) slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) ddmod = mod(dd + pi, 2*pi) - pi _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up def sort_complex(a): """ Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> np.sort_complex([5, 3, 6, 2, 1]) array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) b.sort() if not issubclass(b.dtype.type, _nx.complexfloating): if b.dtype.char in 'bhBH': return b.astype('F') elif b.dtype.char == 'g': return b.astype('G') else: return b.astype('D') else: return b def trim_zeros(filt, trim='fb'): """ Trim the leading and/or trailing zeros from a 1-D array or sequence. Parameters ---------- filt : 1-D array or sequence Input array. trim : str, optional A string with 'f' representing trim from front and 'b' to trim from back. Default is 'fb', trim zeros from both front and back of the array. Returns ------- trimmed : 1-D array or sequence The result of trimming the input. The input data type is preserved. Examples -------- >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) >>> np.trim_zeros(a, 'b') array([0, 0, 0, 1, 2, 3, 0, 2, 1]) The input data type is preserved, list/tuple in means list/tuple out. >>> np.trim_zeros([0, 1, 2, 0]) [1, 2] """ first = 0 trim = trim.upper() if 'F' in trim: for i in filt: if i != 0.: break else: first = first + 1 last = len(filt) if 'B' in trim: for i in filt[::-1]: if i != 0.: break else: last = last - 1 return filt[first:last] @deprecate def unique(x): """ This function is deprecated. Use numpy.lib.arraysetops.unique() instead. 
""" try: tmp = x.flatten() if tmp.size == 0: return tmp tmp.sort() idx = concatenate(([True], tmp[1:] != tmp[:-1])) return tmp[idx] except AttributeError: items = sorted(set(x)) return asarray(items) def extract(condition, arr): """ Return the elements of an array that satisfy some condition. This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. Note that `place` does the exact opposite of `extract`. Parameters ---------- condition : array_like An array whose nonzero or True entries indicate the elements of `arr` to extract. arr : array_like Input array of the same size as `condition`. Returns ------- extract : ndarray Rank 1 array of values from `arr` where `condition` is True. See Also -------- take, put, copyto, compress, place Examples -------- >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> condition = np.mod(arr, 3)==0 >>> condition array([[ True, False, False, True], [False, False, True, False], [False, True, False, False]], dtype=bool) >>> np.extract(condition, arr) array([0, 3, 6, 9]) If `condition` is boolean: >>> arr[condition] array([0, 3, 6, 9]) """ return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) def place(arr, mask, vals): """ Change elements of an array based on conditional and input values. Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that `place` uses the first N elements of `vals`, where N is the number of True values in `mask`, while `copyto` uses the elements where `mask` is True. Note that `extract` does the exact opposite of `place`. Parameters ---------- arr : array_like Array to put data into. mask : array_like Boolean mask array. Must have the same size as `a`. vals : 1-D sequence Values to put into `a`. Only the first N elements are used, where N is the number of True values in `mask`. If `vals` is smaller than N it will be repeated. See Also -------- copyto, put, take, extract Examples -------- >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr array([[ 0, 1, 2], [44, 55, 44]]) """ return _insert(arr, mask, vals) def disp(mesg, device=None, linefeed=True): """ Display a message on a device. Parameters ---------- mesg : str Message to display. device : object Device to write message. If None, defaults to ``sys.stdout`` which is very similar to ``print``. `device` needs to have ``write()`` and ``flush()`` methods. linefeed : bool, optional Option whether to print a line feed or not. Defaults to True. Raises ------ AttributeError If `device` does not have a ``write()`` or ``flush()`` method. Examples -------- Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: >>> from StringIO import StringIO >>> buf = StringIO() >>> np.disp('"Display" in a file', device=buf) >>> buf.getvalue() '"Display" in a file\\n' """ if device is None: device = sys.stdout if linefeed: device.write('%s\n' % mesg) else: device.write('%s' % mesg) device.flush() return class vectorize(object): """ vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False) Generalized function class. Define a vectorized function which takes a nested sequence of objects or numpy arrays as inputs and returns a numpy array as output. The vectorized function evaluates `pyfunc` over successive tuples of the input arrays like the python map function, except it uses the broadcasting rules of numpy. 
The data type of the output of `vectorized` is determined by calling the function with the first element of the input. This can be avoided by specifying the `otypes` argument. Parameters ---------- pyfunc : callable A python function or method. otypes : str or list of dtypes, optional The output data type. It must be specified as either a string of typecode characters or a list of data type specifiers. There should be one data type specifier for each output. doc : str, optional The docstring for the function. If `None`, the docstring will be the ``pyfunc.__doc__``. excluded : set, optional Set of strings or integers representing the positional or keyword arguments for which the function will not be vectorized. These will be passed directly to `pyfunc` unmodified. .. versionadded:: 1.7.0 cache : bool, optional If `True`, then cache the first function call that determines the number of outputs if `otypes` is not provided. .. versionadded:: 1.7.0 Returns ------- vectorized : callable Vectorized function. Examples -------- >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... if a > b: ... return a - b ... else: ... return a + b >>> vfunc = np.vectorize(myfunc) >>> vfunc([1, 2, 3, 4], 2) array([3, 4, 1, 2]) The docstring is taken from the input function to `vectorize` unless it is specified >>> vfunc.__doc__ 'Return a-b if a>b, otherwise return a+b' >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') >>> vfunc.__doc__ 'Vectorized `myfunc`' The output type is determined by evaluating the first element of the input, unless it is specified >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.int32'> >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.float64'> The `excluded` argument can be used to prevent vectorizing over certain arguments. This can be useful for array-like arguments of a fixed length such as the coefficients for a polynomial as in `polyval`: >>> def mypolyval(p, x): ... _p = list(p) ... res = _p.pop(0) ... while _p: ... res = res*x + _p.pop(0) ... return res >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) >>> vpolyval(p=[1, 2, 3], x=[0, 1]) array([3, 6]) Positional arguments may also be excluded by specifying their position: >>> vpolyval.excluded.add(0) >>> vpolyval([1, 2, 3], x=[0, 1]) array([3, 6]) Notes ----- The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. If `otypes` is not specified, then a call to the function with the first argument will be used to determine the number of outputs. The results of this call will be cached if `cache` is `True` to prevent calling the function twice. However, to implement the cache, the original function must be wrapped which will slow down subsequent calls, so only do this if your function is expensive. The new keyword argument interface and `excluded` argument support further degrades performance. 
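    A further illustrative sketch of pinning the output dtype with `otypes`
    (the helper ``clip_diff`` below is made up for demonstration):

    >>> import numpy as np
    >>> def clip_diff(a, b):
    ...     return a - b if a > b else 0
    >>> vclip = np.vectorize(clip_diff, otypes=[float])
    >>> vclip([1, 5, 10], 4)
    array([ 0.,  1.,  6.])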
""" def __init__(self, pyfunc, otypes='', doc=None, excluded=None, cache=False): self.pyfunc = pyfunc self.cache = cache self._ufunc = None # Caching to improve default performance if doc is None: self.__doc__ = pyfunc.__doc__ else: self.__doc__ = doc if isinstance(otypes, str): self.otypes = otypes for char in self.otypes: if char not in typecodes['All']: raise ValueError( "Invalid otype specified: %s" % (char,)) elif iterable(otypes): self.otypes = ''.join([_nx.dtype(x).char for x in otypes]) else: raise ValueError( "Invalid otype specification") # Excluded variable support if excluded is None: excluded = set() self.excluded = set(excluded) def __call__(self, *args, **kwargs): """ Return arrays with the results of `pyfunc` broadcast (vectorized) over `args` and `kwargs` not in `excluded`. """ excluded = self.excluded if not kwargs and not excluded: func = self.pyfunc vargs = args else: # The wrapper accepts only positional arguments: we use `names` and # `inds` to mutate `the_args` and `kwargs` to pass to the original # function. nargs = len(args) names = [_n for _n in kwargs if _n not in excluded] inds = [_i for _i in range(nargs) if _i not in excluded] the_args = list(args) def func(*vargs): for _n, _i in enumerate(inds): the_args[_i] = vargs[_n] kwargs.update(zip(names, vargs[len(inds):])) return self.pyfunc(*the_args, **kwargs) vargs = [args[_i] for _i in inds] vargs.extend([kwargs[_n] for _n in names]) return self._vectorize_call(func=func, args=vargs) def _get_ufunc_and_otypes(self, func, args): """Return (ufunc, otypes).""" # frompyfunc will fail if args is empty if not args: raise ValueError('args can not be empty') if self.otypes: otypes = self.otypes nout = len(otypes) # Note logic here: We only *use* self._ufunc if func is self.pyfunc # even though we set self._ufunc regardless. if func is self.pyfunc and self._ufunc is not None: ufunc = self._ufunc else: ufunc = self._ufunc = frompyfunc(func, len(args), nout) else: # Get number of outputs and output types by calling the function on # the first entries of args. We also cache the result to prevent # the subsequent call when the ufunc is evaluated. # Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) inputs = [asarray(_a).flat[0] for _a in args] outputs = func(*inputs) # Performance note: profiling indicates that -- for simple # functions at least -- this wrapping can almost double the # execution time. # Hence we make it optional. if self.cache: _cache = [outputs] def _func(*vargs): if _cache: return _cache.pop() else: return func(*vargs) else: _func = func if isinstance(outputs, tuple): nout = len(outputs) else: nout = 1 outputs = (outputs,) otypes = ''.join([asarray(outputs[_k]).dtype.char for _k in range(nout)]) # Performance note: profiling indicates that creating the ufunc is # not a significant cost compared with wrapping so it seems not # worth trying to cache this. 
ufunc = frompyfunc(_func, len(args), nout) return ufunc, otypes def _vectorize_call(self, func, args): """Vectorized call to `func` over positional `args`.""" if not args: _res = func() else: ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) # Convert args to object arrays first inputs = [array(_a, copy=False, subok=True, dtype=object) for _a in args] outputs = ufunc(*inputs) if ufunc.nout == 1: _res = array(outputs, copy=False, subok=True, dtype=otypes[0]) else: _res = tuple([array(_x, copy=False, subok=True, dtype=_t) for _x, _t in zip(outputs, otypes)]) return _res def cov(m, y=None, rowvar=1, bias=0, ddof=None): """ Estimate a covariance matrix, given data. Covariance indicates the level to which two variables vary together. If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i`. Parameters ---------- m : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `m` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same form as that of `m`. rowvar : int, optional If `rowvar` is non-zero (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : int, optional Default normalization is by ``(N - 1)``, where ``N`` is the number of observations given (unbiased estimate). If `bias` is 1, then normalization is by ``N``. These values can be overridden by using the keyword ``ddof`` in numpy versions >= 1.5. ddof : int, optional .. versionadded:: 1.5 If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is the number of observations; this overrides the value implied by ``bias``. The default value is ``None``. Returns ------- out : ndarray The covariance matrix of the variables. See Also -------- corrcoef : Normalized covariance matrix Examples -------- Consider two variables, :math:`x_0` and :math:`x_1`, which correlate perfectly, but in opposite directions: >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T >>> x array([[0, 1, 2], [2, 1, 0]]) Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance matrix shows this clearly: >>> np.cov(x) array([[ 1., -1.], [-1., 1.]]) Note that element :math:`C_{0,1}`, which shows the correlation between :math:`x_0` and :math:`x_1`, is negative. 
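    An additional illustrative sketch of the ``ddof`` normalization and the
    corresponding correlation matrix (see `corrcoef` below); values are for
    demonstration only:

    >>> import numpy as np
    >>> x = np.array([[0, 1, 2], [2, 1, 0]])
    >>> np.cov(x, ddof=0)                  # divide by N instead of N - 1
    array([[ 0.66666667, -0.66666667],
           [-0.66666667,  0.66666667]])
    >>> np.corrcoef(x)                     # normalized counterpart
    array([[ 1., -1.],
           [-1.,  1.]])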
Further, note how `x` and `y` are combined: >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.vstack((x,y)) >>> print np.cov(X) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print np.cov(x, y) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print np.cov(x) 11.71 """ # Check inputs if ddof is not None and ddof != int(ddof): raise ValueError( "ddof must be integer") # Handles complex arrays too m = np.asarray(m) if y is None: dtype = np.result_type(m, np.float64) else: y = np.asarray(y) dtype = np.result_type(m, y, np.float64) X = array(m, ndmin=2, dtype=dtype) if X.shape[0] == 1: rowvar = 1 if rowvar: N = X.shape[1] axis = 0 else: N = X.shape[0] axis = 1 # check ddof if ddof is None: if bias == 0: ddof = 1 else: ddof = 0 fact = float(N - ddof) if fact <= 0: warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning) fact = 0.0 if y is not None: y = array(y, copy=False, ndmin=2, dtype=dtype) X = concatenate((X, y), axis) X -= X.mean(axis=1-axis, keepdims=True) if not rowvar: return (dot(X.T, X.conj()) / fact).squeeze() else: return (dot(X, X.T.conj()) / fact).squeeze() def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue): """ Return Pearson product-moment correlation coefficients. Please refer to the documentation for `cov` for more detail. The relationship between the correlation coefficient matrix, `R`, and the covariance matrix, `C`, is .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } The values of `R` are between -1 and 1, inclusive. Parameters ---------- x : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `x` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same shape as `x`. rowvar : int, optional If `rowvar` is non-zero (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional .. deprecated:: 1.10.0 Has no affect, do not use. ddof : _NoValue, optional .. deprecated:: 1.10.0 Has no affect, do not use. Returns ------- R : ndarray The correlation coefficient matrix of the variables. See Also -------- cov : Covariance matrix Notes ----- This function accepts but discards arguments `bias` and `ddof`. This is for backwards compatibility with previous versions of this function. These arguments had no effect on the return values of the function and can be safely ignored in this and previous versions of numpy. """ if bias is not np._NoValue or ddof is not np._NoValue: warnings.warn('bias and ddof have no affect and are deprecated', DeprecationWarning) c = cov(x, y, rowvar) try: d = diag(c) except ValueError: # scalar covariance # nan if incorrect value (nan, inf, 0), 1 otherwise return c / c return c / sqrt(multiply.outer(d, d)) def blackman(M): """ Return the Blackman window. The Blackman window is a taper formed by using the first three terms of a summation of cosines. It was designed to have close to the minimal leakage possible. It is close to optimal, only slightly worse than a Kaiser window. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). 
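    A short numerical sketch (illustrative) of the normalization and symmetry
    stated above:

    >>> import numpy as np
    >>> w = np.blackman(11)
    >>> np.allclose(w, w[::-1])            # the taper is symmetric
    True
    >>> bool(np.isclose(w.max(), 1.0))     # peak normalized to one for odd M
    True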
See Also -------- bartlett, hamming, hanning, kaiser Notes ----- The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) Most references to the Blackman window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) as the kaiser window. References ---------- Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. Examples -------- >>> np.blackman(12) array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.blackman(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) def bartlett(M): """ Return the Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : array The triangular window, with the maximum value normalized to one (the value one appears only if the number of samples is odd), with the first and last samples equal to zero. See Also -------- blackman, hamming, hanning, kaiser Notes ----- The Bartlett window is defined as .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means"removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The fourier transform of the Bartlett is the product of two sinc functions. 
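    A small illustrative check of the end points and peak described above:

    >>> import numpy as np
    >>> np.bartlett(5)                     # end points are exactly zero
    array([ 0. ,  0.5,  1. ,  0.5,  0. ])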
Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. ]) Plot the window and its frequency response (requires SciPy and matplotlib): >>> from numpy.fft import fft, fftshift >>> window = np.bartlett(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) def hanning(M): """ Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). See Also -------- bartlett, blackman, hamming, kaiser Notes ----- The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hanning was named for Julius van Hann, an Austrian meteorologist. It is also known as the Cosine Bell. Some authors prefer that it be called a Hann window, to help avoid confusion with the very similar Hamming window. Most references to the Hanning window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. 
Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- >>> np.hanning(12) array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, 0.07937323, 0. ]) Plot the window and its frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hanning(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of the Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) def hamming(M): """ Return the Hamming window. The Hamming window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hanning, kaiser Notes ----- The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and is described in Blackman and Tukey. It was recommended for smoothing the truncated autocovariance function in the time domain. Most references to the Hamming window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. 
Examples -------- >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 0.15302337, 0.08 ]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hamming(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) ## Code from cephes for i0 _i0A = [ -4.41534164647933937950E-18, 3.33079451882223809783E-17, -2.43127984654795469359E-16, 1.71539128555513303061E-15, -1.16853328779934516808E-14, 7.67618549860493561688E-14, -4.85644678311192946090E-13, 2.95505266312963983461E-12, -1.72682629144155570723E-11, 9.67580903537323691224E-11, -5.18979560163526290666E-10, 2.65982372468238665035E-9, -1.30002500998624804212E-8, 6.04699502254191894932E-8, -2.67079385394061173391E-7, 1.11738753912010371815E-6, -4.41673835845875056359E-6, 1.64484480707288970893E-5, -5.75419501008210370398E-5, 1.88502885095841655729E-4, -5.76375574538582365885E-4, 1.63947561694133579842E-3, -4.32430999505057594430E-3, 1.05464603945949983183E-2, -2.37374148058994688156E-2, 4.93052842396707084878E-2, -9.49010970480476444210E-2, 1.71620901522208775349E-1, -3.04682672343198398683E-1, 6.76795274409476084995E-1 ] _i0B = [ -7.23318048787475395456E-18, -4.83050448594418207126E-18, 4.46562142029675999901E-17, 3.46122286769746109310E-17, -2.82762398051658348494E-16, -3.42548561967721913462E-16, 1.77256013305652638360E-15, 3.81168066935262242075E-15, -9.55484669882830764870E-15, -4.15056934728722208663E-14, 1.54008621752140982691E-14, 3.85277838274214270114E-13, 7.18012445138366623367E-13, -1.79417853150680611778E-12, -1.32158118404477131188E-11, -3.14991652796324136454E-11, 1.18891471078464383424E-11, 4.94060238822496958910E-10, 3.39623202570838634515E-9, 2.26666899049817806459E-8, 2.04891858946906374183E-7, 2.89137052083475648297E-6, 6.88975834691682398426E-5, 3.36911647825569408990E-3, 8.04490411014108831608E-1 ] def _chbevl(x, vals): b0 = vals[0] b1 = 0.0 for i in range(1, len(vals)): b2 = b1 b1 = b0 b0 = x*b1 - b2 + vals[i] return 0.5*(b0 - b2) def _i0_1(x): return exp(x) * _chbevl(x/2.0-2, _i0A) def _i0_2(x): return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) def i0(x): """ Modified Bessel function of the first kind, order 0. Usually denoted :math:`I_0`. This function does broadcast, but will *not* "up-cast" int dtype arguments unless accompanied by at least one float or complex dtype argument (see Raises below). Parameters ---------- x : array_like, dtype float or complex Argument of the Bessel function. 
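    A small numerical sketch (illustrative values) of the two-interval
    evaluation described in the Notes below:

    >>> import numpy as np
    >>> x = np.array([7.9, 8.0, 8.1])      # values straddling the x = 8 split
    >>> y = np.i0(x)
    >>> bool(np.all(np.diff(y) > 0))       # the two expansions join smoothly (increasing)
    True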
Returns ------- out : ndarray, shape = x.shape, dtype = x.dtype The modified Bessel function evaluated at each of the elements of `x`. Raises ------ TypeError: array cannot be safely cast to required type If argument consists exclusively of int dtypes. See Also -------- scipy.special.iv, scipy.special.ive Notes ----- We use the algorithm published by Clenshaw [1]_ and referenced by Abramowitz and Stegun [2]_, for which the function domain is partitioned into the two intervals [0,8] and (8,inf), and Chebyshev polynomial expansions are employed in each interval. Relative error on the domain [0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). References ---------- .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in *National Physical Laboratory Mathematical Tables*, vol. 5, London: Her Majesty's Stationery Office, 1962. .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 379. http://www.math.sfu.ca/~cbm/aands/page_379.htm .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html Examples -------- >>> np.i0([0.]) array(1.0) >>> np.i0([0., 1. + 2j]) array([ 1.00000000+0.j , 0.18785373+0.64616944j]) """ x = atleast_1d(x).copy() y = empty_like(x) ind = (x < 0) x[ind] = -x[ind] ind = (x <= 8.0) y[ind] = _i0_1(x[ind]) ind2 = ~ind y[ind2] = _i0_2(x[ind2]) return y.squeeze() ## End of cephes code for i0 def kaiser(M, beta): """ Return the Kaiser window. The Kaiser window is a taper formed by using a Bessel function. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. beta : float Shape parameter for window. Returns ------- out : array The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hamming, hanning Notes ----- The Kaiser window is defined as .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} \\right)/I_0(\\beta) with .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, where :math:`I_0` is the modified zeroth-order Bessel function. The Kaiser was named for Jim Kaiser, who discovered a simple approximation to the DPSS window based on Bessel functions. The Kaiser window is a very good approximation to the Digital Prolate Spheroidal Sequence, or Slepian window, which is the transform which maximizes the energy in the main lobe of the window relative to total energy. The Kaiser can approximate many other windows by varying the beta parameter. ==== ======================= beta Window shape ==== ======================= 0 Rectangular 5 Similar to a Hamming 6 Similar to a Hanning 8.6 Similar to a Blackman ==== ======================= A beta value of 14 is probably a good starting point. Note that as beta gets large, the window narrows, and so the number of samples needs to be large enough to sample the increasingly narrow spike, otherwise NaNs will get returned. Most references to the Kaiser window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 
John Wiley and Sons, New York, (1966). .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 177-178. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function Examples -------- >>> np.kaiser(12, 14) array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.kaiser(51, 14) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ from numpy.dual import i0 if M == 1: return np.array([1.]) n = arange(0, M) alpha = (M-1)/2.0 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) def sinc(x): """ Return the sinc function. The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. Parameters ---------- x : ndarray Array (possibly multi-dimensional) of values for which to to calculate ``sinc(x)``. Returns ------- out : ndarray ``sinc(x)``, which has the same shape as the input. Notes ----- ``sinc(0)`` is the limit value 1. The name sinc is short for "sine cardinal" or "sinus cardinalis". The sinc function is used in various signal processing applications, including in anti-aliasing, in the construction of a Lanczos resampling filter, and in interpolation. For bandlimited interpolation of discrete-time signals, the ideal interpolation kernel is proportional to the sinc function. References ---------- .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/SincFunction.html .. 
[2] Wikipedia, "Sinc function", http://en.wikipedia.org/wiki/Sinc_function Examples -------- >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, -4.92362781e-02, -3.89804309e-17]) >>> plt.plot(x, np.sinc(x)) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Sinc Function") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("X") <matplotlib.text.Text object at 0x...> >>> plt.show() It works in 2-D as well: >>> x = np.linspace(-4, 4, 401) >>> xx = np.outer(x, x) >>> plt.imshow(np.sinc(xx)) <matplotlib.image.AxesImage object at 0x...> """ x = np.asanyarray(x) y = pi * where(x == 0, 1.0e-20, x) return sin(y)/y def msort(a): """ Return a copy of an array sorted along the first axis. Parameters ---------- a : array_like Array to be sorted. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- sort Notes ----- ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. """ b = array(a, subok=True, copy=True) b.sort(0) return b def _ureduce(a, func, **kwargs): """ Internal Function. Call `func` with `a` as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input array or object that can be converted to an array. func : callable Reduction function Kapable of receiving an axis argument. It is is called with `a` as first argument followed by `kwargs`. kwargs : keyword arguments additional keyword arguments to pass to `func`. Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce. """ a = np.asanyarray(a) axis = kwargs.get('axis', None) if axis is not None: keepdim = list(a.shape) nd = a.ndim try: axis = operator.index(axis) if axis >= nd or axis < -nd: raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim)) keepdim[axis] = 1 except TypeError: sax = set() for x in axis: if x >= nd or x < -nd: raise IndexError("axis %d out of bounds (%d)" % (x, nd)) if x in sax: raise ValueError("duplicate value in axis") sax.add(x % nd) keepdim[x] = 1 keep = sax.symmetric_difference(frozenset(range(nd))) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): a = a.swapaxes(i, s) # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 else: keepdim = [1] * a.ndim r = func(a, **kwargs) return r, keepdim def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): """ Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. 
axis : int or sequence of int, optional Axis along which the medians are computed. The default (axis=None) is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array (a) for calculations. The input array will be modified by the call to median. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. Note that, if `overwrite_input` is True and the input is not already an ndarray, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.9.0 Returns ------- median : ndarray A new array holding the result (unless `out` is specified, in which case that array is returned instead). If the input contains integers, or floats of smaller precision than 64, then the output data-type is float64. Otherwise, the output data-type is the same as that of the input. See Also -------- mean, percentile Notes ----- Given a vector V of length N, the median of V is the middle value of a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is odd. When N is even, it is the average of the two middle values of ``V_sorted``. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.median(a) 3.5 >>> np.median(a, axis=0) array([ 6.5, 4.5, 2.5]) >>> np.median(a, axis=1) array([ 7., 2.]) >>> m = np.median(a, axis=0) >>> out = np.zeros_like(m) >>> np.median(a, axis=0, out=m) array([ 6.5, 4.5, 2.5]) >>> m array([ 6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.median(b, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.median(b, axis=None, overwrite_input=True) 3.5 >>> assert not np.all(a==b) """ r, k = _ureduce(a, func=_median, axis=axis, out=out, overwrite_input=overwrite_input) if keepdims: return r.reshape(k) else: return r def _median(a, axis=None, out=None, overwrite_input=False): # can't be reasonably be implemented in terms of percentile as we have to # call mean to not break astropy a = np.asanyarray(a) if axis is not None and axis >= a.ndim: raise IndexError( "axis %d out of bounds (%d)" % (axis, a.ndim)) if overwrite_input: if axis is None: part = a.ravel() sz = part.size if sz % 2 == 0: szh = sz // 2 part.partition((szh - 1, szh)) else: part.partition((sz - 1) // 2) else: sz = a.shape[axis] if sz % 2 == 0: szh = sz // 2 a.partition((szh - 1, szh), axis=axis) else: a.partition((sz - 1) // 2, axis=axis) part = a else: if axis is None: sz = a.size else: sz = a.shape[axis] if sz % 2 == 0: part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis) else: part = partition(a, (sz - 1) // 2, axis=axis) if part.shape == (): # make 0-D arrays work return part.item() if axis is None: axis = 0 indexer = [slice(None)] * part.ndim index = part.shape[axis] // 2 if part.shape[axis] % 2 == 1: # index with slice to allow mean (below) to work indexer[axis] = slice(index, index+1) else: indexer[axis] = slice(index-1, index+1) # Use mean in odd and even case to coerce 
data type # and check, use out array. return mean(part[indexer], axis=axis, out=out) def percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): """ Compute the qth percentile of the data along the specified axis. Returns the qth percentile of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) Percentile to compute which must be between 0 and 100 inclusive. axis : int or sequence of int, optional Axis along which the percentiles are computed. The default (None) is to compute the percentiles along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to percentile. This will save memory when you do not need to preserve the contents of the input array. In this case you should not make any assumptions about the content of the passed in array `a` after this function completes -- treat it as undefined. Default is False. Note that, if the `a` input is not already an array this parameter will have no effect, `a` will be converted to an array internally regardless of the value of this parameter. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. .. versionadded:: 1.9.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.9.0 Returns ------- percentile : scalar or ndarray If a single percentile `q` is given and axis=None a scalar is returned. If multiple percentiles `q` are given an array holding the result is returned. The results are listed in the first axis. (If `out` is specified, in which case that array is returned instead). If the input contains integers, or floats of smaller precision than 64, then the output data-type is float64. Otherwise, the output data-type is the same as that of the input. See Also -------- mean, median Notes ----- Given a vector V of length N, the q-th percentile of V is the q-th ranked value in a sorted copy of V. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match q exactly. This function is the same as the median if ``q=50``, the same as the minimum if ``q=0`` and the same as the maximum if ``q=100``. 
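    A compact illustrative sketch of the `interpolation` options on a small
    sample:

    >>> import numpy as np
    >>> a = np.array([1, 2, 3, 4])
    >>> float(np.percentile(a, 50)) == float(np.median(a))   # both average the middle pair
    True
    >>> int(np.percentile(a, 50, interpolation='lower'))
    2
    >>> int(np.percentile(a, 50, interpolation='higher'))
    3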
Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.percentile(a, 50) array([ 3.5]) >>> np.percentile(a, 50, axis=0) array([[ 6.5, 4.5, 2.5]]) >>> np.percentile(a, 50, axis=1) array([[ 7.], [ 2.]]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=m) array([[ 6.5, 4.5, 2.5]]) >>> m array([[ 6.5, 4.5, 2.5]]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) array([[ 7.], [ 2.]]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.percentile(b, 50, axis=None, overwrite_input=True) array([ 3.5]) """ q = array(q, dtype=np.float64, copy=True) r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) if keepdims: if q.ndim == 0: return r.reshape(k) else: return r.reshape([len(q)] + k) else: return r def _percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): a = asarray(a) if q.ndim == 0: # Do not allow 0-d arrays because following code fails for scalar zerod = True q = q[None] else: zerod = False # avoid expensive reductions, relevant for arrays with < O(1000) elements if q.size < 10: for i in range(q.size): if q[i] < 0. or q[i] > 100.: raise ValueError("Percentiles must be in the range [0,100]") q[i] /= 100. else: # faster than any() if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): raise ValueError("Percentiles must be in the range [0,100]") q /= 100. # prepare a for partioning if overwrite_input: if axis is None: ap = a.ravel() else: ap = a else: if axis is None: ap = a.flatten() else: ap = a.copy() if axis is None: axis = 0 Nx = ap.shape[axis] indices = q * (Nx - 1) # round fractional indices according to interpolation method if interpolation == 'lower': indices = floor(indices).astype(intp) elif interpolation == 'higher': indices = ceil(indices).astype(intp) elif interpolation == 'midpoint': indices = floor(indices) + 0.5 elif interpolation == 'nearest': indices = around(indices).astype(intp) elif interpolation == 'linear': pass # keep index as fraction and interpolate else: raise ValueError( "interpolation can only be 'linear', 'lower' 'higher', " "'midpoint', or 'nearest'") if indices.dtype == intp: # take the points along axis ap.partition(indices, axis=axis) # ensure axis with qth is first ap = np.rollaxis(ap, axis, 0) axis = 0 if zerod: indices = indices[0] r = take(ap, indices, axis=axis, out=out) else: # weight the points above and below the indices indices_below = floor(indices).astype(intp) indices_above = indices_below + 1 indices_above[indices_above > Nx - 1] = Nx - 1 weights_above = indices - indices_below weights_below = 1.0 - weights_above weights_shape = [1, ] * ap.ndim weights_shape[axis] = len(indices) weights_below.shape = weights_shape weights_above.shape = weights_shape ap.partition(concatenate((indices_below, indices_above)), axis=axis) x1 = take(ap, indices_below, axis=axis) * weights_below x2 = take(ap, indices_above, axis=axis) * weights_above # ensure axis with qth is first x1 = np.rollaxis(x1, axis, 0) x2 = np.rollaxis(x2, axis, 0) if zerod: x1 = x1.squeeze(0) x2 = x2.squeeze(0) if out is not None: r = add(x1, x2, out=out) else: r = add(x1, x2) return r def trapz(y, x=None, dx=1.0, axis=-1): """ Integrate along the given axis using the composite trapezoidal rule. Integrate `y` (`x`) along given axis. Parameters ---------- y : array_like Input array to integrate. 
x : array_like, optional If `x` is None, then spacing between all `y` elements is `dx`. dx : scalar, optional If `x` is None, spacing given by `dx` is assumed. Default is 1. axis : int, optional Specify the axis. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule. See Also -------- sum, cumsum Notes ----- Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will be taken from `y` array, by default x-axis distances between points will be 1.0, alternatively they can be provided with `x` array or with `dx` scalar. Return value will be equal to combined area under the red lines. References ---------- .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png Examples -------- >>> np.trapz([1,2,3]) 4.0 >>> np.trapz([1,2,3], x=[4,6,8]) 8.0 >>> np.trapz([1,2,3], dx=2) 8.0 >>> a = np.arange(6).reshape(2, 3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> np.trapz(a, axis=0) array([ 1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) array([ 2., 8.]) """ y = asanyarray(y) if x is None: d = dx else: x = asanyarray(x) if x.ndim == 1: d = diff(x) # reshape to correct shape shape = [1]*y.ndim shape[axis] = d.shape[0] d = d.reshape(shape) else: d = diff(x, axis=axis) nd = len(y.shape) slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) try: ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) except ValueError: # Operations didn't work, cast to ndarray d = np.asarray(d) y = np.asarray(y) ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) return ret #always succeed def add_newdoc(place, obj, doc): """Adds documentation to obj which is in module place. If doc is a string add it to obj as a docstring If doc is a tuple, then the first element is interpreted as an attribute of obj and the second as the docstring (method, docstring) If doc is a list, then each element of the list should be a sequence of length two --> [(method1, docstring1), (method2, docstring2), ...] This routine never raises an error. This routine cannot modify read-only docstrings, as appear in new-style classes or built-in functions. Because this routine never raises an error the caller must check manually that the docstrings were changed. """ try: new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): add_docstring(new, doc.strip()) elif isinstance(doc, tuple): add_docstring(getattr(new, doc[0]), doc[1].strip()) elif isinstance(doc, list): for val in doc: add_docstring(getattr(new, val[0]), val[1].strip()) except: pass # Based on scitools meshgrid def meshgrid(*xi, **kwargs): """ Return coordinate matrices from coordinate vectors. Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. .. versionchanged:: 1.9 1-D and 0-D cases are allowed. Parameters ---------- x1, x2,..., xn : array_like 1-D arrays representing the coordinates of a grid. indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. .. versionadded:: 1.7.0 sparse : bool, optional If True a sparse grid is returned in order to conserve memory. Default is False. .. versionadded:: 1.7.0 copy : bool, optional If False, a view into the original arrays are returned in order to conserve memory. Default is True. 
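    A further illustrative sketch of `trapz` with non-uniform spacing supplied
    through `x`:

    >>> import numpy as np
    >>> x = np.array([0.0, 1.0, 3.0])
    >>> y = x ** 2
    >>> np.trapz(y, x)                     # trapezoids of width 1 and 2
    10.5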
Please note that ``sparse=False, copy=False`` will likely return non-contiguous arrays. Furthermore, more than one element of a broadcast array may refer to a single memory location. If you need to write to the arrays, make copies first. .. versionadded:: 1.7.0 Returns ------- X1, X2,..., XN : ndarray For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' with the elements of `xi` repeated to fill the matrix along the first dimension for `x1`, the second for `x2` and so on. Notes ----- This function supports both indexing conventions through the indexing keyword argument. Giving the string 'ij' returns a meshgrid with matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case with inputs of length M and N, the outputs are of shape (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is illustrated by the following code snippet:: xv, yv = meshgrid(x, y, sparse=False, indexing='ij') for i in range(nx): for j in range(ny): # treat xv[i,j], yv[i,j] xv, yv = meshgrid(x, y, sparse=False, indexing='xy') for i in range(nx): for j in range(ny): # treat xv[j,i], yv[j,i] In the 1-D and 0-D case, the indexing and sparse keywords have no effect. See Also -------- index_tricks.mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" using indexing notation. Examples -------- >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) >>> xv, yv = meshgrid(x, y) >>> xv array([[ 0. , 0.5, 1. ], [ 0. , 0.5, 1. ]]) >>> yv array([[ 0., 0., 0.], [ 1., 1., 1.]]) >>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv array([[ 0. , 0.5, 1. ]]) >>> yv array([[ 0.], [ 1.]]) `meshgrid` is very useful to evaluate functions on a grid. >>> x = np.arange(-5, 5, 0.1) >>> y = np.arange(-5, 5, 0.1) >>> xx, yy = meshgrid(x, y, sparse=True) >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) >>> h = plt.contourf(x,y,z) """ ndim = len(xi) copy_ = kwargs.pop('copy', True) sparse = kwargs.pop('sparse', False) indexing = kwargs.pop('indexing', 'xy') if kwargs: raise TypeError("meshgrid() got an unexpected keyword argument '%s'" % (list(kwargs)[0],)) if indexing not in ['xy', 'ij']: raise ValueError( "Valid values for `indexing` are 'xy' and 'ij'.") s0 = (1,) * ndim output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(xi)] shape = [x.size for x in output] if indexing == 'xy' and ndim > 1: # switch first and second axis output[0].shape = (1, -1) + (1,)*(ndim - 2) output[1].shape = (-1, 1) + (1,)*(ndim - 2) shape[0], shape[1] = shape[1], shape[0] if sparse: if copy_: return [x.copy() for x in output] else: return output else: # Return the full N-D matrix (not only the 1-D vector) if copy_: mult_fact = np.ones(shape, dtype=int) return [x * mult_fact for x in output] else: return np.broadcast_arrays(*output) def delete(arr, obj, axis=None): """ Return a new array with sub-arrays along an axis deleted. For a one dimensional array, this returns those entries not returned by `arr[obj]`. Parameters ---------- arr : array_like Input array. obj : slice, int or array of ints Indicate which sub-arrays to remove. 
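    An illustrative sketch of sparse grids, which stay broadcastable while
    saving memory:

    >>> import numpy as np
    >>> x = np.linspace(0, 1, 5)
    >>> y = np.linspace(0, 1, 3)
    >>> xv, yv = np.meshgrid(x, y, sparse=True)
    >>> xv.shape, yv.shape                 # broadcastable 2-D views instead of full matrices
    ((1, 5), (3, 1))
    >>> (xv + yv).shape                    # broadcasting recovers the full grid shape
    (3, 5)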
axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. Returns ------- out : ndarray A copy of `arr` with the elements specified by `obj` removed. Note that `delete` does not occur in-place. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. append : Append elements at the end of an array. Notes ----- Often it is preferable to use a boolean mask. For example: >>> mask = np.ones(len(arr), dtype=bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further use of `mask`. Examples -------- >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], [ 5, 6, 7, 8], [ 9, 10, 11, 12]]) >>> np.delete(arr, 1, 0) array([[ 1, 2, 3, 4], [ 9, 10, 11, 12]]) >>> np.delete(arr, np.s_[::2], 1) array([[ 2, 4], [ 6, 8], [10, 12]]) >>> np.delete(arr, [1,3,5], None) array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 if ndim == 0: warnings.warn( "in the future the special handling of scalars will be removed " "from delete and raise an error", DeprecationWarning) if wrap: return wrap(arr) else: return arr.copy() slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): start, stop, step = obj.indices(N) xr = range(start, stop, step) numtodel = len(xr) if numtodel <= 0: if wrap: return wrap(arr.copy()) else: return arr.copy() # Invert if step is negative: if step < 0: step = -step start = xr[-1] stop = xr[0] + 1 newshape[axis] -= numtodel new = empty(newshape, arr.dtype, arr.flags.fnc) # copy initial chunk if start == 0: pass else: slobj[axis] = slice(None, start) new[slobj] = arr[slobj] # copy end chunck if stop == N: pass else: slobj[axis] = slice(stop-numtodel, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(stop, None) new[slobj] = arr[slobj2] # copy middle pieces if step == 1: pass else: # use array indexing. keep = ones(stop-start, dtype=bool) keep[:stop-start:step] = False slobj[axis] = slice(start, stop-numtodel) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(start, stop) arr = arr[slobj2] slobj2[axis] = keep new[slobj] = arr[slobj2] if wrap: return wrap(new) else: return new _obj = obj obj = np.asarray(obj) # After removing the special handling of booleans and out of # bounds values, the conversion to the array can be removed. if obj.dtype == bool: warnings.warn( "in the future insert will treat boolean arrays and array-likes " "as boolean index instead of casting it to integer", FutureWarning) obj = obj.astype(intp) if isinstance(_obj, (int, long, integer)): # optimization for a single value obj = obj.item() if (obj < -N or obj >= N): raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (obj < 0): obj += N newshape[axis] -= 1 new = empty(newshape, arr.dtype, arr.flags.fnc) slobj[axis] = slice(None, obj) new[slobj] = arr[slobj] slobj[axis] = slice(obj, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(obj+1, None) new[slobj] = arr[slobj2] else: if obj.size == 0 and not isinstance(_obj, np.ndarray): obj = obj.astype(intp) if not np.can_cast(obj, intp, 'same_kind'): # obj.size = 1 special case always failed and would just # give superfluous warnings. 
warnings.warn( "using a non-integer array as obj in delete will result in an " "error in the future", DeprecationWarning) obj = obj.astype(intp) keep = ones(N, dtype=bool) # Test if there are out of bound indices, this is deprecated inside_bounds = (obj < N) & (obj >= -N) if not inside_bounds.all(): warnings.warn( "in the future out of bounds indices will raise an error " "instead of being ignored by `numpy.delete`.", DeprecationWarning) obj = obj[inside_bounds] positive_indices = obj >= 0 if not positive_indices.all(): warnings.warn( "in the future negative indices will not be ignored by " "`numpy.delete`.", FutureWarning) obj = obj[positive_indices] keep[obj, ] = False slobj[axis] = keep new = arr[slobj] if wrap: return wrap(new) else: return new def insert(arr, obj, values, axis=None): """ Insert values along the given axis before the given indices. Parameters ---------- arr : array_like Input array. obj : int, slice or sequence of ints Object that defines the index or indices before which `values` is inserted. .. versionadded:: 1.8.0 Support for multiple insertions when `obj` is a single scalar or a sequence with one element (similar to calling insert multiple times). values : array_like Values to insert into `arr`. If the type of `values` is different from that of `arr`, `values` is converted to the type of `arr`. `values` should be shaped so that ``arr[...,obj,...] = values`` is legal. axis : int, optional Axis along which to insert `values`. If `axis` is None then `arr` is flattened first. Returns ------- out : ndarray A copy of `arr` with `values` inserted. Note that `insert` does not occur in-place: a new array is returned. If `axis` is None, `out` is a flattened array. See Also -------- append : Append elements at the end of an array. concatenate : Join a sequence of arrays together. delete : Delete elements from an array. Notes ----- Note that for higher dimensional inserts `obj=0` behaves very different from `obj=[0]` just like `arr[:,0,:] = values` is different from `arr[:,[0],:] = values`. Examples -------- >>> a = np.array([[1, 1], [2, 2], [3, 3]]) >>> a array([[1, 1], [2, 2], [3, 3]]) >>> np.insert(a, 1, 5) array([1, 5, 1, 2, 2, 3, 3]) >>> np.insert(a, 1, 5, axis=1) array([[1, 5, 1], [2, 5, 2], [3, 5, 3]]) Difference between sequence and scalars: >>> np.insert(a, [1], [[1],[2],[3]], axis=1) array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), ... np.insert(a, [1], [[1],[2],[3]], axis=1)) True >>> b = a.flatten() >>> b array([1, 1, 2, 2, 3, 3]) >>> np.insert(b, [2, 2], [5, 6]) array([1, 1, 5, 6, 2, 2, 3, 3]) >>> np.insert(b, slice(2, 4), [5, 6]) array([1, 1, 5, 2, 6, 2, 3, 3]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting array([1, 1, 7, 0, 2, 2, 3, 3]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) >>> np.insert(x, idx, 999, axis=1) array([[ 0, 999, 1, 2, 999, 3], [ 4, 999, 5, 6, 999, 7]]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 else: if ndim > 0 and (axis < -ndim or axis >= ndim): raise IndexError( "axis %i is out of bounds for an array of " "dimension %i" % (axis, ndim)) if (axis < 0): axis += ndim if (ndim == 0): warnings.warn( "in the future the special handling of scalars will be removed " "from insert and raise an error", DeprecationWarning) arr = arr.copy() arr[...] 
= values if wrap: return wrap(arr) else: return arr slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): # turn it into a range object indices = arange(*obj.indices(N), **{'dtype': intp}) else: # need to copy obj, because indices will be changed in-place indices = np.array(obj) if indices.dtype == bool: # See also delete warnings.warn( "in the future insert will treat boolean arrays and " "array-likes as a boolean index instead of casting it to " "integer", FutureWarning) indices = indices.astype(intp) # Code after warning period: #if obj.ndim != 1: # raise ValueError('boolean array argument obj to insert ' # 'must be one dimensional') #indices = np.flatnonzero(obj) elif indices.ndim > 1: raise ValueError( "index array argument obj to insert must be one dimensional " "or scalar") if indices.size == 1: index = indices.item() if index < -N or index > N: raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (index < 0): index += N # There are some object array corner cases here, but we cannot avoid # that: values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) if indices.ndim == 0: # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that # it works likes the second case. (here a[:,0:1,:]) values = np.rollaxis(values, 0, (axis % values.ndim) + 1) numnew = values.shape[axis] newshape[axis] += numnew new = empty(newshape, arr.dtype, arr.flags.fnc) slobj[axis] = slice(None, index) new[slobj] = arr[slobj] slobj[axis] = slice(index, index+numnew) new[slobj] = values slobj[axis] = slice(index+numnew, None) slobj2 = [slice(None)] * ndim slobj2[axis] = slice(index, None) new[slobj] = arr[slobj2] if wrap: return wrap(new) return new elif indices.size == 0 and not isinstance(obj, np.ndarray): # Can safely cast the empty list to intp indices = indices.astype(intp) if not np.can_cast(indices, intp, 'same_kind'): warnings.warn( "using a non-integer array as obj in insert will result in an " "error in the future", DeprecationWarning) indices = indices.astype(intp) indices[indices < 0] += N numnew = len(indices) order = indices.argsort(kind='mergesort') # stable sort indices[order] += np.arange(numnew) newshape[axis] += numnew old_mask = ones(newshape[axis], dtype=bool) old_mask[indices] = False new = empty(newshape, arr.dtype, arr.flags.fnc) slobj2 = [slice(None)]*ndim slobj[axis] = indices slobj2[axis] = old_mask new[slobj] = values new[slobj2] = arr if wrap: return wrap(new) return new def append(arr, values, axis=None): """ Append values to the end of an array. Parameters ---------- arr : array_like Values are appended to a copy of this array. values : array_like These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be flattened before use. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. Returns ------- append : ndarray A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. delete : Delete elements from an array. 
Examples -------- >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) array([1, 2, 3, 4, 5, 6, 7, 8, 9]) When `axis` is specified, `values` must have the correct shape. >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) Traceback (most recent call last): ... ValueError: arrays must have same number of dimensions """ arr = asanyarray(arr) if axis is None: if arr.ndim != 1: arr = arr.ravel() values = ravel(values) axis = arr.ndim-1 return concatenate((arr, values), axis=axis)
bsd-3-clause
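The meshgrid, delete, and insert docstrings above spell out the two indexing conventions and the copy semantics; the short sketch below exercises those points (array sizes and values are chosen only for illustration):

import numpy as np

# 'xy' (Cartesian) indexing returns (ny, nx)-shaped grids, 'ij' returns (nx, ny).
x = np.linspace(0, 1, 3)
y = np.linspace(0, 1, 2)
xv_xy, yv_xy = np.meshgrid(x, y, indexing='xy')
xv_ij, yv_ij = np.meshgrid(x, y, indexing='ij')
assert xv_xy.shape == (2, 3) and xv_ij.shape == (3, 2)

# sparse=True keeps broadcastable 1-D grids instead of full matrices.
xs, ys = np.meshgrid(x, y, sparse=True)
assert xs.shape == (1, 3) and ys.shape == (2, 1)

# delete and insert return copies; the input array is left untouched.
a = np.arange(6)
b = np.delete(a, [1, 3])   # array([0, 2, 4, 5])
c = np.insert(a, 2, 99)    # array([ 0,  1, 99,  2,  3,  4,  5])
assert a.tolist() == [0, 1, 2, 3, 4, 5]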
gusseppe/sparkmach
sparkmach/analyze.py
1
3693
#!/usr/bin/python # -*- coding: utf-8 -*- # Author: Gusseppe Bravo <[email protected]> # License: BSD 3 clause """ This module provides a few of useful functions (actually, methods) for describing the dataset which is to be studied. """ from __future__ import print_function import numpy as np import matplotlib.pyplot as plt import pandas as pd from pandas.tools.plotting import scatter_matrix #try: # sc.stop() #except: # pass __all__ = [ 'read', 'description', 'classBalance', 'hist', 'density'] class Analyze(): """ A class for data analysis """ def __init__(self, definer): """The init class. Parameters ---------- problem_type : string String that indicates if the model will be train for clasification or regression. response : string String that indicates which column in the dataset is the class. """ self.problem_type = definer.problem_type self.infer_algorithm = definer.infer_algorithm self.response = definer.response self.data_path = definer.data_path self.data = definer.data def pipeline(self): analyzers = [] analyzers.append(self.hist) analyzers.append(self.density) analyzers.append(self.corr) analyzers.append(self.scatter) [m() for m in analyzers] return self def description(self): """Shows a basic data description . Returns ------- out : ndarray """ #return self.data.describe() return pd.DataFrame(self.data.describe()) def classBalance(self): """Shows how balanced the class values are. Returns ------- out : pandas.core.series.Series Serie showing the count of classes. """ return self.data.toPandas().groupby(self.response).size() def hist(self, ax=None): #plt.figure(figsize=(10.8, 3.6)) #for column in df: #df[column].hist(color=[(0.196, 0.694, 0.823)], ax=ax, align='left', label = 'Frequency bar of subsectors') self.data.toPandas().hist(color=[(0.196, 0.694, 0.823)], ax=ax, label='frecuencia') plt.legend(loc='best') if ax is None: plt.show() def density(self, ax=None): #Analyze.data.plot(color=[(0.196, 0.694, 0.823)], kind='density', #subplots=True, layout=(3,3), sharex=False, figsize = (10, 10)) self.data.toPandas().plot(kind='density', subplots=True, layout=(3,3), sharex=False, ax=ax) if ax is None: plt.show() def corr(self, ax=None): corr = self.data.toPandas().corr() names = list(self.data.toPandas().columns.values) fig, ax1 = plt.subplots() if ax is not None: bar = ax.matshow(corr, vmin=-1, vmax=1) else: bar = ax1.matshow(corr, vmin=-1, vmax=1) fig.colorbar(bar) #plt.xticks(range(len(corr.columns)), corr.columns) #plt.yticks(range(len(corr.columns)), corr.columns) ax.set_xticks(range(len(corr.columns))) ax.set_yticks(range(len(corr.columns))) ax.set_xticklabels(names) ax.set_yticklabels(names) if ax is None: plt.show() def scatter(self, ax=None): scatter_matrix(self.data.toPandas(), alpha=0.7, figsize=(6, 6), diagonal='kde', ax=ax) if ax is None: plt.show() def box(self, ax=None): self.data.toPandas().plot(kind="box" , subplots=True, layout=(3,3), sharex=False, sharey=False, ax=ax) if ax is None: plt.show()
mit
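Analyze.corr above builds a correlation heatmap only after a toPandas() conversion; a minimal pandas/matplotlib sketch of that same plot, with a made-up DataFrame standing in for definer.data.toPandas():

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Illustrative data in place of the Spark DataFrame used by Analyze.
rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(100, 3)), columns=["a", "b", "c"])

corr = df.corr()
fig, ax = plt.subplots()
im = ax.matshow(corr, vmin=-1, vmax=1)   # same colour limits as Analyze.corr
fig.colorbar(im)
ax.set_xticks(range(len(corr.columns)))
ax.set_yticks(range(len(corr.columns)))
ax.set_xticklabels(corr.columns)
ax.set_yticklabels(corr.columns)
plt.show()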
sanguinariojoe/aquagpusph
examples/2D/taylor_green/cMake/templates/plot_e.py
1
4104
#****************************************************************************** # * # * ** * * * * * # * * * * * * * * * * # ***** * * * * ***** ** *** * * ** *** *** * # * * * * * * * * * * * * * * * * * * * * # * * * * * * * * * * * * * * * * * * * * # * * ** * ** * * *** *** *** ** *** * * * # * * * * # ** * * * # * #****************************************************************************** # * # This file is part of AQUAgpusph, a free CFD program based on SPH. * # Copyright (C) 2012 Jose Luis Cercos Pita <[email protected]> * # * # AQUAgpusph is free software: you can redistribute it and/or modify * # it under the terms of the GNU General Public License as published by * # the Free Software Foundation, either version 3 of the License, or * # (at your option) any later version. * # * # AQUAgpusph is distributed in the hope that it will be useful, * # but WITHOUT ANY WARRANTY; without even the implied warranty of * # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # GNU General Public License for more details. * # * # You should have received a copy of the GNU General Public License * # along with AQUAgpusph. If not, see <http://www.gnu.org/licenses/>. * # * #****************************************************************************** import sys import os from os import path import matplotlib.pyplot as plt import matplotlib.animation as animation T = {{L}} / {{U}} Ek = {{E_KIN}} def readFile(filepath): """ Read and extract data from a file :param filepath File ot read """ abspath = filepath if not path.isabs(filepath): abspath = path.join(path.dirname(path.abspath(__file__)), filepath) # Read the file by lines f = open(abspath, "r") lines = f.readlines() f.close() data = [] for l in lines[1:-1]: # Skip the last line, which may be unready l = l.strip() l = l.replace('\t', ' ') while l.find(' ') != -1: l = l.replace(' ', ' ') fields = l.split(' ') try: data.append(map(float, fields)) except: continue # Transpose the data return map(list, zip(*data)) line = None fig = plt.figure() ax = fig.add_subplot(111) line, = ax.plot([0.0], [0.0], color="black", linestyle="-", linewidth=1.0) # Set some options ax.grid() ax.set_xlim(0.0, 10.0) ax.set_ylim(0.0, 1.1) ax.set_autoscale_on(False) ax.set_xlabel(r"$t U / L$") ax.set_ylabel(r"$\mathcal{E}_{k}(t) / \mathcal{E}_{k}(0)$") # Animate def update(frame_index): plt.tight_layout() try: data = readFile('EnergyKin.dat') t = data[0] e = data[1] for i in range(len(t)): t[i] /= self.T e[i] /= self.Ek except IndexError: return except FileNotFoundError: return line.set_data(t, e) ax.set_xlim(0, t[-1]) update(0) ani = animation.FuncAnimation(fig, update, interval=1000) plt.show()
gpl-3.0
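The animation callback above rescales time by L/U and kinetic energy by its initial value before plotting; a simplified, non-animated sketch of that normalization, assuming EnergyKin.dat is a complete whitespace-separated table with a one-line header (the L, U and initial-energy constants stand in for the {{L}}, {{U}} and {{E_KIN}} template placeholders):

import numpy as np
import matplotlib.pyplot as plt

L, U, E_kin0 = 1.0, 1.0, 1.0   # placeholders for the templated {{L}}, {{U}}, {{E_KIN}}
T = L / U

data = np.loadtxt("EnergyKin.dat", skiprows=1)   # assumed columns: time, kinetic energy, ...
t = data[:, 0] / T
e = data[:, 1] / E_kin0

plt.plot(t, e, "k-")
plt.xlabel(r"$t U / L$")
plt.ylabel(r"$\mathcal{E}_{k}(t) / \mathcal{E}_{k}(0)$")
plt.show()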
scienceopen/mayavi-examples-python
PlotHDF5.py
1
2230
#!/usr/bin/env python3 """ Plotting 4-D data set: 3-D spatial + time. Each HDF5 file holds one time step (3-D array inside file) """ import h5py import numpy as np from pathlib import Path from scipy.interpolate import RectBivariateSpline # interp2d from matplotlib.pyplot import figure, show from mayavi import mlab def loadplot(fn: Path): datfn = Path(fn).expanduser() with h5py.File(datfn, "r") as f: Ne = np.rot90(f["/den1"][:128, ...]) yg = np.linspace(0, 51.2, Ne.shape[2]) xg = np.linspace(0, 51.2, Ne.shape[1]) # zg = np.linspace(0, 51.2, Ne.shape[0]) # %% FNe = np.fft.fft2(Ne) Fmag = np.fft.fftshift(abs(FNe)) # %% A = np.arange(0, 2 * np.pi, 0.01) r = 2 * np.pi / 3 # [m] x = r * np.cos(A) y = r * np.sin(A) # for now just taking the first slice f = RectBivariateSpline(xg, yg, Ne[0, ...]) # interp2d never finished, extremely long run time--never had this problem before (!) xm, ym = np.meshgrid(xg, yg) # f = interp2d(xm,ym,Ne) # BivarateSpline is using FITPACK, which appears to accept only single coordinate pairs at a time. iNe = np.empty(x.size) for i, (xi, yi) in enumerate(zip(x, y)): iNe[i] = f(xi, yi) # %% if 0: fg = figure() ax = fg.gca() hi = ax.pcolormesh(xg, yg, Ne) ax.set_xlabel("x [m]") ax.set_ylabel("y [m]") ax.set_title("Number density $N_e$") fg.colorbar(hi).set_label("$N_e$ [normalized]") ax = figure().gca() hi = ax.pcolormesh(Fmag) # state machine code--less reliable way to plot # plt.pcolormesh(Ne) # plt.colorbar() # plt.show() ax = figure().gca() ax.plot(np.degrees(A), iNe) ax.set_xlabel("angle [degrees]") ax.set_ylabel("power") ax.autoscale(True, axis="x", tight=True) fg = mlab.figure() scf = mlab.pipeline.scalar_field(Ne, figure=fg) vol = mlab.pipeline.volume(scf, figure=fg) mlab.colorbar(vol) mlab.show() if __name__ == "__main__": from argparse import ArgumentParser p = ArgumentParser() p.add_argument("fn", help="HDF5 3-D time step file") p = p.parse_args() loadplot(p.fn) show()
mit
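PlotHDF5.py samples the interpolated density slice along a circle one coordinate pair at a time; a self-contained sketch of the same pattern on synthetic data, using RectBivariateSpline.ev to evaluate all (x, y) pairs at once (grid extent, radius and centre are illustrative only):

import numpy as np
from scipy.interpolate import RectBivariateSpline

# Synthetic 2-D field standing in for one slice of the HDF5 density array.
xg = np.linspace(0, 51.2, 128)
yg = np.linspace(0, 51.2, 128)
field = np.sin(xg[:, None] / 5.0) * np.cos(yg[None, :] / 5.0)

f = RectBivariateSpline(xg, yg, field)

# Sample along a circle of radius r centred inside the grid.
A = np.arange(0, 2 * np.pi, 0.01)
r = 10.0
x = 25.6 + r * np.cos(A)
y = 25.6 + r * np.sin(A)

ring = f.ev(x, y)   # evaluates at each (x[i], y[i]) pair, no explicit loop needed
print(ring.shape)   # (len(A),)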
tensorflow/minigo
oneoffs/sharp_positions.py
7
16295
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Used as a starting point for our generating hard position collection. training_curve.py is a supervised approach, assuming the pro moves are correct Performance is then measured against the played move and eventual outcome. sharp_positions is unsupervised, it tries to determine what the correct outcome and move is based on clustering of the strongest rated algorithms. Step 1. Create collection.csv of sgf and move number export BOARD_SIZE=19 SGF_DIR=data/sgf MODEL_DIR=models/ python3 sharp_positions.py subsample --num_positions 10 --sgf_dir $SGF_DIR Step 2. Create a directory of those sgfs truncated at the specified move number # from https://github.com/sethtroisi/go-scripts/ ./truncate.sh # Remove any bad SGFS. cat badfiles.txt | tqdm | xargs rm # Remove original SGFs cat ../minigo/collection.csv | cut -f1 -d, | sort -u | tqdm | xargs rm # Rerun truncate successfully this time ./truncate.sh Step 3. Get value & policy for all models for all positions python3 sharp_positions.py evaluate --sgf_dir <problem-collections2> --model_dir $MODEL_DIR Step 4. Fit a model and minimize a set of positions to predict strength python3 sharp_positions.py minimize \ --model_dir $MODEL_DIR --sgf_dir data/s \ --rating_json ratings.json --results results.csv """ import sys sys.path.insert(0, '.') import itertools import json import multiprocessing import os import random from collections import defaultdict, Counter import fire import numpy as np import tensorflow as tf from absl import flags from sklearn import svm from tqdm import tqdm import oneoff_utils # Filter tensorflow info os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' flags.DEFINE_integer("num_positions", 1, "How many positions from each game.") flags.DEFINE_integer("top_n", 3, "Policy moves to record per position.") flags.DEFINE_integer("min_idx", 100, "Min model number to include.") flags.DEFINE_integer("batch_size", 64, "Eval batch size.") # Inputs flags.DEFINE_string("sgf_dir", "data/s/", "input collection of SGFs") flags.DEFINE_string("model_dir", "models/", "Folder of Minigo models") flags.DEFINE_string("rating_json", "ratings.json", "Ratings of models") # Outputs flags.DEFINE_string("collection", "collection.csv", "subsampled csv file") flags.DEFINE_string("results", "results.csv", "Evaluate results file") flags.DEFINE_string("SVM_json", "SVM_data.json", "SVM data about positions") FLAGS = flags.FLAGS def grouper(n, iterable): """Itertools recipe >>> list(grouper(3, iter('ABCDEFG'))) [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] """ return (iterable[i:i + n] for i in range(0, len(iterable), n)) def get_final_positions(): sgf_files = sorted(oneoff_utils.find_and_filter_sgf_files(FLAGS.sgf_dir)) with multiprocessing.Pool() as pool: pos = list(pool.map(oneoff_utils.final_position_sgf, tqdm(sgf_files))) assert len(pos) > 0, "BOARD_SIZE != 19?" 
return sgf_files, pos def get_model_idx(model_name): number = os.path.basename(model_name).split('-')[0] assert len(number) == 6 and 0 <= int(number) <= 1000, model_name return int(number) def subsample(): """Sample num_positions postions from each game in sgf_dir Usage: python3 sharp_positions.py subsample --num_positions 10 --sgf_dir data/s NOTE(sethtroisi): see link for a script to truncate SGFs at move number https://github.com/sethtroisi/go-scripts """ sgf_files = oneoff_utils.find_and_filter_sgf_files( FLAGS.sgf_dir, None, None) with open(FLAG.collection, 'w') as collection: fails = 0 for path in tqdm(sorted(sgf_files)): try: positions, moves, results = oneoff_utils.parse_sgf(path) except KeyboardInterrupt: raise except Exception as e: fails += 1 print("Fail {}, while parsing {}: {}".format(fails, path, e)) continue moves = len(positions) indexes = random.sample(range(10, moves), FLAGS.num_positions) for index in sorted(indexes): collection.write('{}, {}\n'.format(path, index)) def evaluate(): """Get Policy and Value for each network, for each position Usage: python3 sharp_positions.py evaluate --sgf_dir data/s --model_dir models/ """ def short_str(v): if isinstance(v, float): return "{.3f}".format(v) return str(v) # Load positons sgf_names, all_positions = get_final_positions() # Run and save some data about each position # Save to csv because that's easy model_paths = oneoff_utils.get_model_paths(FLAGS.model_dir) num_models = len(model_paths) print("Evaluating {} models: {} to {}".format( num_models, model_paths[0], model_paths[-1])) print() with open(FLAGS.results, "w") as results: results.write(",".join(sgf_names) + "\n") player = None for idx in tqdm(range(FLAGS.min_idx, num_models, 1), desc="model"): model = model_paths[idx] if player and idx % 50 == 0: player.network.sess.close() tf.reset_default_graph() player = None if player: oneoff_utils.restore_params(model, player) else: player = oneoff_utils.load_player(model) row = [model] for positions in grouper(FLAGS.batch_size, all_positions): probs, values = player.network.run_many(positions) # NOTE(sethtroisi): For now we store the top n moves to shrink # the size of the recorded data. top_n = FLAGS.top_n top_policy_move = np.fliplr(np.argsort(probs))[:,:top_n] top_policy_value = np.fliplr(np.sort(probs))[:,:top_n] # One position at a time for v, m, p in zip(values, top_policy_move, top_policy_value): row.append(v) row.extend(itertools.chain.from_iterable(zip(m, p))) if len(positions) > 10: average_seen = top_policy_value.sum() / len(positions) if average_seen < 0.3: print("\t", average_seen, top_policy_value.sum(axis=-1)) results.write(",".join(map(short_str, row)) + "\n") def minimize(): """Find a subset of problems that maximal explains rating. Usage: python3 sharp_positions.py minimize \ --model_dir models --sgf_dir data/s --rating_json ratings.json --results results.csv """ ########################### HYPER PARAMETERS ############################### # Stop when r2 is this much worse than full set of positions r2_stopping_percent = 0.96 # for this many iterations stopping_iterations = 5 # Limit SVM to a smaller number of positions to speed up code. max_positions_fit = 300 # Filter any position that "contributes" less than this percent of max. 
filter_contribution_percent = 0.3 # Never filter more than this many positions in one iterations filter_limit = 25 ########################### HYPER PARAMETERS ############################### # Load positons model_paths = oneoff_utils.get_model_paths(FLAGS.model_dir) num_models = len(model_paths) assert num_models > 0, FLAGS.model_dir # Load model ratings # wget https://cloudygo.com/v12-19x19/json/ratings.json ratings = json.load(open(FLAGS.rating_json)) raw_ratings = {int(r[0]): float(r[1]) for r in ratings} model_ratings = [] for model in model_paths: model_idx = get_model_idx(model) if model_idx < FLAGS.min_idx: continue model_ratings.append(raw_ratings[model_idx]) model_ratings = np.array(model_ratings) assert 0 < len(model_ratings) <= num_models, len(model_ratings) num_models = len(model_ratings) sgf_names, all_positions = get_final_positions() # Trim off common path prefix. common_path = os.path.commonpath(sgf_names) sgf_names = [name[len(common_path) + 1:] for name in sgf_names] print("Considering {} positions, {} models".format( len(all_positions), num_models)) print() # Load model data top_n = FLAGS.top_n positions = defaultdict(list) with open(FLAGS.results) as results: headers = results.readline().strip() assert headers.count(",") + 1 == len(sgf_names) # Row is <model_name> + positions x [value, top_n x [move, move_policy]] for row in tqdm(results.readlines(), desc="result line"): data = row.split(",") model_idx = get_model_idx(data.pop(0)) if model_idx < FLAGS.min_idx: continue data_per = 1 + top_n * 2 assert len(data) % data_per == 0, len(data) for position, position_data in enumerate(grouper(data_per, data)): value = float(position_data.pop(0)) moves = list(map(int, position_data[0::2])) move_policy = list(map(float, position_data[1::2])) positions[position].append([value, moves, move_policy]) def one_hot(n, i): one_hot = [0] * n if 0 <= i < n: one_hot[i] += 1 return one_hot # NOTE: top_n isn't the same semantic value here and can be increased. one_hot_moves = top_n num_features = 1 + 5 + (one_hot_moves + 1) # Features by position features = [] pos_top_moves = [] for position, data in tqdm(positions.items(), desc="featurize"): assert len(data) == num_models, len(data) top_moves = Counter([d[1][0] for d in data]) top_n_moves = [m for m, c in top_moves.most_common(one_hot_moves)] if len(top_n_moves) < one_hot_moves: top_n_moves.extend([-1] * (one_hot_moves - len(top_n_moves))) assert len(top_n_moves) == one_hot_moves, "pad with dummy moves" pos_top_moves.append(top_n_moves) # Eventaully we want # [model 1 position 1 features, m1 p2 features, m1 p3 features, ... ] # [model 2 position 1 features, m2 p2 features, m2 p3 features, ... ] # [model 3 position 1 features, m3 p2 features, m3 p3 features, ... ] # ... # [model m position 1 features, mm p2 features, mm p3 features, ... ] # We'll do position selection by joining [model x position_feature] feature_columns = [] for model, (v, m, mv) in enumerate(data): # Featurization (for each positions): # * Value (-1 to 1), Bucketed value # * Cluster all model by top_n moves (X,Y,Z or other)? # * value of that move for model # * policy value of top move model_features = [] model_features.append(2 * v - 1) # NOTE(sethtroisi): Consider bucketize value by value percentiles. value_bucket = np.searchsorted((0.2, 0.4, 0.6, 0.8), v) model_features.extend(one_hot(5, value_bucket)) # Policy weight for most common X moves (among all models). 
policy_weights = [0] * (one_hot_moves + 1) for move, policy_value in zip(m, mv): if move in top_n_moves: policy_weights[top_n_moves.index(move)] = policy_value else: policy_weights[-1] += policy_value model_features.extend(policy_weights) assert len(model_features) == num_features feature_columns.append(model_features) features.append(feature_columns) features = np.array(features) print("Feature shape", features.shape) print() # Split the models to test / train train_size = int(num_models * 0.9) train_models = sorted(np.random.permutation(num_models)[:train_size]) test_models = sorted(set(range(num_models)) - set(train_models)) assert set(train_models + test_models) == set(range(num_models)) features_train = features[:, train_models, :] features_test = features[:, test_models, :] labels_train = model_ratings[train_models] labels_test = model_ratings[test_models] # Choose some set of positions and see how well they explain ratings positions_to_use = set(positions.keys()) linearSVM = svm.LinearSVR() best_test_r2 = 0 below_threshold = 0 for iteration in itertools.count(1): iter_positions = np.random.permutation(list(positions_to_use)) iter_positions = sorted(iter_positions[:max_positions_fit]) # Take this set of positions and build X X = np.concatenate(features_train[iter_positions], axis=1) Xtest = np.concatenate(features_test[iter_positions], axis=1) assert X.shape == (train_size, num_features * len(iter_positions)) linearSVM.fit(X, labels_train) score_train = linearSVM.score(X, labels_train) score_test = linearSVM.score(Xtest, labels_test) print("iter {}, {}/{} included, R^2: {:.4f} train, {:.3f} test".format( iteration, len(iter_positions), len(positions_to_use), score_train, score_test)) # Determine the most and least useful position: # TODO(amj,brilee): Validate this math. assert len(linearSVM.coef_) == num_features * len(iter_positions) # The intercepts tell us how much this contributes to overall rating # but coef tell us how much different answers differentiate rating. coef_groups = list(grouper(num_features, linearSVM.coef_)) position_coefs = [abs(sum(c)) for c in coef_groups] pos_value_idx = np.argsort(position_coefs) max_pos = pos_value_idx[-1] most_value = position_coefs[max_pos] print("\tMost value {} => {:.1f} {}".format( max_pos, most_value, sgf_names[iter_positions[max_pos]])) # Drop any positions that aren't very useful for dropped, pos_idx in enumerate(pos_value_idx[:filter_limit], 1): contribution = position_coefs[pos_idx] positions_to_use.remove(iter_positions[pos_idx]) print("\t\tdropping({}): {:.1f} {}".format( dropped, contribution, sgf_names[iter_positions[pos_idx]])) if contribution > filter_contribution_percent * most_value: break print() best_test_r2 = max(best_test_r2, score_test) if score_test > r2_stopping_percent * best_test_r2: below_threshold = 0 else: below_threshold += 1 if below_threshold == stopping_iterations: print("{}% decrease in R^2, stopping".format( 100 - int(100 * r2_stopping_percent))) break # Write down the differentiating positions and their answers. svm_data = [] for position_idx in list(reversed(pos_value_idx)): coefs = coef_groups[position_idx] # Global position index. 
position = iter_positions[position_idx] sgf_name = sgf_names[position] top_moves = pos_top_moves[position] svm_data.append([sgf_name, [top_moves, coefs.tolist()]]) with open(FLAGS.SVM_json, "w") as svm_json: json.dump(svm_data, svm_json) print("Dumped data about {} positions to {}".format( len(svm_data), FLAGS.SVM_json)) if __name__ == "__main__": remaining_argv = flags.FLAGS(sys.argv, known_only=True) fire.Fire({ 'subsample': subsample, 'evaluate': evaluate, 'minimize': minimize, }, remaining_argv[1:])
apache-2.0
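The minimize step in sharp_positions.py concatenates per-position feature blocks into one design matrix per model and fits a LinearSVR against the model ratings; a toy sketch of that fit-and-score step on random data (all shapes and values here are illustrative, not taken from real runs):

import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
num_positions, num_models, num_features = 20, 50, 7

# features[p, m, :] is the feature vector of model m on position p, as in the script.
features = rng.rand(num_positions, num_models, num_features)
ratings = rng.rand(num_models) * 1000.0

subset = [0, 3, 5, 8]                          # positions chosen for one iteration
X = np.concatenate(features[subset], axis=1)   # shape (num_models, len(subset) * num_features)
assert X.shape == (num_models, len(subset) * num_features)

reg = svm.LinearSVR()
reg.fit(X, ratings)
print("train R^2:", reg.score(X, ratings))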
lukeiwanski/tensorflow
tensorflow/contrib/timeseries/examples/predict.py
69
5579
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """An example of training and predicting with a TFTS estimator.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys import numpy as np import tensorflow as tf try: import matplotlib # pylint: disable=g-import-not-at-top matplotlib.use("TkAgg") # Need Tk for interactive plots. from matplotlib import pyplot # pylint: disable=g-import-not-at-top HAS_MATPLOTLIB = True except ImportError: # Plotting requires matplotlib, but the unit test running this code may # execute in an environment without it (i.e. matplotlib is not a build # dependency). We'd still like to test the TensorFlow-dependent parts of this # example, namely train_and_predict. HAS_MATPLOTLIB = False FLAGS = None def structural_ensemble_train_and_predict(csv_file_name): # Cycle between 5 latent values over a period of 100. This leads to a very # smooth periodic component (and a small model), which is a good fit for our # example data. Modeling high-frequency periodic variations will require a # higher cycle_num_latent_values. structural = tf.contrib.timeseries.StructuralEnsembleRegressor( periodicities=100, num_features=1, cycle_num_latent_values=5) return train_and_predict(structural, csv_file_name, training_steps=150) def ar_train_and_predict(csv_file_name): # An autoregressive model, with periodicity handled as a time-based # regression. Note that this requires windows of size 16 (input_window_size + # output_window_size) for training. ar = tf.contrib.timeseries.ARRegressor( periodicities=100, input_window_size=10, output_window_size=6, num_features=1, # Use the (default) normal likelihood loss to adaptively fit the # variance. SQUARED_LOSS overestimates variance when there are trends in # the series. loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS) return train_and_predict(ar, csv_file_name, training_steps=600) def train_and_predict(estimator, csv_file_name, training_steps): """A simple example of training and predicting.""" # Read data in the default "time,value" CSV format with no header reader = tf.contrib.timeseries.CSVReader(csv_file_name) # Set up windowing and batching for training train_input_fn = tf.contrib.timeseries.RandomWindowInputFn( reader, batch_size=16, window_size=16) # Fit model parameters to data estimator.train(input_fn=train_input_fn, steps=training_steps) # Evaluate on the full dataset sequentially, collecting in-sample predictions # for a qualitative evaluation. Note that this loads the whole dataset into # memory. For quantitative evaluation, use RandomWindowChunker. 
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader) evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1) # Predict starting after the evaluation (predictions,) = tuple(estimator.predict( input_fn=tf.contrib.timeseries.predict_continuation_input_fn( evaluation, steps=200))) times = evaluation["times"][0] observed = evaluation["observed"][0, :, 0] mean = np.squeeze(np.concatenate( [evaluation["mean"][0], predictions["mean"]], axis=0)) variance = np.squeeze(np.concatenate( [evaluation["covariance"][0], predictions["covariance"]], axis=0)) all_times = np.concatenate([times, predictions["times"]], axis=0) upper_limit = mean + np.sqrt(variance) lower_limit = mean - np.sqrt(variance) return times, observed, all_times, mean, upper_limit, lower_limit def make_plot(name, training_times, observed, all_times, mean, upper_limit, lower_limit): """Plot a time series in a new figure.""" pyplot.figure() pyplot.plot(training_times, observed, "b", label="training series") pyplot.plot(all_times, mean, "r", label="forecast") pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound") pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound") pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey", alpha="0.2") pyplot.axvline(training_times[-1], color="k", linestyle="--") pyplot.xlabel("time") pyplot.ylabel("observations") pyplot.legend(loc=0) pyplot.title(name) def main(unused_argv): if not HAS_MATPLOTLIB: raise ImportError( "Please install matplotlib to generate a plot from this example.") make_plot("Structural ensemble", *structural_ensemble_train_and_predict(FLAGS.input_filename)) make_plot("AR", *ar_train_and_predict(FLAGS.input_filename)) pyplot.show() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--input_filename", type=str, required=True, help="Input csv file.") FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
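make_plot above shades a one-standard-deviation band around the forecast mean; a minimal numpy/matplotlib sketch of just that band construction, with synthetic mean and variance arrays in place of the estimator output (alpha is given here as a float rather than the string used in the example above):

import numpy as np
from matplotlib import pyplot

# Synthetic stand-ins for the concatenated mean/covariance output of the estimator.
all_times = np.arange(300)
mean = np.sin(all_times / 25.0)
variance = np.full_like(mean, 0.04)

upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)

pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey", alpha=0.2)
pyplot.legend(loc=0)
pyplot.show()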
alanlhutchison/JTK
jtk.py
1
13679
#!/usr/bin/env python """ You should probably be calling this from wrap_jtk.sh Write a JTK that does the following: 1) Takes in a list of ZTs and gene values 2) Allows a choice of waveform 3) Allows a choice of phase and period 4) Takes the number of available points for a given gene and calculates the null distribution for that set of timepoints 5) Calculates the Kendall's tau between the time points and the Null distribution """ VERSION="0.0" from scipy.stats import kendalltau from operator import itemgetter import numpy as np import sys import argparse import matplotlib.pyplot as plt import matplotlib.cm as cm from scipy.stats import norm def main(args): fn = args.filename waveform = args.function period = args.period phase = args.phase eff_amp = 1 updated = read_in(fn) header,series =organize_data(updated) reference = generate_base_reference(header,waveform,phase,period) #print "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}".format("#ID","waveform","period","phase","eff_amp","tau","p") RealKen = KendallTauP() for serie in series: geneID,tau,p = generate_mod_series(reference,serie,RealKen) print "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}".format(geneID,waveform,period,phase,eff_amp,tau,p) def read_in(fn): """Reads in a file in correct '#\tZTX_X\tZTX_X\tZTX_X\n geneID\tvalue\tvalue' Returns a list of lists of lines with replicates combined Correctly deals with the NA case: If NA is part of a replicate, ignore from mean If NA is alone or all replicates, propagate through to output""" updated = [] with open(fn,'r') as f: master_match=[] for line in f: words = line.strip().split() words=[word.strip() for word in words] if words[0] =="#": match_searched = [] #print words for i in xrange(1,len(words)): match_searched.append(i) match = [i] for j in xrange(i+1,len(words)): if j not in match_searched: if words[i].split('_')[0] == words[j].split('_')[0]: match.append(j) match_searched.append(j) if len(match) > 1: master_match.append(match) #print master_match new = [words[0]] to_collapse = set([m for match in master_match for m in match]) to_check = set(range(1,len(words))) for i in range(1,len(words)): if i in to_check: if i in to_collapse: for match in master_match: if i in match: sum = 0 if "ZT" in line: sum = words[i].split('_')[0] else: NAflag = 0 for m in match: if words[m].strip()!="NA": sum += float(words[m]) else: NAflag += 1 if NAflag != len(match): sum = sum / float(len(match)-NAflag) elif NAflag==len(match): sum = "NA" new.append(sum) to_collapse = to_collapse.difference(match) to_check = to_check.difference(match) else: if "ZT" in line: new.append(words[i].split('_')[0]) else: if words[i]!="NA": new.append(float(words[i])) else: new.append(words[i]) #print "new is",new updated.append(new) # for update in updated: # print update return updated def organize_data(updated): """ Organize list of lists from such that genes with similar time-series holes match (for null distribution calc) Return a header ['#','ZTX','ZTY'...] 
and a list of lists [ lists with similar holes (identical null distribution) , [],[],[]] """ header = updated[0] L = updated[1:] for i in xrange(1,len(header)): L=sorted(L, key=itemgetter(i)) # print "Header is" # print header # for line in L: # print line return header,L def generate_base_reference(header,waveform="cosine",phase=0,period=24): """ This will generate a waveform with a given phase and period based on the header, """ tpoints = [] ZTs = header[1:] coef = 2.0 * np.pi / float(period) for ZT in ZTs: z = ZT[2:] tpoints.append( (float(z)+float(phase) ) * coef) #print tpoints #print [tpoint/np.pi/2.0 for tpoint in tpoints] if waveform == "cosine": reference=np.cos(tpoints) elif waveform == "impulse": reference=np.cos(tpoints) elif waveform == "rampup": reference=np.cos(tpoints) elif waveform == "rampdown": reference=np.cos(tpoints) elif waveform == "step": reference=np.cos(tpoints) return reference def generate_mod_series(reference,series,RealKen): """ Takes the series from generate_base_null, takes the list from data, and makes a null for each gene in data or uses the one previously calculated. Then it runs Kendall's Tau on the exp. series against the null """ geneID = series[0] values = series[1:] binary = [1 if value!="NA" else np.nan for value in values] temp = reference*binary mod_reference = [value for value in temp if not np.isnan(value)] mod_values = [value for value in values if value!='NA'] # print reference # print temp # print mod_reference # print mod_values if len(mod_values) < 3: tau,p = np.nan,np.nan elif mod_values.count(np.nan) == len(mod_values): tau,p = np.nan,np.nan elif mod_values.count(0) == len(mod_values): tau,p = np.nan,np.nan elif sum(mod_values)<0.00001: tau,p = np.nan,np.nan else: tau,p=kendalltau(mod_values,mod_reference) if not np.isnan(tau): pk = RealKen.pval(tau,len(mod_values)) if pk!=None: p=pk #print tau,p return geneID,tau,p def __create_parser__(): p = argparse.ArgumentParser( description="python script runner for JTK_CYCLE statistical test", epilog="...", version=VERSION ) p.add_argument("-t", "--test", action='store_true', default=False, help="run the Python unittest testing suite") p.add_argument("-f", "--file", dest="filename", action='store', metavar="FILENM", type=str, help="give a filename else this thang won't run") analysis = p.add_argument_group(title="JTK_CYCLE analysis options") analysis.add_argument("--function", dest="function", type=str, metavar="$FUNC_STR", action='store', default="cosine", choices=["cosine","rampup","rampdown","step","impulse"], help="cosine (dflt), rampup, rampdown, impulse, step") analysis.add_argument("-w", "--width", dest="width", type=float, metavar="W", action='store', default=0.75, help="shape parameter for alt. 
waveforms \in [0,1]") analysis.add_argument("-ph", "--phase", dest="phase", metavar="P", type=float, default=0.0, help="set phase of reference waveform (dflt: 0.0)") analysis.add_argument("-p","--period", dest="period", metavar=float, type=float, action='store', help="set period to be searched") distribution = analysis.add_mutually_exclusive_group(required=False) distribution.add_argument("-e", "--exact", dest="harding", action='store_true', default=False, help="use Harding's exact null distribution (dflt)") distribution.add_argument("-n", "--normal", dest="normal", action='store_true', default=False, help="use normal approximation to null distribution") return p # instantiate class to precalculate distribution # usage: # K = KendallTauP() # pval = K.pval(tau,n,two_tailed=True) class KendallTauP: def __init__(self,N=25): # largest number of samples to precompute self.N = N Nint = self.N*(self.N-1)/2 # first allocate freq slots for largest sample array # as we fill this in we'll save the results for smaller samples # total possible number of inversions is Nint + 1 freqN = np.zeros(Nint + 1) freqN[0] = 1.0 # save results at each step in freqs array self.freqs = [np.array([1.0])] for i in xrange(1,self.N): last = np.copy(freqN) for j in xrange(Nint+1): # update each entry by summing over i entries to the left freqN[j] += sum(last[max(0,j-i):j]) # copy current state into freqs array # the kth entry of freqs should have 1+k*(k-1)/2 entries self.freqs.append(np.copy(freqN[0:(1+(i+1)*i/2)])) # turn freqs into cdfs # distributions still with respect to number of inversions self.cdfs = [] for i in xrange(self.N): self.cdfs.append(np.copy(self.freqs[i])) # turn into cumulative frequencies for j in xrange(1,len(self.freqs[i])): self.cdfs[i][j] += self.cdfs[i][j-1] # convert freqs to probs self.cdfs[i] = self.cdfs[i]/sum(self.freqs[i]) # plot exact distribution compared to normal approx def plot(self,nlist): colors = cm.Set1(np.linspace(0,1,len(nlist))) # for plotting gaussian x = np.linspace(-1.2,1.2,300) # plot pdfs plt.figure() for i in xrange(len(nlist)): ntot = len(self.freqs[nlist[i]-1])-1 tauvals = (ntot - 2.0*np.arange(len(self.freqs[nlist[i]-1])))/ntot probs = ((ntot+1.0)/2.0)*self.freqs[nlist[i]-1]/sum(self.freqs[nlist[i]-1]) plt.scatter(tauvals,probs,color=colors[i]) # now plot gaussian comparison var = 2.0*(2.0*nlist[i]+5.0)/(nlist[i]*(nlist[i]-1)*9.0) plt.plot(x,norm.pdf(x,0.0,np.sqrt(var)),color=colors[i]) plt.legend(nlist,loc='best') # plt.savefig('pdfs.png') plt.show() # now plot cdfs plt.figure() for i in xrange(len(nlist)): ntot = len(self.freqs[nlist[i]-1])-1 tauvals = -1.0*(ntot - 2.0*np.arange(len(self.freqs[nlist[i]-1])))/ntot probs = self.cdfs[nlist[i]-1] plt.scatter(tauvals,probs,color=colors[i]) # now plot gaussian comparison var = 2.0*(2.0*nlist[i]+5.0)/(nlist[i]*(nlist[i]-1)*9.0) plt.plot(x,norm.cdf(x,0.0,np.sqrt(var)),color=colors[i]) plt.legend(nlist,loc='best') # plt.savefig('cdfs.png') plt.show() # use cdfs to return pval # default to return two tailed pval def pval(self,tau,n,two_tailed=True): # enforce tau is between -1 and 1 if tau < -1.0 or tau > 1.0: sys.stderr.write(str(tau)+"\n") sys.stderr.write("invalid tau\n") #print 'invalid tau' return None # enforce n is less than our precomputed quantities if n > self.N: #print 'n is too large' sys.stderr.write("n is too large/n") return None # convert tau to value in terms of number of inversions ntot = n*(n-1)/2 inv_score = int(round((ntot - tau * ntot)/2.0)) # I'm a little worried about the precision of this, # but 
probably not enough to be really worried for reasonable n # since we really only need precision to resolve ntot points # if two tailed, we're getting a tail from a symmetric dist min_inv_score = min(inv_score,ntot-inv_score) if two_tailed: pval = self.cdfs[n-1][min_inv_score]*2.0 else: # if one tailed return prob of getting that or fewer inversions pval = self.cdfs[n-1][inv_score] # if inv_score is 0, might have larger than 0.5 prob return min(pval,1.0) if __name__=="__main__": parser = __create_parser__() args = parser.parse_args() main(args)
mit
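generate_mod_series in jtk.py keeps scipy's kendalltau p-value only when the precomputed exact null distribution cannot be applied; the sketch below contrasts scipy's p-value with the normal approximation whose variance, 2(2n+5)/(9n(n-1)), is the one KendallTauP.plot compares against (the series here is random, purely for illustration):

import numpy as np
from scipy.stats import kendalltau, norm

rng = np.random.RandomState(0)
n = 12
reference = np.cos(2 * np.pi * np.arange(n) / n)   # cosine reference, as in generate_base_reference
series = rng.rand(n)                               # a non-rhythmic series

tau, p_scipy = kendalltau(series, reference)

# Normal approximation to the null distribution of tau:
var = 2.0 * (2.0 * n + 5.0) / (9.0 * n * (n - 1.0))
p_normal = 2.0 * norm.sf(abs(tau), loc=0.0, scale=np.sqrt(var))
print(tau, p_scipy, p_normal)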
darionyaphet/flink
flink-python/pyflink/table/table.py
2
40859
################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ import warnings from py4j.java_gateway import get_method from typing import Union from pyflink.java_gateway import get_gateway from pyflink.table import ExplainDetail from pyflink.table.expression import Expression, _get_java_expression from pyflink.table.expressions import col from pyflink.table.serializers import ArrowSerializer from pyflink.table.table_result import TableResult from pyflink.table.table_schema import TableSchema from pyflink.table.types import create_arrow_schema from pyflink.table.utils import tz_convert_from_internal, to_expression_jarray from pyflink.table.window import OverWindow, GroupWindow from pyflink.util.utils import to_jarray from pyflink.util.utils import to_j_explain_detail_arr __all__ = ['Table', 'GroupedTable', 'GroupWindowedTable', 'OverWindowedTable', 'WindowGroupedTable'] class Table(object): """ A :class:`~pyflink.table.Table` is the core component of the Table API. Similar to how the batch and streaming APIs have DataSet and DataStream, the Table API is built around :class:`~pyflink.table.Table`. Use the methods of :class:`~pyflink.table.Table` to transform data. Example: :: >>> env = StreamExecutionEnvironment.get_execution_environment() >>> env.set_parallelism(1) >>> t_env = StreamTableEnvironment.create(env) >>> ... >>> t_env.register_table_source("source", ...) >>> t = t_env.from_path("source") >>> t.select(...) >>> ... >>> t_env.register_table_sink("result", ...) >>> t.execute_insert("result") Operations such as :func:`~pyflink.table.Table.join`, :func:`~pyflink.table.Table.select`, :func:`~pyflink.table.Table.where` and :func:`~pyflink.table.Table.group_by` take arguments in an expression string. Please refer to the documentation for the expression syntax. """ def __init__(self, j_table, t_env): self._j_table = j_table self._t_env = t_env def __str__(self): return self._j_table.toString() def __getattr__(self, name) -> Expression: """ Returns the :class:`Expression` of the column `name`. Example: :: >>> tab.select(tab.a) """ if name not in self.get_schema().get_field_names(): raise AttributeError( "The current table has no column named '%s', available columns: [%s]" % (name, ', '.join(self.get_schema().get_field_names()))) return col(name) def select(self, *fields: Union[str, Expression]): """ Performs a selection operation. Similar to a SQL SELECT statement. The field expressions can contain complex expressions. 
Example: :: >>> from pyflink.table import expressions as expr >>> tab.select(tab.key, expr.concat(tab.value, 'hello')) >>> tab.select(expr.col('key'), expr.concat(expr.col('value'), 'hello')) >>> tab.select("key, value + 'hello'") :return: The result table. :rtype: pyflink.table.Table """ if all(isinstance(f, Expression) for f in fields): return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return Table(self._j_table.select(fields[0]), self._t_env) def alias(self, field: str, *fields: str): """ Renames the fields of the expression result. Use this to disambiguate fields before joining two tables. Example: :: >>> tab.alias("a", "b", "c") >>> tab.alias("a, b, c") :param field: Field alias. :param fields: Additional field aliases. :return: The result table. :rtype: pyflink.table.Table """ gateway = get_gateway() extra_fields = to_jarray(gateway.jvm.String, fields) return Table(get_method(self._j_table, "as")(field, extra_fields), self._t_env) def filter(self, predicate: Union[str, Expression[bool]]): """ Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE clause. Example: :: >>> tab.filter(tab.name == 'Fred') >>> tab.filter("name = 'Fred'") :param predicate: Predicate expression string. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.filter(_get_java_expression(predicate)), self._t_env) def where(self, predicate: Union[str, Expression[bool]]): """ Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE clause. Example: :: >>> tab.where(tab.name == 'Fred') >>> tab.where("name = 'Fred'") :param predicate: Predicate expression string. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.where(_get_java_expression(predicate)), self._t_env) def group_by(self, *fields: Union[str, Expression]): """ Groups the elements on some grouping keys. Use this before a selection with aggregations to perform the aggregation on a per-group basis. Similar to a SQL GROUP BY statement. Example: :: >>> tab.group_by(tab.key).select(tab.key, tab.value.avg) >>> tab.group_by("key").select("key, value.avg") :param fields: Group keys. :return: The grouped table. :rtype: pyflink.table.GroupedTable """ if all(isinstance(f, Expression) for f in fields): return GroupedTable(self._j_table.groupBy(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return GroupedTable(self._j_table.groupBy(fields[0]), self._t_env) def distinct(self): """ Removes duplicate values and returns only distinct (different) values. Example: :: >>> tab.select(tab.key, tab.value).distinct() :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.distinct(), self._t_env) def join(self, right: 'Table', join_predicate: Union[str, Expression[bool]] = None): """ Joins two :class:`~pyflink.table.Table`. Similar to a SQL join. The fields of the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if necessary. You can use where and select clauses after a join to further specify the behaviour of the join. .. note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` . Example: :: >>> left.join(right).where((left.a == right.b) && (left.c > 3)) >>> left.join(right).where("a = b && c > 3") >>> left.join(right, left.a == right.b) :param right: Right table. 
:param join_predicate: Optional, the join predicate expression string. :return: The result table. :rtype: pyflink.table.Table """ if join_predicate is not None: return Table(self._j_table.join( right._j_table, _get_java_expression(join_predicate)), self._t_env) else: return Table(self._j_table.join(right._j_table), self._t_env) def left_outer_join(self, right: 'Table', join_predicate: Union[str, Expression[bool]] = None): """ Joins two :class:`~pyflink.table.Table`. Similar to a SQL left outer join. The fields of the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if necessary. .. note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` and its :class:`~pyflink.table.TableConfig` must have null check enabled (default). Example: :: >>> left.left_outer_join(right) >>> left.left_outer_join(right, left.a == right.b) >>> left.left_outer_join(right, "a = b") :param right: Right table. :param join_predicate: Optional, the join predicate expression string. :return: The result table. :rtype: pyflink.table.Table """ if join_predicate is None: return Table(self._j_table.leftOuterJoin(right._j_table), self._t_env) else: return Table(self._j_table.leftOuterJoin( right._j_table, _get_java_expression(join_predicate)), self._t_env) def right_outer_join(self, right: 'Table', join_predicate: Union[str, Expression[bool]]): """ Joins two :class:`~pyflink.table.Table`. Similar to a SQL right outer join. The fields of the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if necessary. .. note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` and its :class:`~pyflink.table.TableConfig` must have null check enabled (default). Example: :: >>> left.right_outer_join(right, left.a == right.b) >>> left.right_outer_join(right, "a = b") :param right: Right table. :param join_predicate: The join predicate expression string. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.rightOuterJoin( right._j_table, _get_java_expression(join_predicate)), self._t_env) def full_outer_join(self, right: 'Table', join_predicate: Union[str, Expression[bool]]): """ Joins two :class:`~pyflink.table.Table`. Similar to a SQL full outer join. The fields of the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if necessary. .. note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` and its :class:`~pyflink.table.TableConfig` must have null check enabled (default). Example: :: >>> left.full_outer_join(right, left.a == right.b) >>> left.full_outer_join(right, "a = b") :param right: Right table. :param join_predicate: The join predicate expression string. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.fullOuterJoin( right._j_table, _get_java_expression(join_predicate)), self._t_env) def join_lateral(self, table_function_call: Union[str, Expression], join_predicate: Union[str, Expression[bool]] = None): """ Joins this Table with an user-defined TableFunction. This join is similar to a SQL inner join but works with a table function. Each row of the table is joined with the rows produced by the table function. Example: :: >>> t_env.create_java_temporary_system_function("split", ... 
"java.table.function.class.name") >>> tab.join_lateral("split(text, ' ') as (b)", "a = b") >>> from pyflink.table import expressions as expr >>> tab.join_lateral(expr.call('split', ' ').alias('b'), expr.col('a') == expr.col('b')) :param table_function_call: An expression representing a table function call. :param join_predicate: Optional, The join predicate expression string, join ON TRUE if not exist. :return: The result Table. :rtype: pyflink.table.Table """ if join_predicate is None: return Table(self._j_table.joinLateral( _get_java_expression(table_function_call)), self._t_env) else: return Table(self._j_table.joinLateral( _get_java_expression(table_function_call), _get_java_expression(join_predicate)), self._t_env) def left_outer_join_lateral(self, table_function_call: Union[str, Expression], join_predicate: Union[str, Expression[bool]] = None): """ Joins this Table with an user-defined TableFunction. This join is similar to a SQL left outer join but works with a table function. Each row of the table is joined with all rows produced by the table function. If the join does not produce any row, the outer row is padded with nulls. Example: :: >>> t_env.create_java_temporary_system_function("split", ... "java.table.function.class.name") >>> tab.left_outer_join_lateral("split(text, ' ') as (b)") >>> from pyflink.table import expressions as expr >>> tab.left_outer_join_lateral(expr.call('split', ' ').alias('b')) :param table_function_call: An expression representing a table function call. :param join_predicate: Optional, The join predicate expression string, join ON TRUE if not exist. :return: The result Table. :rtype: pyflink.table.Table """ if join_predicate is None: return Table(self._j_table.leftOuterJoinLateral( _get_java_expression(table_function_call)), self._t_env) else: return Table(self._j_table.leftOuterJoinLateral( _get_java_expression(table_function_call), _get_java_expression(join_predicate)), self._t_env) def minus(self, right: 'Table'): """ Minus of two :class:`~pyflink.table.Table` with duplicate records removed. Similar to a SQL EXCEPT clause. Minus returns records from the left table that do not exist in the right table. Duplicate records in the left table are returned exactly once, i.e., duplicates are removed. Both tables must have identical field types. .. note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`. Example: :: >>> left.minus(right) :param right: Right table. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.minus(right._j_table), self._t_env) def minus_all(self, right: 'Table'): """ Minus of two :class:`~pyflink.table.Table`. Similar to a SQL EXCEPT ALL. Similar to a SQL EXCEPT ALL clause. MinusAll returns the records that do not exist in the right table. A record that is present n times in the left table and m times in the right table is returned (n - m) times, i.e., as many duplicates as are present in the right table are removed. Both tables must have identical field types. .. note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`. Example: :: >>> left.minus_all(right) :param right: Right table. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.minusAll(right._j_table), self._t_env) def union(self, right: 'Table'): """ Unions two :class:`~pyflink.table.Table` with duplicate records removed. Similar to a SQL UNION. The fields of the two union operations must fully overlap. .. 
note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`. Example: :: >>> left.union(right) :param right: Right table. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.union(right._j_table), self._t_env) def union_all(self, right: 'Table'): """ Unions two :class:`~pyflink.table.Table`. Similar to a SQL UNION ALL. The fields of the two union operations must fully overlap. .. note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`. Example: :: >>> left.union_all(right) :param right: Right table. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.unionAll(right._j_table), self._t_env) def intersect(self, right: 'Table'): """ Intersects two :class:`~pyflink.table.Table` with duplicate records removed. Intersect returns records that exist in both tables. If a record is present in one or both tables more than once, it is returned just once, i.e., the resulting table has no duplicate records. Similar to a SQL INTERSECT. The fields of the two intersect operations must fully overlap. .. note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`. Example: :: >>> left.intersect(right) :param right: Right table. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.intersect(right._j_table), self._t_env) def intersect_all(self, right: 'Table'): """ Intersects two :class:`~pyflink.table.Table`. IntersectAll returns records that exist in both tables. If a record is present in both tables more than once, it is returned as many times as it is present in both tables, i.e., the resulting table might have duplicate records. Similar to an SQL INTERSECT ALL. The fields of the two intersect operations must fully overlap. .. note:: Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`. Example: :: >>> left.intersect_all(right) :param right: Right table. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.intersectAll(right._j_table), self._t_env) def order_by(self, *fields: Union[str, Expression]): """ Sorts the given :class:`~pyflink.table.Table`. Similar to SQL ORDER BY. The resulting Table is sorted globally sorted across all parallel partitions. Example: :: >>> tab.order_by(tab.name.desc) >>> tab.order_by("name.desc") For unbounded tables, this operation requires a sorting on a time attribute or a subsequent fetch operation. :param fields: Order fields expression string. :return: The result table. :rtype: pyflink.table.Table """ if all(isinstance(f, Expression) for f in fields): return Table(self._j_table.orderBy(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return Table(self._j_table.orderBy(fields[0]), self._t_env) def offset(self, offset: int): """ Limits a (possibly sorted) result from an offset position. This method can be combined with a preceding :func:`~pyflink.table.Table.order_by` call for a deterministic order and a subsequent :func:`~pyflink.table.Table.fetch` call to return n rows after skipping the first o rows. Example: :: # skips the first 3 rows and returns all following rows. >>> tab.order_by(tab.name.desc).offset(3) >>> tab.order_by("name.desc").offset(3) # skips the first 10 rows and returns the next 5 rows. >>> tab.order_by(tab.name.desc).offset(10).fetch(5) For unbounded tables, this operation requires a subsequent fetch operation. :param offset: Number of records to skip. 
:return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.offset(offset), self._t_env) def fetch(self, fetch: int): """ Limits a (possibly sorted) result to the first n rows. This method can be combined with a preceding :func:`~pyflink.table.Table.order_by` call for a deterministic order and :func:`~pyflink.table.Table.offset` call to return n rows after skipping the first o rows. Example: Returns the first 3 records. :: >>> tab.order_by(tab.name.desc).fetch(3) >>> tab.order_by("name.desc").fetch(3) Skips the first 10 rows and returns the next 5 rows. :: >>> tab.order_by(tab.name.desc).offset(10).fetch(5) :param fetch: The number of records to return. Fetch must be >= 0. :return: The result table. :rtype: pyflink.table.Table """ return Table(self._j_table.fetch(fetch), self._t_env) def limit(self, fetch: int, offset: int = 0): """ Limits a (possibly sorted) result to the first n rows. This method is a synonym for :func:`~pyflink.table.Table.offset` followed by :func:`~pyflink.table.Table.fetch`. Example: Returns the first 3 records. :: >>> tab.limit(3) Skips the first 10 rows and returns the next 5 rows. :: >>> tab.limit(5, 10) :param fetch: the first number of rows to fetch. :param offset: the number of records to skip, default 0. :return: The result table. """ return self.offset(offset).fetch(fetch) def window(self, window: GroupWindow): """ Defines group window on the records of a table. A group window groups the records of a table by assigning them to windows defined by a time or row interval. For streaming tables of infinite size, grouping into windows is required to define finite groups on which group-based aggregates can be computed. For batch tables of finite size, windowing essentially provides shortcuts for time-based groupBy. .. note:: Computing windowed aggregates on a streaming table is only a parallel operation if additional grouping attributes are added to the :func:`~pyflink.table.GroupWindowedTable.group_by` clause. If the :func:`~pyflink.table.GroupWindowedTable.group_by` only references a GroupWindow alias, the streamed table will be processed by a single task, i.e., with parallelism 1. Example: :: >>> from pyflink.table import expressions as expr >>> tab.window(Tumble.over(expr.lit(10).minutes).on(tab.rowtime).alias('w')) \\ ... .group_by(col('w')) \\ ... .select(tab.a.sum.alias('a'), ... col('w').start.alias('b'), ... col('w').end.alias('c'), ... col('w').rowtime.alias('d')) :param window: A :class:`~pyflink.table.window.GroupWindow` created from :class:`~pyflink.table.window.Tumble`, :class:`~pyflink.table.window.Session` or :class:`~pyflink.table.window.Slide`. :return: A group windowed table. :rtype: GroupWindowedTable """ return GroupWindowedTable(self._j_table.window(window._java_window), self._t_env) def over_window(self, *over_windows: OverWindow): """ Defines over-windows on the records of a table. An over-window defines for each record an interval of records over which aggregation functions can be computed. Example: :: >>> from pyflink.table import expressions as expr >>> tab.over_window(Over.partition_by(tab.c).order_by(tab.rowtime) \\ ... .preceding(lit(10).seconds).alias("ow")) \\ ... .select(tab.c, tab.b.count.over(col('ow'), tab.e.sum.over(col('ow')))) .. note:: Computing over window aggregates on a streaming table is only a parallel operation if the window is partitioned. Otherwise, the whole stream will be processed by a single task, i.e., with parallelism 1. .. 
note:: Over-windows for batch tables are currently not supported. :param over_windows: over windows created from :class:`~pyflink.table.window.Over`. :return: A over windowed table. :rtype: pyflink.table.OverWindowedTable """ gateway = get_gateway() window_array = to_jarray(gateway.jvm.OverWindow, [item._java_over_window for item in over_windows]) return OverWindowedTable(self._j_table.window(window_array), self._t_env) def add_columns(self, *fields: Union[str, Expression]): """ Adds additional columns. Similar to a SQL SELECT statement. The field expressions can contain complex expressions, but can not contain aggregations. It will throw an exception if the added fields already exist. Example: :: >>> from pyflink.table import expressions as expr >>> tab.add_columns((tab.a + 1).alias('a1'), expr.concat(tab.b, 'sunny').alias('b1')) >>> tab.add_columns("a + 1 as a1, concat(b, 'sunny') as b1") :param fields: Column list string. :return: The result table. :rtype: pyflink.table.Table """ if all(isinstance(f, Expression) for f in fields): return Table(self._j_table.addColumns(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return Table(self._j_table.addColumns(fields[0]), self._t_env) def add_or_replace_columns(self, *fields: Union[str, Expression]): """ Adds additional columns. Similar to a SQL SELECT statement. The field expressions can contain complex expressions, but can not contain aggregations. Existing fields will be replaced if add columns name is the same as the existing column name. Moreover, if the added fields have duplicate field name, then the last one is used. Example: :: >>> from pyflink.table import expressions as expr >>> tab.add_or_replace_columns((tab.a + 1).alias('a1'), ... expr.concat(tab.b, 'sunny').alias('b1')) >>> tab.add_or_replace_columns("a + 1 as a1, concat(b, 'sunny') as b1") :param fields: Column list string. :return: The result table. :rtype: pyflink.table.Table """ if all(isinstance(f, Expression) for f in fields): return Table(self._j_table.addOrReplaceColumns(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return Table(self._j_table.addOrReplaceColumns(fields[0]), self._t_env) def rename_columns(self, *fields: Union[str, Expression]): """ Renames existing columns. Similar to a field alias statement. The field expressions should be alias expressions, and only the existing fields can be renamed. Example: :: >>> tab.rename_columns(tab.a.alias('a1'), tab.b.alias('b1')) >>> tab.rename_columns("a as a1, b as b1") :param fields: Column list string. :return: The result table. :rtype: pyflink.table.Table """ if all(isinstance(f, Expression) for f in fields): return Table(self._j_table.renameColumns(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return Table(self._j_table.renameColumns(fields[0]), self._t_env) def drop_columns(self, *fields: Union[str, Expression]): """ Drops existing columns. The field expressions should be field reference expressions. Example: :: >>> tab.drop_columns(tab.a, tab.b) >>> tab.drop_columns("a, b") :param fields: Column list string. :return: The result table. 
:rtype: pyflink.table.Table """ if all(isinstance(f, Expression) for f in fields): return Table(self._j_table.dropColumns(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return Table(self._j_table.dropColumns(fields[0]), self._t_env) def insert_into(self, table_path: str): """ Writes the :class:`~pyflink.table.Table` to a :class:`~pyflink.table.TableSink` that was registered under the specified name. For the path resolution algorithm see :func:`~TableEnvironment.use_database`. Example: :: >>> tab.insert_into("sink") :param table_path: The path of the registered :class:`~pyflink.table.TableSink` to which the :class:`~pyflink.table.Table` is written. .. note:: Deprecated in 1.11. Use :func:`execute_insert` for single sink, use :class:`TableTableEnvironment`#:func:`create_statement_set` for multiple sinks. """ warnings.warn("Deprecated in 1.11. Use execute_insert for single sink, " "use TableTableEnvironment#create_statement_set for multiple sinks.", DeprecationWarning) self._j_table.insertInto(table_path) def to_pandas(self): """ Converts the table to a pandas DataFrame. It will collect the content of the table to the client side and so please make sure that the content of the table could fit in memory before calling this method. Example: :: >>> pdf = pd.DataFrame(np.random.rand(1000, 2)) >>> table = table_env.from_pandas(pdf, ["a", "b"]) >>> table.filter(table.a > 0.5).to_pandas() :return: the result pandas DataFrame. .. versionadded:: 1.11.0 """ self._t_env._before_execute() gateway = get_gateway() max_arrow_batch_size = self._j_table.getTableEnvironment().getConfig().getConfiguration()\ .getInteger(gateway.jvm.org.apache.flink.python.PythonOptions.MAX_ARROW_BATCH_SIZE) batches = gateway.jvm.org.apache.flink.table.runtime.arrow.ArrowUtils\ .collectAsPandasDataFrame(self._j_table, max_arrow_batch_size) if batches.hasNext(): import pytz timezone = pytz.timezone( self._j_table.getTableEnvironment().getConfig().getLocalTimeZone().getId()) serializer = ArrowSerializer( create_arrow_schema(self.get_schema().get_field_names(), self.get_schema().get_field_data_types()), self.get_schema().to_row_data_type(), timezone) import pyarrow as pa table = pa.Table.from_batches(serializer.load_from_iterator(batches)) pdf = table.to_pandas() schema = self.get_schema() for field_name in schema.get_field_names(): pdf[field_name] = tz_convert_from_internal( pdf[field_name], schema.get_field_data_type(field_name), timezone) return pdf else: import pandas as pd return pd.DataFrame.from_records([], columns=self.get_schema().get_field_names()) def get_schema(self): """ Returns the :class:`~pyflink.table.TableSchema` of this table. :return: The schema of this table. :rtype: pyflink.table.TableSchema """ return TableSchema(j_table_schema=self._j_table.getSchema()) def print_schema(self): """ Prints the schema of this table to the console in a tree format. """ self._j_table.printSchema() def execute_insert(self, table_path: str, overwrite: bool = False): """ Writes the :class:`~pyflink.table.Table` to a :class:`~pyflink.table.TableSink` that was registered under the specified name, and then execute the insert operation. For the path resolution algorithm see :func:`~TableEnvironment.use_database`. Example: :: >>> tab.execute_insert("sink") :param table_path: The path of the registered :class:`~pyflink.table.TableSink` to which the :class:`~pyflink.table.Table` is written. 
:param overwrite: The flag that indicates whether the insert should overwrite existing data or not. :return: The table result. .. versionadded:: 1.11.0 """ self._t_env._before_execute() return TableResult(self._j_table.executeInsert(table_path, overwrite)) def execute(self): """ Collects the contents of the current table local client. Example: :: >>> tab.execute() :return: The content of the table. .. versionadded:: 1.11.0 """ self._t_env._before_execute() return TableResult(self._j_table.execute()) def explain(self, *extra_details: ExplainDetail) -> str: """ Returns the AST of this table and the execution plan. :param extra_details: The extra explain details which the explain result should include, e.g. estimated cost, changelog mode for streaming :return: The statement for which the AST and execution plan will be returned. .. versionadded:: 1.11.0 """ j_extra_details = to_j_explain_detail_arr(extra_details) return self._j_table.explain(j_extra_details) class GroupedTable(object): """ A table that has been grouped on a set of grouping keys. """ def __init__(self, java_table, t_env): self._j_table = java_table self._t_env = t_env def select(self, *fields: Union[str, Expression]): """ Performs a selection operation on a grouped table. Similar to an SQL SELECT statement. The field expressions can contain complex expressions and aggregations. Example: :: >>> tab.group_by(tab.key).select(tab.key, tab.value.avg.alias('average')) >>> tab.group_by("key").select("key, value.avg as average") :param fields: Expression string that contains group keys and aggregate function calls. :return: The result table. :rtype: pyflink.table.Table """ if all(isinstance(f, Expression) for f in fields): return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return Table(self._j_table.select(fields[0]), self._t_env) class GroupWindowedTable(object): """ A table that has been windowed for :class:`~pyflink.table.GroupWindow`. """ def __init__(self, java_group_windowed_table, t_env): self._j_table = java_group_windowed_table self._t_env = t_env def group_by(self, *fields: Union[str, Expression]): """ Groups the elements by a mandatory window and one or more optional grouping attributes. The window is specified by referring to its alias. If no additional grouping attribute is specified and if the input is a streaming table, the aggregation will be performed by a single task, i.e., with parallelism 1. Aggregations are performed per group and defined by a subsequent :func:`~pyflink.table.WindowGroupedTable.select` clause similar to SQL SELECT-GROUP-BY query. Example: :: >>> from pyflink.table import expressions as expr >>> tab.window(Tumble.over(expr.lit(10).minutes).on(tab.rowtime).alias('w')) \\ ... .group_by(col('w')) \\ ... .select(tab.a.sum.alias('a'), ... col('w').start.alias('b'), ... col('w').end.alias('c'), ... col('w').rowtime.alias('d')) :param fields: Group keys. :return: A window grouped table. :rtype: pyflink.table.WindowGroupedTable """ if all(isinstance(f, Expression) for f in fields): return WindowGroupedTable( self._j_table.groupBy(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return WindowGroupedTable(self._j_table.groupBy(fields[0]), self._t_env) class WindowGroupedTable(object): """ A table that has been windowed and grouped for :class:`~pyflink.table.window.GroupWindow`. 
""" def __init__(self, java_window_grouped_table, t_env): self._j_table = java_window_grouped_table self._t_env = t_env def select(self, *fields: Union[str, Expression]): """ Performs a selection operation on a window grouped table. Similar to an SQL SELECT statement. The field expressions can contain complex expressions and aggregations. Example: :: >>> window_grouped_table.select(col('key'), ... col('window').start, ... col('value').avg.alias('valavg')) >>> window_grouped_table.select("key, window.start, value.avg as valavg") :param fields: Expression string. :return: The result table. :rtype: pyflink.table.Table """ if all(isinstance(f, Expression) for f in fields): return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return Table(self._j_table.select(fields[0]), self._t_env) class OverWindowedTable(object): """ A table that has been windowed for :class:`~pyflink.table.window.OverWindow`. Unlike group windows, which are specified in the GROUP BY clause, over windows do not collapse rows. Instead over window aggregates compute an aggregate for each input row over a range of its neighboring rows. """ def __init__(self, java_over_windowed_table, t_env): self._j_table = java_over_windowed_table self._t_env = t_env def select(self, *fields: Union[str, Expression]): """ Performs a selection operation on a over windowed table. Similar to an SQL SELECT statement. The field expressions can contain complex expressions and aggregations. Example: :: >>> over_windowed_table.select(col('c'), ... col('b').count.over(col('ow')), ... col('e').sum.over(col('ow'))) >>> over_windowed_table.select("c, b.count over ow, e.sum over ow") :param fields: Expression string. :return: The result table. :rtype: pyflink.table.Table """ if all(isinstance(f, Expression) for f in fields): return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env) else: assert len(fields) == 1 assert isinstance(fields[0], str) return Table(self._j_table.select(fields[0]), self._t_env)
apache-2.0
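The minus_all and intersect_all docstrings in the pyflink table.py content above describe bag (multiset) semantics: a record present n times on the left and m times on the right survives EXCEPT ALL (n - m) times and INTERSECT ALL min(n, m) times, while minus and intersect deduplicate. A minimal, dependency-free sketch of that counting rule using collections.Counter (plain Python lists stand in for tables here, this is not PyFlink code):

from collections import Counter

# toy "tables" as flat lists of records
left = ["a", "a", "a", "b", "c"]
right = ["a", "c", "c"]

l, r = Counter(left), Counter(right)

print(list((l - r).elements()))         # minus_all     -> ['a', 'a', 'b']
print(sorted(set(left) - set(right)))   # minus         -> ['b']
print(list((l & r).elements()))         # intersect_all -> ['a', 'c']
print(sorted(set(left) & set(right)))   # intersect     -> ['a', 'c']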
lucidfrontier45/scikit-learn
sklearn/linear_model/tests/test_sgd.py
3
26746
import unittest import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import raises from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_equal from sklearn import linear_model, datasets, metrics from sklearn import preprocessing from sklearn.linear_model import SGDClassifier, SGDRegressor from sklearn.base import clone class SparseSGDClassifier(SGDClassifier): def fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return SGDClassifier.fit(self, X, y, *args, **kw) def partial_fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return SGDClassifier.partial_fit(self, X, y, *args, **kw) def decision_function(self, X, *args, **kw): X = sp.csr_matrix(X) return SGDClassifier.decision_function(self, X, *args, **kw) def predict_proba(self, X, *args, **kw): X = sp.csr_matrix(X) return SGDClassifier.predict_proba(self, X, *args, **kw) def predict_log_proba(self, X, *args, **kw): X = sp.csr_matrix(X) return SGDClassifier.predict_log_proba(self, X, *args, **kw) class SparseSGDRegressor(SGDRegressor): def fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return SGDRegressor.fit(self, X, y, *args, **kw) def partial_fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return SGDRegressor.partial_fit(self, X, y, *args, **kw) def decision_function(self, X, *args, **kw): X = sp.csr_matrix(X) return SGDRegressor.decision_function(self, X, *args, **kw) ## ## Test Data ## # test sample 1 X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) Y = [1, 1, 1, 2, 2, 2] T = np.array([[-1, -1], [2, 2], [3, 2]]) true_result = [1, 2, 2] # test sample 2; string class labels X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5], [1, 1], [0.75, 0.5], [1.5, 1.5], [-1, -1], [0, -0.5], [1, -1]]) Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3 T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]]) true_result2 = ["one", "two", "three"] # test sample 3 X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]]) Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2]) # test sample 4 - two more or less redundent feature groups X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0], [1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0], [0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1], [0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]]) Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2]) iris = datasets.load_iris() # test sample 5 - test sample 1 as binary classification problem X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) Y5 = [1, 1, 1, 2, 2, 2] true_result5 = [0, 1, 1] ## ## Classification Test Case ## class CommonTest(object): def _test_warm_start(self, lr): # Test that explicit warm restart... clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False, learning_rate=lr) clf.fit(X, Y) clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False, learning_rate=lr) clf2.fit(X, Y, coef_init=clf.coef_.copy(), intercept_init=clf.intercept_.copy()) #... and implicit warm restart are equivalent. 
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False, warm_start=True, learning_rate=lr) clf3.fit(X, Y) assert_equal(clf3.t_, clf.t_) assert_array_almost_equal(clf3.coef_, clf.coef_) clf3.set_params(alpha=0.001) clf3.fit(X, Y) assert_equal(clf3.t_, clf2.t_) assert_array_almost_equal(clf3.coef_, clf2.coef_) def test_warm_start_constant(self): self._test_warm_start("constant") def test_warm_start_invscaling(self): self._test_warm_start("invscaling") def test_warm_start_optimal(self): self._test_warm_start("optimal") def test_multiple_fit(self): """Test multiple calls of fit w/ different shaped inputs.""" clf = self.factory(alpha=0.01, n_iter=5, shuffle=False) clf.fit(X, Y) assert_true(hasattr(clf, "coef_")) clf.fit(X[:, :-1], Y) def test_input_format(self): """Input format tests. """ clf = self.factory(alpha=0.01, n_iter=5, shuffle=False) Y_ = np.array(Y)[:, np.newaxis] clf.fit(X, Y_) Y_ = np.c_[Y_, Y_] assert_raises(ValueError, clf.fit, X, Y_) def test_clone(self): """Test whether clone works ok. """ clf = self.factory(alpha=0.01, n_iter=5, penalty='l1') clf = clone(clf) clf.set_params(penalty='l2') clf.fit(X, Y) clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2') clf2.fit(X, Y) assert_array_equal(clf.coef_, clf2.coef_) class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest): """Test suite for the dense representation variant of SGD""" factory = SGDClassifier def test_sgd(self): """Check that SGD gives any results :-)""" for loss in ("hinge", "squared_hinge", "log", "modified_huber"): clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True, loss=loss, n_iter=10, shuffle=True) clf.fit(X, Y) #assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7) assert_array_equal(clf.predict(T), true_result) @raises(ValueError) def test_sgd_bad_l1_ratio(self): """Check whether expected ValueError on bad l1_ratio""" self.factory(l1_ratio=1.1) @raises(ValueError) def test_sgd_bad_learning_rate_schedule(self): """Check whether expected ValueError on bad learning_rate""" self.factory(learning_rate="<unknown>") @raises(ValueError) def test_sgd_bad_eta0(self): """Check whether expected ValueError on bad eta0""" self.factory(eta0=0, learning_rate="constant") @raises(ValueError) def test_sgd_bad_alpha(self): """Check whether expected ValueError on bad alpha""" self.factory(alpha=-.1) @raises(ValueError) def test_sgd_bad_penalty(self): """Check whether expected ValueError on bad penalty""" self.factory(penalty='foobar', l1_ratio=0.85) @raises(ValueError) def test_sgd_bad_loss(self): """Check whether expected ValueError on bad loss""" self.factory(loss="foobar") @raises(ValueError) def test_sgd_n_iter_param(self): """Test parameter validity check""" self.factory(n_iter=-10000) @raises(ValueError) def test_sgd_shuffle_param(self): """Test parameter validity check""" self.factory(shuffle="false") @raises(TypeError) def test_arument_coef(self): """Checks coef_init not allowed as model argument (only fit)""" # Provided coef_ does not match dataset. self.factory(coef_init=np.zeros((3,))).fit(X, Y) @raises(ValueError) def test_provide_coef(self): """Checks coef_init shape for the warm starts""" # Provided coef_ does not match dataset. self.factory().fit(X, Y, coef_init=np.zeros((3,))) @raises(ValueError) def test_set_intercept(self): """Checks intercept_ shape for the warm starts""" # Provided intercept_ does not match dataset. 
self.factory().fit(X, Y, intercept_init=np.zeros((3,))) def test_set_intercept_binary(self): """Checks intercept_ shape for the warm starts in binary case""" self.factory().fit(X5, Y5, intercept_init=0) def test_set_intercept_to_intercept(self): """Checks intercept_ shape consistency for the warm starts""" # Inconsistent intercept_ shape. clf = self.factory().fit(X5, Y5) self.factory().fit(X5, Y5, intercept_init=clf.intercept_) clf = self.factory().fit(X, Y) self.factory().fit(X, Y, intercept_init=clf.intercept_) @raises(ValueError) def test_sgd_at_least_two_labels(self): """Target must have at least two labels""" self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9)) def test_sgd_multiclass(self): """Multi-class test case""" clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2) assert_equal(clf.coef_.shape, (3, 2)) assert_equal(clf.intercept_.shape, (3,)) assert_equal(clf.decision_function([0, 0]).shape, (1, 3)) pred = clf.predict(T2) assert_array_equal(pred, true_result2) def test_sgd_multiclass_with_init_coef(self): """Multi-class test case""" clf = self.factory(alpha=0.01, n_iter=20) clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3)) assert_equal(clf.coef_.shape, (3, 2)) assert_true(clf.intercept_.shape, (3,)) pred = clf.predict(T2) assert_array_equal(pred, true_result2) def test_sgd_multiclass_njobs(self): """Multi-class test case with multi-core support""" clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2) assert_equal(clf.coef_.shape, (3, 2)) assert_equal(clf.intercept_.shape, (3,)) assert_equal(clf.decision_function([0, 0]).shape, (1, 3)) pred = clf.predict(T2) assert_array_equal(pred, true_result2) def test_set_coef_multiclass(self): """Checks coef_init and intercept_init shape for for multi-class problems""" # Provided coef_ does not match dataset clf = self.factory() assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2))) # Provided coef_ does match dataset clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2))) # Provided intercept_ does not match dataset clf = self.factory() assert_raises(ValueError, clf.fit, X2, Y2, intercept_init=np.zeros((1,))) # Provided intercept_ does match dataset. clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,))) def test_sgd_proba(self): """Check SGD.predict_proba""" # hinge loss does not allow for conditional prob estimate clf = self.factory(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y) assert_raises(NotImplementedError, clf.predict_proba, [3, 2]) # the log and modified_huber losses can output "probability" estimates for loss in ("log", "modified_huber"): clf = self.factory(loss=loss, alpha=0.01, n_iter=10).fit(X, Y) p = clf.predict_proba([3, 2]) assert_true(p[0, 1] > 0.5) p = clf.predict_proba([-1, -1]) assert_true(p[0, 1] < 0.5) p = clf.predict_log_proba([3, 2]) assert_true(p[0, 1] > p[0, 0]) p = clf.predict_log_proba([-1, -1]) assert_true(p[0, 1] < p[0, 0]) def test_sgd_l1(self): """Test L1 regularization""" n = len(X4) rng = np.random.RandomState(13) idx = np.arange(n) rng.shuffle(idx) X = X4[idx, :] Y = Y4[idx, :] clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False, n_iter=2000) clf.fit(X, Y) assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,))) pred = clf.predict(X) assert_array_equal(pred, Y) def test_class_weights(self): """ Test class weights. 
""" X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False, class_weight=None) clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # we give a small weights to class 1 clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False, class_weight={1: 0.001}) clf.fit(X, y) # now the hyperplane should rotate clock-wise and # the prediction on this point should shift assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) def test_equal_class_weight(self): """Test if equal class weights approx. equals no class weights. """ X = [[1, 0], [1, 0], [0, 1], [0, 1]] y = [0, 0, 1, 1] clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None) clf.fit(X, y) X = [[1, 0], [0, 1]] y = [0, 1] clf_weighted = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5, 1: 0.5}) clf_weighted.fit(X, y) # should be similar up to some epsilon due to learning rate schedule assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2) @raises(ValueError) def test_wrong_class_weight_label(self): """ValueError due to not existing class label.""" clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5}) clf.fit(X, Y) @raises(ValueError) def test_wrong_class_weight_format(self): """ValueError due to wrong class_weight argument type.""" clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5]) clf.fit(X, Y) def test_auto_weight(self): """Test class weights for imbalanced data""" # compute reference metrics on iris dataset that is quite balanced by # default X, y = iris.data, iris.target X = preprocessing.scale(X) idx = np.arange(X.shape[0]) rng = np.random.RandomState(0) rng.shuffle(idx) X = X[idx] y = y[idx] clf = self.factory(alpha=0.0001, n_iter=1000, class_weight=None).fit(X, y) assert_almost_equal(metrics.f1_score(y, clf.predict(X)), 0.96, decimal=1) # make the same prediction using automated class_weight clf_auto = self.factory(alpha=0.0001, n_iter=1000, class_weight="auto").fit(X, y) assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96, decimal=1) # Make sure that in the balanced case it does not change anything # to use "auto" assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6) # build an very very imbalanced dataset out of iris data X_0 = X[y == 0, :] y_0 = y[y == 0] X_imbalanced = np.vstack([X] + [X_0] * 10) y_imbalanced = np.concatenate([y] + [y_0] * 10) # fit a model on the imbalanced data without class weight info clf = self.factory(n_iter=1000, class_weight=None) clf.fit(X_imbalanced, y_imbalanced) y_pred = clf.predict(X) assert_less(metrics.f1_score(y, y_pred), 0.96) # fit a model with auto class_weight enabled clf = self.factory(n_iter=1000, class_weight="auto") clf.fit(X_imbalanced, y_imbalanced) y_pred = clf.predict(X) assert_greater(metrics.f1_score(y, y_pred), 0.96) # fit another using a fit parameter override clf = self.factory(n_iter=1000, class_weight="auto") clf.fit(X_imbalanced, y_imbalanced) y_pred = clf.predict(X) assert_greater(metrics.f1_score(y, y_pred), 0.96) def test_sample_weights(self): """Test weights on individual samples""" X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False) clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # we give a small weights to class 1 clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2) # now the hyperplane should rotate clock-wise and # the prediction on this 
point should shift assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) @raises(ValueError) def test_wrong_sample_weights(self): """Test if ValueError is raised if sample_weight has wrong shape""" clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False) # provided sample_weight too long clf.fit(X, Y, sample_weight=range(7)) @raises(ValueError) def test_partial_fit_exception(self): clf = self.factory(alpha=0.01) # classes was not specified clf.partial_fit(X3, Y3) def test_partial_fit_binary(self): third = X.shape[0] // 3 clf = self.factory(alpha=0.01) classes = np.unique(Y) clf.partial_fit(X[:third], Y[:third], classes=classes) assert_equal(clf.coef_.shape, (1, X.shape[1])) assert_equal(clf.intercept_.shape, (1,)) assert_equal(clf.decision_function([0, 0]).shape, (1, )) id1 = id(clf.coef_.data) clf.partial_fit(X[third:], Y[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated assert_true(id1, id2) y_pred = clf.predict(T) assert_array_equal(y_pred, true_result) def test_partial_fit_multiclass(self): third = X2.shape[0] // 3 clf = self.factory(alpha=0.01) classes = np.unique(Y2) clf.partial_fit(X2[:third], Y2[:third], classes=classes) assert_equal(clf.coef_.shape, (3, X2.shape[1])) assert_equal(clf.intercept_.shape, (3,)) assert_equal(clf.decision_function([0, 0]).shape, (1, 3)) id1 = id(clf.coef_.data) clf.partial_fit(X2[third:], Y2[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated assert_true(id1, id2) def _test_partial_fit_equal_fit(self, lr): for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)): clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2, learning_rate=lr, shuffle=False) clf.fit(X_, Y_) y_pred = clf.decision_function(T_) t = clf.t_ classes = np.unique(Y_) clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False) for i in range(2): clf.partial_fit(X_, Y_, classes=classes) y_pred2 = clf.decision_function(T_) assert_equal(clf.t_, t) assert_array_almost_equal(y_pred, y_pred2, decimal=2) def test_partial_fit_equal_fit_constant(self): self._test_partial_fit_equal_fit("constant") def test_partial_fit_equal_fit_optimal(self): self._test_partial_fit_equal_fit("optimal") def test_partial_fit_equal_fit_invscaling(self): self._test_partial_fit_equal_fit("invscaling") def test_regression_losses(self): clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.1, loss="epsilon_insensitive") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.1, loss="squared_epsilon_insensitive") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) clf = self.factory(alpha=0.01, loss="huber") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01, loss="squared_loss") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase): """Run exactly the same tests using the sparse representation variant""" factory = SparseSGDClassifier ############################################################################### # Regression Test Case class DenseSGDRegressorTestCase(unittest.TestCase): """Test suite for the dense representation variant of SGD""" factory = SGDRegressor def test_sgd(self): """Check that SGD gives any results.""" clf = self.factory(alpha=0.1, n_iter=2, fit_intercept=False) clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2]) assert_equal(clf.coef_[0], clf.coef_[1]) @raises(ValueError) def 
test_sgd_bad_penalty(self): """Check whether expected ValueError on bad penalty""" self.factory(penalty='foobar', l1_ratio=0.85) @raises(ValueError) def test_sgd_bad_loss(self): """Check whether expected ValueError on bad loss""" self.factory(loss="foobar") def test_sgd_least_squares_fit(self): xmin, xmax = -5, 5 n_samples = 100 rng = np.random.RandomState(0) X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) # simple linear function without noise y = 0.5 * X.ravel() clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.99) # simple linear function with noise y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.5) def test_sgd_epsilon_insensitive(self): xmin, xmax = -5, 5 n_samples = 100 X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) # simple linear function without noise y = 0.5 * X.ravel() clf = self.factory(loss='epsilon_insensitive', epsilon=0.01, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_true(score > 0.99) # simple linear function with noise y = 0.5 * X.ravel() \ + np.random.randn(n_samples, 1).ravel() clf = self.factory(loss='epsilon_insensitive', epsilon=0.01, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_true(score > 0.5) def test_sgd_huber_fit(self): xmin, xmax = -5, 5 n_samples = 100 rng = np.random.RandomState(0) X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) # simple linear function without noise y = 0.5 * X.ravel() clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.99) # simple linear function with noise y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.5) def test_elasticnet_convergence(self): """Check that the SGD ouput is consistent with coordinate descent""" n_samples, n_features = 1000, 5 rng = np.random.RandomState(0) X = np.random.randn(n_samples, n_features) # ground_truth linear model that generate y from X and to which the # models should converge if the regularizer would be set to 0.0 ground_truth_coef = rng.randn(n_features) y = np.dot(X, ground_truth_coef) # XXX: alpha = 0.1 seems to cause convergence problems for alpha in [0.01, 0.001]: for l1_ratio in [0.5, 0.8, 1.0]: cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False) cd.fit(X, y) sgd = self.factory(penalty='elasticnet', n_iter=50, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False) sgd.fit(X, y) err_msg = ("cd and sgd did not converge to comparable " "results for alpha=%f and l1_ratio=%f" % (alpha, l1_ratio)) assert_almost_equal(cd.coef_, sgd.coef_, decimal=2, err_msg=err_msg) def test_partial_fit(self): third = X.shape[0] // 3 clf = self.factory(alpha=0.01) clf.partial_fit(X[:third], Y[:third]) assert_equal(clf.coef_.shape, (X.shape[1], )) assert_equal(clf.intercept_.shape, (1,)) assert_equal(clf.decision_function([0, 0]).shape, (1, )) id1 = id(clf.coef_.data) clf.partial_fit(X[third:], Y[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated assert_true(id1, id2) def _test_partial_fit_equal_fit(self, lr): clf = self.factory(alpha=0.01, n_iter=2, 
eta0=0.01, learning_rate=lr, shuffle=False) clf.fit(X, Y) y_pred = clf.predict(T) t = clf.t_ clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False) for i in range(2): clf.partial_fit(X, Y) y_pred2 = clf.predict(T) assert_equal(clf.t_, t) assert_array_almost_equal(y_pred, y_pred2, decimal=2) def test_partial_fit_equal_fit_constant(self): self._test_partial_fit_equal_fit("constant") def test_partial_fit_equal_fit_optimal(self): self._test_partial_fit_equal_fit("optimal") def test_partial_fit_equal_fit_invscaling(self): self._test_partial_fit_equal_fit("invscaling") def test_loss_function_epsilon(self): clf = self.factory(epsilon=0.9) clf.set_params(epsilon=0.1) assert clf.loss_functions['huber'][1] == 0.1 class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase): """Run exactly the same tests using the sparse representation variant""" factory = SparseSGDRegressor
bsd-3-clause
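The SGD test suite above exercises SGDClassifier through an old scikit-learn API (the n_iter parameter, sklearn.utils.testing helpers). A minimal sketch of the same hinge-loss classifier on "test sample 1" from the tests, assuming a reasonably recent scikit-learn where the iteration count is spelled max_iter:

import numpy as np
from sklearn.linear_model import SGDClassifier

# same toy data as "test sample 1" above
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])

clf = SGDClassifier(loss="hinge", penalty="l2", alpha=0.01,
                    max_iter=1000, tol=1e-3, shuffle=True, random_state=0)
clf.fit(X, Y)

print(clf.predict(T))            # the tests above expect [1 2 2] for this data
print(clf.decision_function(T))  # signed distances to the separating hyperplane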
ryandougherty/mwa-capstone
MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/axes_zoom_effect.py
3
3293
from matplotlib.transforms import Bbox, TransformedBbox, \ blended_transform_factory from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector,\ BboxConnectorPatch def connect_bbox(bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, prop_lines, prop_patches=None): if prop_patches is None: prop_patches = prop_lines.copy() prop_patches["alpha"] = prop_patches.get("alpha", 1)*0.2 c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines) c1.set_clip_on(False) c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines) c2.set_clip_on(False) bbox_patch1 = BboxPatch(bbox1, **prop_patches) bbox_patch2 = BboxPatch(bbox2, **prop_patches) p = BboxConnectorPatch(bbox1, bbox2, #loc1a=3, loc2a=2, loc1b=4, loc2b=1, loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b, **prop_patches) p.set_clip_on(False) return c1, c2, bbox_patch1, bbox_patch2, p def zoom_effect01(ax1, ax2, xmin, xmax, **kwargs): u""" ax1 : the main axes ax1 : the zoomed axes (xmin,xmax) : the limits of the colored area in both plot axes. connect ax1 & ax2. The x-range of (xmin, xmax) in both axes will be marked. The keywords parameters will be used ti create patches. """ trans1 = blended_transform_factory(ax1.transData, ax1.transAxes) trans2 = blended_transform_factory(ax2.transData, ax2.transAxes) bbox = Bbox.from_extents(xmin, 0, xmax, 1) mybbox1 = TransformedBbox(bbox, trans1) mybbox2 = TransformedBbox(bbox, trans2) prop_patches=kwargs.copy() prop_patches["ec"]="none" prop_patches["alpha"]=0.2 c1, c2, bbox_patch1, bbox_patch2, p = \ connect_bbox(mybbox1, mybbox2, loc1a=3, loc2a=2, loc1b=4, loc2b=1, prop_lines=kwargs, prop_patches=prop_patches) ax1.add_patch(bbox_patch1) ax2.add_patch(bbox_patch2) ax2.add_patch(c1) ax2.add_patch(c2) ax2.add_patch(p) return c1, c2, bbox_patch1, bbox_patch2, p def zoom_effect02(ax1, ax2, **kwargs): u""" ax1 : the main axes ax1 : the zoomed axes Similar to zoom_effect01. The xmin & xmax will be taken from the ax1.viewLim. """ tt = ax1.transScale + (ax1.transLimits + ax2.transAxes) trans = blended_transform_factory(ax2.transData, tt) mybbox1 = ax1.bbox mybbox2 = TransformedBbox(ax1.viewLim, trans) prop_patches=kwargs.copy() prop_patches["ec"]="none" prop_patches["alpha"]=0.2 c1, c2, bbox_patch1, bbox_patch2, p = \ connect_bbox(mybbox1, mybbox2, loc1a=3, loc2a=2, loc1b=4, loc2b=1, prop_lines=kwargs, prop_patches=prop_patches) ax1.add_patch(bbox_patch1) ax2.add_patch(bbox_patch2) ax2.add_patch(c1) ax2.add_patch(c2) ax2.add_patch(p) return c1, c2, bbox_patch1, bbox_patch2, p import matplotlib.pyplot as plt plt.figure(1, figsize=(5,5)) ax1 = plt.subplot(221) ax2 = plt.subplot(212) ax2.set_xlim(0, 1) ax2.set_xlim(0, 5) zoom_effect01(ax1, ax2, 0.2, 0.8) ax1 = plt.subplot(222) ax1.set_xlim(2, 3) ax2.set_xlim(0, 5) zoom_effect02(ax1, ax2) plt.show()
gpl-2.0
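The axes_zoom_effect.py example above builds its zoom connectors by hand from BboxPatch, BboxConnector and blended transforms. For a quick feel of the same visual idea, here is a sketch using the ready-made zoomed_inset_axes/mark_inset helpers from mpl_toolkits, assuming a recent matplotlib; this is a simpler, related approach, not the example's own zoom_effect01/zoom_effect02 functions, and the plotted signal is made up for illustration:

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset

x = np.linspace(0, 5, 500)
y = np.sin(2 * np.pi * x) * np.exp(-x / 3)

fig, ax = plt.subplots()
ax.plot(x, y)

# 3x magnified inset placed in the upper right corner of the main axes
axins = zoomed_inset_axes(ax, zoom=3, loc='upper right')
axins.plot(x, y)
axins.set_xlim(0.2, 0.8)      # the region to highlight in the main axes
axins.set_ylim(-1.0, 1.0)
axins.set_xticks([])
axins.set_yticks([])

# shaded box on the parent axes plus connector lines to the inset
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")

plt.show()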
semio/ddf_utils
ddf_utils/chef/procedure/window.py
1
3738
# -*- coding: utf-8 -*- """window procedure for recipes""" import logging from typing import List import pandas as pd from .. helpers import debuggable, read_opt, mkfunc from .. model.ingredient import DataPointIngredient from .. model.chef import Chef logger = logging.getLogger('window') @debuggable def window(chef: Chef, ingredients: List[DataPointIngredient], result, **options) -> DataPointIngredient: """apply functions on a rolling window .. highlight:: yaml Procedure format: :: procedure: window ingredients: # list of ingredient id - ingredient_id result: str # new ingredient id options: window: column: str # column which window is created from size: int or 'expanding' # if int then rolling window, if expanding then expanding window min_periods: int # as in pandas center: bool # as in pandas aggregate: dict Two styles of function block are supported, and they can mix in one procedure: :: aggregate: col1: sum # run rolling sum to col1 col2: mean # run rolling mean to col2 col3: # run foo to col3 with param1=baz function: foo param1: baz Keyword Args ------------ window: dict window definition, see above for the dictionary format aggregate: dict aggregation functions Examples -------- An example of rolling windows: .. highlight:: yaml :: procedure: window ingredients: - ingredient_to_roll result: new_ingredient_id options: window: column: year size: 10 min_periods: 1 center: false aggregate: column_to_aggregate: sum Notes ----- Any column not mentioned in the `aggregate` block will be dropped in the returned ingredient. """ assert len(ingredients) == 1, "procedure only support 1 ingredient for now." # ingredient = chef.dag.get_node(ingredients[0]).evaluate() ingredient = ingredients[0] logger.info('window: ' + ingredient.id) # reading options window = options.pop('window') aggregate = options.pop('aggregate') column = read_opt(window, 'column', required=True) size = read_opt(window, 'size', required=True) min_periods = read_opt(window, 'min_periods', default=0) center = read_opt(window, 'center', default=False) data = ingredient.compute() newdata = dict() for k, func in aggregate.items(): f = mkfunc(func) # keys for grouping. in multidimensional data like datapoints, we want create # groups before rolling. Just group all key column except the column to aggregate. keys = ingredient.key.copy() df = data[k].copy() # always sort before rolling df = df.sort_values(keys) # then remove the rolling column from primary keys, group by remaining keys keys.remove(column) if size == 'expanding': res = [] groups = df.groupby(by=keys, sort=False) for _, df_g in groups: res.append(df_g.set_index(ingredient.key) .expanding(min_periods=min_periods, center=center).agg({k: f})) newdata[k] = pd.concat(res, sort=False).reset_index() else: newdata[k] = (df.groupby(by=keys, sort=False) .rolling(on=column, window=size, min_periods=min_periods, center=center) .agg({k: f}).reset_index(ingredient.key).dropna()) return DataPointIngredient.from_procedure_result(result, ingredient.key, newdata)
mit
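The window procedure above boils down to a grouped rolling (or expanding) aggregate in pandas: sort by the primary key, group by every key except window.column, then roll along that column. A small self-contained pandas sketch of the rolling branch, with made-up geo/year/population data standing in for a DDF datapoint ingredient:

import pandas as pd

df = pd.DataFrame({
    "geo": ["swe"] * 4 + ["usa"] * 4,
    "year": [2000, 2001, 2002, 2003] * 2,
    "population": [5, 6, 7, 8, 10, 12, 11, 13],
})

keys = ["geo", "year"]   # primary key of the datapoint ingredient
column = "year"          # window.column in the recipe
size = 3                 # window.size ('expanding' would take the other branch above)

df = df.sort_values(keys)
group_keys = [k for k in keys if k != column]

rolling_sum = (df.groupby(group_keys, sort=False)["population"]
                 .rolling(window=size, min_periods=1)
                 .sum()
                 .reset_index(level=group_keys, drop=True))  # back to the row index
df["population_rolling_sum"] = rolling_sum

print(df)   # swe: 5, 11, 18, 21 and usa: 10, 22, 33, 36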
dotsdl/msmbuilder
msmbuilder/featurizer/subset_featurizer.py
2
9327
import itertools import numpy as np import mdtraj as md from . import Featurizer, TrajFeatureUnion ATOM_NAMES = ["N", "CA", "CB", "C", "O", "H"] def get_atompair_indices(reference_traj, keep_atoms=ATOM_NAMES, exclude_atoms=None, reject_bonded=True): """Get a list of acceptable atom pairs. Parameters ---------- reference_traj : mdtraj.Trajectory Trajectory to grab atom pairs from keep_atoms : np.ndarray, dtype=string, optional Select only these atom names exclude_atoms : np.ndarray, dtype=string, optional Exclude these atom names reject_bonded : bool, default=True If True, exclude bonded atompairs. Returns ------- atom_indices : np.ndarray, dtype=int The atom indices that pass your criteria pair_indices : np.ndarray, dtype=int, shape=(N, 2) Pairs of atom indices that pass your criteria. Notes ----- This function has been optimized for speed. A naive implementation can be slow (~minutes) for large proteins. """ top, bonds = reference_traj.top.to_dataframe() if keep_atoms is not None: atom_indices = top[top.name.isin(keep_atoms) == True].index.values if exclude_atoms is not None: atom_indices = top[top.name.isin(exclude_atoms) == False].index.values pair_indices = np.array(list(itertools.combinations(atom_indices, 2))) if reject_bonded: a_list = bonds.min(1) b_list = bonds.max(1) n = atom_indices.max() + 1 bond_hashes = a_list + b_list * n pair_hashes = pair_indices[:, 0] + pair_indices[:,1] * n not_bonds = ~np.in1d(pair_hashes, bond_hashes) pair_indices = np.array([(a, b) for k, (a, b) in enumerate(pair_indices) if not_bonds[k]]) return atom_indices, pair_indices def _lookup_pairs_subset(all_pair_indices, subset_pair_indices, n_choose=None): """Convert pairs of atom indices into a list of indices Parameters ---------- all_pair_indices : np.ndarray, dtype='int', shape=(N, 2) All allowed pairs of atom indices subset_pair_indices : np.ndarray, dtype=int, shape=(n, 2) A select subset of the atom pairs n_choose : int, default=None if not None, return at most this many indices Returns ------- subset : np.ndarray, dtype=int, shape=(n) A numpy array with the integer indices that map subset_pair_indices onto all_pair_indices. That is, subset[k] indices the value of all_pair_indices that matches subset_pair_indices[k] Notes ----- This function is mostly useful when you have two lists of atom_pair indices and you want to find "indices" mapping the smaller to the larger list. This could occur when you are looking at different atom_pair featurizers. """ n = all_pair_indices.max() all_keys = all_pair_indices[:, 0] + n * all_pair_indices[:, 1] optimal_keys = subset_pair_indices[:, 0] + n * subset_pair_indices[:, 1] subset = np.where(np.in1d(all_keys, optimal_keys))[0] if n_choose is not None: subset[0:min(len(subset), n_choose)] = subset[0:min(len(subset), n_choose)] return subset class BaseSubsetFeaturizer(Featurizer): """Base class for featurizers that have a subset of active features. n_features refers to the number of active features. n_max refers to the number of possible features. Parameters ---------- reference_traj : mdtraj.Trajectory Reference Trajectory for checking consistency subset : np.ndarray, default=None, dtype=int The values in subset specify which of all possible features Notes ----- As an example, suppose we have an instance that has `n_max` = 5. This means that the possible features are subsets of [0, 1, 2, 3, 4]. One possible subset is then [0, 1, 3]. The allowed values of subset (e.g. `n_max`) will be determined by the subclass--e.g. 
for example, `n_max` might be the number of phi backbone angles. """ def __init__(self, reference_traj, subset=None): self.reference_traj = reference_traj if subset is not None: self.subset = subset else: self.subset = np.zeros(0, 'int') @property def n_features(self): return len(self.subset) class SubsetAtomPairs(BaseSubsetFeaturizer): """Subset featurizer based on atom pair distances. Parameters ---------- possible_pair_indices : np.ndarray, dtype=int, shape=(n_max, 2) These are the possible atom indices to use for calculating interatomic distances. reference_traj : mdtraj.Trajectory Reference Trajectory for checking consistency subset : np.ndarray, default=None, dtype=int The values in subset specify which of all possible features are to be enabled. Specifically, atom pair distances are calculated for the pairs `possible_pair_indices[subset]` periodic : bool, optional, default=False if True, use periodic boundary condition wrapping exponent : float, optional, default=1.0 Use the distances to this power as the output feature. See Also -------- See `get_atompair_indices` for how one might generate acceptable atom pair indices. """ def __init__(self, possible_pair_indices, reference_traj, subset=None, periodic=False, exponent=1.0): super(SubsetAtomPairs, self).__init__(reference_traj, subset=subset) self.possible_pair_indices = possible_pair_indices self.periodic = periodic self.exponent = exponent if subset is None: self.subset = np.zeros(0, 'int') else: self.subset = subset @property def n_max(self): return len(self.possible_pair_indices) def partial_transform(self, traj): if self.n_features > 0: features = md.geometry.compute_distances(traj, self.pair_indices, periodic=self.periodic) ** self.exponent else: features = np.zeros((traj.n_frames, 0)) return features @property def pair_indices(self): return self.possible_pair_indices[self.subset] class SubsetTrigFeaturizer(BaseSubsetFeaturizer): """Base class for featurizer based on dihedral sine or cosine. Notes ----- Subsets must be a subset of 0, ..., n_max - 1, where n_max is determined by the number of respective phi / psi dihedrals in your protein, as calcualted by mdtraj.compute_phi and mdtraj.compute_psi """ def partial_transform(self, traj): if self.n_features > 0: dih = md.geometry.dihedral.compute_dihedrals(traj, self.which_atom_ind[self.subset]) features = self.trig_function(dih) else: features = np.zeros((traj.n_frames, 0)) return features @property def n_max(self): return len(self.which_atom_ind) class CosMixin(object): def trig_function(self, dihedrals): return np.cos(dihedrals) class SinMixin(object): def trig_function(self, dihedrals): return np.sin(dihedrals) class PhiMixin(object): @property def which_atom_ind(self): atom_indices, dih = md.geometry.dihedral.compute_phi(self.reference_traj) return atom_indices class PsiMixin(object): @property def which_atom_ind(self): atom_indices, dih = md.geometry.dihedral.compute_psi(self.reference_traj) return atom_indices class SubsetCosPhiFeaturizer(SubsetTrigFeaturizer, CosMixin, PhiMixin): pass class SubsetCosPsiFeaturizer(SubsetTrigFeaturizer, CosMixin, PhiMixin): pass class SubsetSinPhiFeaturizer(SubsetTrigFeaturizer, SinMixin, PsiMixin): pass class SubsetSinPsiFeaturizer(SubsetTrigFeaturizer, SinMixin, PsiMixin): pass class SubsetFeatureUnion(TrajFeatureUnion): """Mixtape version of sklearn.pipeline.FeatureUnion with feature subset selection. Notes ----- Works on lists of trajectories. Has a hacky convenience method to set all subsets at once. 
""" @property def subsets(self): return [featurizer.subset for (_, featurizer) in self.transformer_list] @subsets.setter def subsets(self, value): assert len(value) == len(self.transformer_list), "wrong len" for k, (_, featurizer) in enumerate(self.transformer_list): featurizer.subset = value[k] @property def n_max_i(self): return np.array([featurizer.n_max for (_, featurizer) in self.transformer_list]) @property def n_features_i(self): return np.array([featurizer.n_features for (_, featurizer) in self.transformer_list]) @property def n_featurizers(self): return len(self.transformer_list) @property def n_max(self): return np.sum([featurizer.n_max for (_, featurizer) in self.transformer_list]) @property def n_features(self): return sum([featurizer.n_features for (_, featurizer) in self.transformer_list]) class DummyCV(object): """A cross-validation object that returns identical training and test sets.""" def __init__(self, n): self.n = n def __iter__(self): yield np.arange(self.n), np.arange(self.n) def __len__(self): return self.n
lgpl-2.1
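get_atompair_indices and _lookup_pairs_subset above both rely on encoding an unordered atom pair (a, b) with a <= b as the single integer a + b * n, so that pair membership tests become np.in1d calls on 1-D arrays. A tiny standalone numpy demo of that hashing trick, with toy atom and bond indices and no mdtraj required:

import itertools
import numpy as np

n_atoms = 5
pairs = np.array(list(itertools.combinations(range(n_atoms), 2)))  # all 10 pairs
bonds = np.array([[0, 1], [1, 2], [3, 4]])                         # pretend bond list

n = n_atoms  # any value larger than the maximum atom index works
bond_hashes = bonds.min(axis=1) + bonds.max(axis=1) * n
pair_hashes = pairs[:, 0] + pairs[:, 1] * n  # pairs are already (small, large)

keep = ~np.in1d(pair_hashes, bond_hashes)
print(pairs[keep])   # 7 of the 10 pairs survive; the three bonded ones are dropped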
ishanic/scikit-learn
examples/ensemble/plot_voting_probas.py
316
2824
""" =========================================================== Plot class probabilities calculated by the VotingClassifier =========================================================== Plot the class probabilities of the first sample in a toy dataset predicted by three different classifiers and averaged by the `VotingClassifier`. First, three examplary classifiers are initialized (`LogisticRegression`, `GaussianNB`, and `RandomForestClassifier`) and used to initialize a soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that the predicted probabilities of the `RandomForestClassifier` count 5 times as much as the weights of the other classifiers when the averaged probability is calculated. To visualize the probability weighting, we fit each classifier on the training set and plot the predicted class probabilities for the first sample in this example dataset. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) y = np.array([1, 1, 2, 2]) eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', weights=[1, 1, 5]) # predict class probabilities for all classifiers probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)] # get class probabilities for the first sample in the dataset class1_1 = [pr[0, 0] for pr in probas] class2_1 = [pr[0, 1] for pr in probas] # plotting N = 4 # number of groups ind = np.arange(N) # group positions width = 0.35 # bar width fig, ax = plt.subplots() # bars for classifier 1-3 p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green') p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen') # bars for VotingClassifier p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue') p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue') # plot annotations plt.axvline(2.8, color='k', linestyle='dashed') ax.set_xticks(ind + width) ax.set_xticklabels(['LogisticRegression\nweight 1', 'GaussianNB\nweight 1', 'RandomForestClassifier\nweight 5', 'VotingClassifier\n(average probabilities)'], rotation=40, ha='right') plt.ylim([0, 1]) plt.title('Class probabilities for sample 1 by different classifiers') plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left') plt.show()
bsd-3-clause
tosolveit/scikit-learn
sklearn/cluster/tests/test_affinity_propagation.py
341
2620
""" Testing for Clustering methods """ import numpy as np from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.cluster.affinity_propagation_ import AffinityPropagation from sklearn.cluster.affinity_propagation_ import affinity_propagation from sklearn.datasets.samples_generator import make_blobs from sklearn.metrics import euclidean_distances n_clusters = 3 centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 X, _ = make_blobs(n_samples=60, n_features=2, centers=centers, cluster_std=0.4, shuffle=True, random_state=0) def test_affinity_propagation(): # Affinity Propagation algorithm # Compute similarities S = -euclidean_distances(X, squared=True) preference = np.median(S) * 10 # Compute Affinity Propagation cluster_centers_indices, labels = affinity_propagation( S, preference=preference) n_clusters_ = len(cluster_centers_indices) assert_equal(n_clusters, n_clusters_) af = AffinityPropagation(preference=preference, affinity="precomputed") labels_precomputed = af.fit(S).labels_ af = AffinityPropagation(preference=preference, verbose=True) labels = af.fit(X).labels_ assert_array_equal(labels, labels_precomputed) cluster_centers_indices = af.cluster_centers_indices_ n_clusters_ = len(cluster_centers_indices) assert_equal(np.unique(labels).size, n_clusters_) assert_equal(n_clusters, n_clusters_) # Test also with no copy _, labels_no_copy = affinity_propagation(S, preference=preference, copy=False) assert_array_equal(labels, labels_no_copy) # Test input validation assert_raises(ValueError, affinity_propagation, S[:, :-1]) assert_raises(ValueError, affinity_propagation, S, damping=0) af = AffinityPropagation(affinity="unknown") assert_raises(ValueError, af.fit, X) def test_affinity_propagation_predict(): # Test AffinityPropagation.predict af = AffinityPropagation(affinity="euclidean") labels = af.fit_predict(X) labels2 = af.predict(X) assert_array_equal(labels, labels2) def test_affinity_propagation_predict_error(): # Test exception in AffinityPropagation.predict # Not fitted. af = AffinityPropagation(affinity="euclidean") assert_raises(ValueError, af.predict, X) # Predict not supported when affinity="precomputed". S = np.dot(X, X.T) af = AffinityPropagation(affinity="precomputed") af.fit(S) assert_raises(ValueError, af.predict, X)
bsd-3-clause
leggitta/mne-python
examples/connectivity/plot_mne_inverse_connectivity_spectrum.py
18
3465
""" ============================================================== Compute full spectrum source space connectivity between labels ============================================================== The connectivity is computed between 4 labels across the spectrum between 5 and 40 Hz. """ # Authors: Alexandre Gramfort <[email protected]> # # License: BSD (3-clause) import matplotlib.pyplot as plt import mne from mne.datasets import sample from mne.io import Raw from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator from mne.connectivity import spectral_connectivity print(__doc__) data_path = sample.data_path() subjects_dir = data_path + '/subjects' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' # Load data inverse_operator = read_inverse_operator(fname_inv) raw = Raw(fname_raw) events = mne.read_events(fname_event) # Add a bad channel raw.info['bads'] += ['MEG 2443'] # Pick MEG channels picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, exclude='bads') # Define epochs for left-auditory condition event_id, tmin, tmax = 1, -0.2, 0.5 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6)) # Compute inverse solution and for each epoch. By using "return_generator=True" # stcs will be a generator object instead of a list. snr = 1.0 # use lower SNR for single epochs lambda2 = 1.0 / snr ** 2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, pick_ori="normal", return_generator=True) # Read some labels names = ['Aud-lh', 'Aud-rh', 'Vis-lh', 'Vis-rh'] labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % name) for name in names] # Average the source estimates within each label using sign-flips to reduce # signal cancellations, also here we return a generator src = inverse_operator['src'] label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip', return_generator=True) fmin, fmax = 5., 40. sfreq = raw.info['sfreq'] # the sampling frequency con, freqs, times, n_epochs, n_tapers = spectral_connectivity( label_ts, method='wpli2_debiased', mode='multitaper', sfreq=sfreq, fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=2) n_rows, n_cols = con.shape[:2] fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True) plt.suptitle('Between labels connectivity') for i in range(n_rows): for j in range(i + 1): if i == j: axes[i, j].set_axis_off() continue axes[i, j].plot(freqs, con[i, j, :]) axes[j, i].plot(freqs, con[i, j, :]) if j == 0: axes[i, j].set_ylabel(names[i]) axes[0, i].set_title(names[i]) if i == (n_rows - 1): axes[i, j].set_xlabel(names[j]) axes[i, j].set_xlim([fmin, fmax]) axes[j, i].set_xlim([fmin, fmax]) # Show band limits for f in [8, 12, 18, 35]: axes[i, j].axvline(f, color='k') axes[j, i].axvline(f, color='k') plt.show()
bsd-3-clause
osvaldshpengler/BuildingMachineLearningSystemsWithPython
ch03/noise_analysis.py
24
2412
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License

import sklearn.datasets

groups = [
    'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
    'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space']
train_data = sklearn.datasets.fetch_20newsgroups(subset="train",
                                                 categories=groups)

labels = train_data.target
num_clusters = 50  # sp.unique(labels).shape[0]

import nltk.stem
english_stemmer = nltk.stem.SnowballStemmer('english')

from sklearn.feature_extraction.text import TfidfVectorizer


class StemmedTfidfVectorizer(TfidfVectorizer):

    def build_analyzer(self):
        analyzer = super(TfidfVectorizer, self).build_analyzer()
        return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))

vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5,
                                    stop_words='english', decode_error='ignore'
                                    )
vectorized = vectorizer.fit_transform(train_data.data)

post_group = zip(train_data.data, train_data.target)
# Create a list of tuples that can be sorted by
# the length of the posts
all = [(len(post[0]), post[0], train_data.target_names[post[1]])
       for post in post_group]
graphics = sorted([post for post in all if post[2] == 'comp.graphics'])
print(graphics[5])
# (245, 'From: [email protected]\nSubject: test....(sorry)\nOrganization:
# The University of Birmingham, United Kingdom\nLines: 1\nNNTP-Posting-Host: ibm3090.bham.ac.uk
# \n\n==============================================================================\n',
# 'comp.graphics')

noise_post = graphics[5][1]

analyzer = vectorizer.build_analyzer()
print(list(analyzer(noise_post)))

useful = set(analyzer(noise_post)).intersection(vectorizer.get_feature_names())
print(sorted(useful))
# ['ac', 'birmingham', 'host', 'kingdom', 'nntp', 'sorri', 'test', 'uk', 'unit', 'univers']

for term in sorted(useful):
    print('IDF(%s)=%.2f' % (term,
                            vectorizer._tfidf.idf_[vectorizer.vocabulary_[term]]))
# IDF(ac)=3.51
# IDF(birmingham)=6.77
# IDF(host)=1.74
# IDF(kingdom)=6.68
# IDF(nntp)=1.77
# IDF(sorri)=4.14
# IDF(test)=3.83
# IDF(uk)=3.70
# IDF(unit)=4.42
# IDF(univers)=1.91
mit
AleksanderLidtke/XKCD
slack/slack.py
1
10258
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ My previous project seemed more or less impossible from the outset. I knew that we had to do the best it gets to stand a chance of delivering it. I normally read a lot about productivity, team work, leadership and the like. By chance, I'd read of Slack, the communication tool a few weeks before joining that project. So we gave it a shot. And it worked. Slack, the app that puts rovers on other planets (or satellites in low-Earth orbit). """ import pandas, numpy, matplotlib.pyplot, matplotlib.ticker # Various font sizes. ticksFontSize=18 labelsFontSizeSmall=20 labelsFontSize=30 titleFontSize=34 legendFontSize=20 matplotlib.rc('xtick',labelsize=ticksFontSize) matplotlib.rc('ytick',labelsize=ticksFontSize) matplotlib.pyplot.xkcd() # C'est le shit. SAVEFIG=True # Automatically save figures to CWD? # Read the data from the message, channel and user statistics files. dfMsg=pandas.read_csv('SlackAnalytics03Aug2018.csv',sep=',',header=0, # First column (date) should be the index to enable resampling. index_col=0, # Interpret column 0 as dates, which it is. parse_dates=[0],infer_datetime_format=True) dfMsg1WkSum=dfMsg.resample('1W').sum() # Weekly sum of messages. dfCh =pandas.read_csv('ChannelAnalytics03Aug2018.csv',sep=',',header=0,index_col=1, parse_dates=[1],infer_datetime_format=True) dfUsr=pandas.read_csv('UserAnalytics03Aug2018.csv',sep=',',header=0,index_col=0, parse_dates=[2],infer_datetime_format=True) # Plot the message data. public=dfMsg1WkSum['Messages in Public Channels'] private=dfMsg1WkSum['Messages in Private Channels'] direct=dfMsg1WkSum['Messages in DMs'] fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(14,8)) ax.plot(dfMsg1WkSum.index,public,c='indigo', ls='-',lw=3,marker=None,label=r'$Public\ channel$') ax.plot(dfMsg1WkSum.index,private,c='deepskyblue', ls='-',lw=3,marker=None,label=r'$Private\ channel$') ax.plot(dfMsg1WkSum.index,direct,c='crimson', ls='-',lw=3,marker=None,label=r'$Direct\ messages$') ax.set_axisbelow(True) ax.set_xlabel(r'$Date\ (JST)$',fontsize=labelsFontSize) ax.set_ylabel(r'$Weekly\ messages$',fontsize=labelsFontSize) matplotlib.pyplot.subplots_adjust(left=0.1,right=0.95,bottom=0.15,top=0.84) ax.legend(bbox_to_anchor=(0.5,1.23),loc='upper center', prop={'size':legendFontSize},fancybox=True,shadow=True,ncol=3) ax.grid(linewidth=1,linestyle=':',which='major') ax.grid(linewidth=0.1,linestyle='--',which='minor') ax.tick_params(axis='both',reset=False,which='both',length=5,width=1.5) ax.annotate(r'$FM\ AIT$',size=ticksFontSize,xy=('2018-7-7',875), xytext=('2018-3-1',800),arrowprops=dict(facecolor='black',shrink=0.05)) ax.annotate(r'$Bounenkai$',size=ticksFontSize,xy=('2018-1-4',70), xytext=('2017-10-1',500),arrowprops=dict(facecolor='black',shrink=0.05)) ax.annotate(r'$CDR$',size=ticksFontSize,xy=('2018-5-20',510), xytext=('2018-1-1',600),arrowprops=dict(facecolor='black',shrink=0.05)) ax.annotate(r'$Summer\ ends$',size=ticksFontSize,xy=('2017-10-28',210), xytext=('2017-6-1',400),arrowprops=dict(facecolor='black',shrink=0.05)) ax.annotate(r'$PDR$',size=ticksFontSize,xy=('2017-8-8',170), xytext=('2017-4-1',300),arrowprops=dict(facecolor='black',shrink=0.05)) fig.autofmt_xdate() fig.show() if SAVEFIG: fig.savefig('globalMessageHistory.png') # Plot the user data. File data aren't so interesting. fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(14,8)) # Plot files uploaded per week - filr data (users as well) are too noisy to be # interpreted on the time scale of days. 
#ax.plot(dfMsg1WkSum.index,dfMsg1WkSum['Files Uploaded'],c='indigo', # ls='-',lw=3,marker=None,label=r'$Files\ uploaded$') # Have weekly user data from Slack, so plot that w/o resampling. ax.plot(dfMsg.index,dfMsg['Full Members'],c='deepskyblue', ls='-',lw=3,marker=None,label=r'$Registered$') ax.plot(dfMsg.index,dfMsg['Weekly Active Users'],c='crimson', ls='-',lw=3,marker=None,label=r'$Active$') ax.plot(dfMsg.index,dfMsg['Weekly Users Posting Messages'],c='gold', ls='-',lw=3,marker=None,label=r'$Posting$') ax.set_axisbelow(True) ax.set_xlabel(r'$Date\ (JST)$',fontsize=labelsFontSize) ax.set_ylabel(r'$Weekly\ users$',fontsize=labelsFontSize) matplotlib.pyplot.subplots_adjust(left=0.1,right=0.95,bottom=0.15,top=0.83) ax.legend(bbox_to_anchor=(0.5,1.23),loc='upper center', prop={'size':legendFontSize},fancybox=True,shadow=True,ncol=3) ax.grid(linewidth=1,linestyle=':',which='major') ax.grid(linewidth=0.1,linestyle='--',which='minor') ax.tick_params(axis='both',reset=False,which='both',length=5,width=1.5) ax.set_ylim(bottom=-20) ax.annotate(r'$Bounenkai$',size=ticksFontSize,xy=('2018-1-4',2), xytext=('2018-3-1',-10),arrowprops=dict(facecolor='black',shrink=0.05)) ax.annotate(r'$Obon$',size=ticksFontSize,xy=('2017-8-19',0), xytext=('2017-6-1',-15),arrowprops=dict(facecolor='black',shrink=0.05)) fig.autofmt_xdate() fig.show() if SAVEFIG: fig.savefig('userNumberHistory.png') # Plot the inactive user numbers. registered=dfMsg['Full Members'] active=dfMsg['Weekly Active Users'] posting=dfMsg['Weekly Users Posting Messages'] lower=min(registered.min(),active.min(),posting.min()) # Will only subtract, so won't exceed the original no. users in any series. upper=min(registered.max(),active.max(),posting.max()) fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(14,8)) ax.plot(dfMsg.index,registered-active,c='deepskyblue', ls='-',lw=3,marker=None,label=r'$Inactive$') ax.plot(dfMsg.index,active-posting,c='crimson', ls='-',lw=3,marker=None,label=r'$Active\ not\ posting$') ax.set_axisbelow(True) ax.set_xlabel(r'$Date\ (JST)$',fontsize=labelsFontSize) ax.set_ylabel(r'$Weekly\ users$',fontsize=labelsFontSize) matplotlib.pyplot.subplots_adjust(left=0.1,right=0.95,bottom=0.15,top=0.84) ax.legend(bbox_to_anchor=(0.5,1.23),loc='upper center', prop={'size':legendFontSize},fancybox=True,shadow=True,ncol=2) ax.grid(linewidth=1,linestyle=':',which='major') ax.grid(linewidth=0.1,linestyle='--',which='minor') ax.tick_params(axis='both',reset=False,which='both',length=5,width=1.5) ax.set_yticks(numpy.arange(numpy.floor(lower),numpy.ceil(upper),5)) ax.annotate(r'$Bounenkai$',size=ticksFontSize,xy=('2018-1-4',16), xytext=('2018-3-1',17),arrowprops=dict(facecolor='black',shrink=0.05)) ax.annotate(r'$Obon$',size=ticksFontSize,xy=('2017-8-19',17), xytext=('2017-6-1',15),arrowprops=dict(facecolor='black',shrink=0.05)) fig.autofmt_xdate() fig.show() if SAVEFIG: fig.savefig('activeSilentUserNumberHistory.png') # Plot ratio of active and inactive users. 
fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(14,8)) ax.plot(dfMsg.index,(registered-active)/registered,c='deepskyblue', ls='-',lw=3,marker=None,label=r'$Inactive$') ax.plot(dfMsg.index,(active-posting)/registered,c='crimson', ls='-',lw=3,marker=None,label=r'$Active\ not\ posting$') ax.set_axisbelow(True) ax.set_xlabel(r'$Date\ (JST)$',fontsize=labelsFontSize) ax.set_ylabel(r'$Weekly\ user\ ratio$',fontsize=labelsFontSize) matplotlib.pyplot.subplots_adjust(left=0.1,right=0.95,bottom=0.15,top=0.84) ax.legend(bbox_to_anchor=(0.5,1.23),loc='upper center', prop={'size':legendFontSize},fancybox=True,shadow=True,ncol=2) ax.grid(linewidth=1,linestyle=':',which='major') ax.grid(linewidth=0.1,linestyle='--',which='minor') ax.tick_params(axis='both',reset=False,which='both',length=5,width=1.5) ax.annotate(r'$Bounenkai$',size=ticksFontSize,xy=('2018-1-4',0.67), xytext=('2018-3-1',0.8),arrowprops=dict(facecolor='black',shrink=0.05)) ax.annotate(r'$Obon$',size=ticksFontSize,xy=('2017-8-19',0.85), xytext=('2017-6-1',0.95),arrowprops=dict(facecolor='black',shrink=0.05)) ax.set_ylim(bottom=-0.1,top=1.1) fig.autofmt_xdate() fig.show() if SAVEFIG: fig.savefig('activeSilentUserRatioHistory.png') # Plot channel users & messages VS age (datetime.now()-date created) fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(14,8)) ax.plot( (pandas.Timestamp(2018,8,15)-dfCh.index).days, # Channel age. dfCh['Messages Posted'],c='indigo',lw=0,marker='o',ms=10) ax.set_axisbelow(True) ax.set_xlabel(r'$Channel\ age\ (days)$',fontsize=labelsFontSize) ax.set_ylabel(r'$Messages\ posted$',fontsize=labelsFontSize) matplotlib.pyplot.subplots_adjust(left=0.1,right=0.95,bottom=0.15,top=0.84) ax.grid(linewidth=1,linestyle=':',which='major') ax.grid(linewidth=0.1,linestyle='--',which='minor') ax.tick_params(axis='both',reset=False,which='both',length=5,width=1.5) ax.annotate(r'$random\ =\ we\ are\ to\ the\ point$',size=ticksFontSize,xy=(555,65), xytext=(250,200),arrowprops=dict(facecolor='black',shrink=0.05)) fig.show() if SAVEFIG: fig.savefig('channelPopularityVSAge.png') # User age VS activity. Don't blame anyone. userAges=(pandas.Timestamp(2018,8,15)-dfUsr['Account Creation Date']).values/(1e9*24*3600) fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(14,8)) ax.plot(userAges,dfUsr['chats_sent'],c='indigo',lw=0,marker='o',ms=10,label=r'$User$') ax.plot([userAges.min(),userAges.max()], [dfUsr['chats_sent'].mean(),dfUsr['chats_sent'].mean()],ls='--',lw=2, c='deepskyblue',label=r"$Mean$") ax.plot([userAges.min(),userAges.max()], [dfUsr['chats_sent'].median(),dfUsr['chats_sent'].median()],ls='--',lw=2, c='crimson',label=r"$Median$") ax.set_axisbelow(True) ax.set_xlabel(r'$User\ age\ (days)$',fontsize=labelsFontSize) ax.set_ylabel(r'$Chats\ sent$',fontsize=labelsFontSize) matplotlib.pyplot.subplots_adjust(left=0.1,right=0.95,bottom=0.15,top=0.84) ax.legend(bbox_to_anchor=(0.5,1.23),loc='upper center', prop={'size':legendFontSize},fancybox=True,shadow=True,ncol=3) ax.grid(linewidth=1,linestyle=':',which='major') ax.grid(linewidth=0.1,linestyle='--',which='minor') ax.tick_params(axis='both',reset=False,which='both',length=5,width=1.5) ax.text(x=100,y=550,s=r'${0:.2f}$'.format(dfUsr['chats_sent'].mean()), color='deepskyblue',size=ticksFontSize) ax.text(x=100,y=250,s=r'${0:.2f}$'.format(dfUsr['chats_sent'].median()), color='crimson',size=ticksFontSize) fig.show() if SAVEFIG: fig.savefig('userVerboseness.png') input()
mit
RobertABT/heightmap
build/matplotlib/examples/misc/contour_manual.py
12
1630
""" Example of displaying your own contour lines and polygons using ContourSet. """ import matplotlib.pyplot as plt from matplotlib.contour import ContourSet import matplotlib.cm as cm # Contour lines for each level are a list/tuple of polygons. lines0 = [ [[0,0],[0,4]] ] lines1 = [ [[2,0],[1,2],[1,3]] ] lines2 = [ [[3,0],[3,2]], [[3,3],[3,4]] ] # Note two lines. # Filled contours between two levels are also a list/tuple of polygons. # Points can be ordered clockwise or anticlockwise. filled01 = [ [[0,0],[0,4],[1,3],[1,2],[2,0]] ] filled12 = [ [[2,0],[3,0],[3,2],[1,3],[1,2]], # Note two polygons. [[1,4],[3,4],[3,3]] ] plt.figure() # Filled contours using filled=True. cs = ContourSet(plt.gca(), [0,1,2], [filled01, filled12], filled=True, cmap=cm.bone) cbar = plt.colorbar(cs) # Contour lines (non-filled). lines = ContourSet(plt.gca(), [0,1,2], [lines0, lines1, lines2], cmap=cm.cool, linewidths=3) cbar.add_lines(lines) plt.axis([-0.5, 3.5, -0.5, 4.5]) plt.title('User-specified contours') # Multiple filled contour lines can be specified in a single list of polygon # vertices along with a list of vertex kinds (code types) as described in the # Path class. This is particularly useful for polygons with holes. # Here a code type of 1 is a MOVETO, and 2 is a LINETO. plt.figure() filled01 = [ [[0,0],[3,0],[3,3],[0,3],[1,1],[1,2],[2,2],[2,1]] ] kinds01 = [ [1,2,2,2,1,2,2,2] ] cs = ContourSet(plt.gca(), [0,1], [filled01], [kinds01], filled=True) cbar = plt.colorbar(cs) plt.axis([-0.5, 3.5, -0.5, 3.5]) plt.title('User specified filled contours with holes') plt.show()
mit
clemkoa/scikit-learn
sklearn/utils/deprecation.py
5
3019
import sys import warnings import functools __all__ = ["deprecated", ] class deprecated(object): """Decorator to mark a function or class as deprecated. Issue a warning when the function is called/the class is instantiated and adds a warning to the docstring. The optional extra argument will be appended to the deprecation message and the docstring. Note: to use this with the default value for extra, put in an empty of parentheses: >>> from sklearn.utils import deprecated >>> deprecated() # doctest: +ELLIPSIS <sklearn.utils.deprecation.deprecated object at ...> >>> @deprecated() ... def some_function(): pass Parameters ---------- extra : string to be added to the deprecation messages """ # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary, # but with many changes. def __init__(self, extra=''): self.extra = extra def __call__(self, obj): """Call method Parameters ---------- obj : object """ if isinstance(obj, type): return self._decorate_class(obj) else: return self._decorate_fun(obj) def _decorate_class(self, cls): msg = "Class %s is deprecated" % cls.__name__ if self.extra: msg += "; %s" % self.extra # FIXME: we should probably reset __new__ for full generality init = cls.__init__ def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return init(*args, **kwargs) cls.__init__ = wrapped wrapped.__name__ = '__init__' wrapped.__doc__ = self._update_doc(init.__doc__) wrapped.deprecated_original = init return cls def _decorate_fun(self, fun): """Decorate function fun""" msg = "Function %s is deprecated" % fun.__name__ if self.extra: msg += "; %s" % self.extra @functools.wraps(fun) def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return fun(*args, **kwargs) wrapped.__doc__ = self._update_doc(wrapped.__doc__) return wrapped def _update_doc(self, olddoc): newdoc = "DEPRECATED" if self.extra: newdoc = "%s: %s" % (newdoc, self.extra) if olddoc: newdoc = "%s\n\n%s" % (newdoc, olddoc) return newdoc def _is_deprecated(func): """Helper to check if func is wraped by our deprecated decorator""" if sys.version_info < (3, 5): raise NotImplementedError("This is only available for python3.5 " "or above") closures = getattr(func, '__closure__', []) if closures is None: closures = [] is_deprecated = ('deprecated' in ''.join([c.cell_contents for c in closures if isinstance(c.cell_contents, str)])) return is_deprecated
bsd-3-clause
sssllliang/BuildingMachineLearningSystemsWithPython
ch11/demo_mi.py
25
3160
# This code is supporting material for the book # Building Machine Learning Systems with Python # by Willi Richert and Luis Pedro Coelho # published by PACKT Publishing # # It is made available under the MIT License import os from matplotlib import pylab import numpy as np from scipy.stats import norm, entropy from utils import CHART_DIR def mutual_info(x, y, bins=10): counts_xy, bins_x, bins_y = np.histogram2d(x, y, bins=(bins, bins)) counts_x, bins = np.histogram(x, bins=bins) counts_y, bins = np.histogram(y, bins=bins) counts_xy += 1 counts_x += 1 counts_y += 1 P_xy = counts_xy / np.sum(counts_xy, dtype=float) P_x = counts_x / np.sum(counts_x, dtype=float) P_y = counts_y / np.sum(counts_y, dtype=float) I_xy = np.sum(P_xy * np.log2(P_xy / (P_x.reshape(-1, 1) * P_y))) return I_xy / (entropy(counts_x) + entropy(counts_y)) def plot_entropy(): pylab.clf() pylab.figure(num=None, figsize=(5, 4)) title = "Entropy $H(X)$" pylab.title(title) pylab.xlabel("$P(X=$coin will show heads up$)$") pylab.ylabel("$H(X)$") pylab.xlim(xmin=0, xmax=1.1) x = np.arange(0.001, 1, 0.001) y = -x * np.log2(x) - (1 - x) * np.log2(1 - x) pylab.plot(x, y) # pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in # [0,1,2,3,4]]) pylab.autoscale(tight=True) pylab.grid(True) filename = "entropy_demo.png" pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight") def _plot_mi_func(x, y): mi = mutual_info(x, y) title = "NI($X_1$, $X_2$) = %.3f" % mi pylab.scatter(x, y) pylab.title(title) pylab.xlabel("$X_1$") pylab.ylabel("$X_2$") def plot_mi_demo(): np.random.seed(0) # to reproduce the data later on pylab.clf() pylab.figure(num=None, figsize=(8, 8)) x = np.arange(0, 10, 0.2) pylab.subplot(221) y = 0.5 * x + norm.rvs(1, scale=.01, size=len(x)) _plot_mi_func(x, y) pylab.subplot(222) y = 0.5 * x + norm.rvs(1, scale=.1, size=len(x)) _plot_mi_func(x, y) pylab.subplot(223) y = 0.5 * x + norm.rvs(1, scale=1, size=len(x)) _plot_mi_func(x, y) pylab.subplot(224) y = norm.rvs(1, scale=10, size=len(x)) _plot_mi_func(x, y) pylab.autoscale(tight=True) pylab.grid(True) filename = "mi_demo_1.png" pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight") pylab.clf() pylab.figure(num=None, figsize=(8, 8)) x = np.arange(-5, 5, 0.2) pylab.subplot(221) y = 0.5 * x ** 2 + norm.rvs(1, scale=.01, size=len(x)) _plot_mi_func(x, y) pylab.subplot(222) y = 0.5 * x ** 2 + norm.rvs(1, scale=.1, size=len(x)) _plot_mi_func(x, y) pylab.subplot(223) y = 0.5 * x ** 2 + norm.rvs(1, scale=1, size=len(x)) _plot_mi_func(x, y) pylab.subplot(224) y = 0.5 * x ** 2 + norm.rvs(1, scale=10, size=len(x)) _plot_mi_func(x, y) pylab.autoscale(tight=True) pylab.grid(True) filename = "mi_demo_2.png" pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight") if __name__ == '__main__': plot_entropy() plot_mi_demo()
mit
mwv/scikit-learn
examples/manifold/plot_mds.py
261
2616
""" ========================= Multi-dimensional scaling ========================= An illustration of the metric and non-metric MDS on generated noisy data. The reconstructed points using the metric MDS and non metric MDS are slightly shifted to avoid overlapping. """ # Author: Nelle Varoquaux <[email protected]> # Licence: BSD print(__doc__) import numpy as np from matplotlib import pyplot as plt from matplotlib.collections import LineCollection from sklearn import manifold from sklearn.metrics import euclidean_distances from sklearn.decomposition import PCA n_samples = 20 seed = np.random.RandomState(seed=3) X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float) X_true = X_true.reshape((n_samples, 2)) # Center the data X_true -= X_true.mean() similarities = euclidean_distances(X_true) # Add noise to the similarities noise = np.random.rand(n_samples, n_samples) noise = noise + noise.T noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0 similarities += noise mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed, dissimilarity="precomputed", n_jobs=1) pos = mds.fit(similarities).embedding_ nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12, dissimilarity="precomputed", random_state=seed, n_jobs=1, n_init=1) npos = nmds.fit_transform(similarities, init=pos) # Rescale the data pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum()) npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum()) # Rotate the data clf = PCA(n_components=2) X_true = clf.fit_transform(X_true) pos = clf.fit_transform(pos) npos = clf.fit_transform(npos) fig = plt.figure(1) ax = plt.axes([0., 0., 1., 1.]) plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20) plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g') plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b') plt.legend(('True position', 'MDS', 'NMDS'), loc='best') similarities = similarities.max() / similarities * 100 similarities[np.isinf(similarities)] = 0 # Plot the edges start_idx, end_idx = np.where(pos) #a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [[X_true[i, :], X_true[j, :]] for i in range(len(pos)) for j in range(len(pos))] values = np.abs(similarities) lc = LineCollection(segments, zorder=0, cmap=plt.cm.hot_r, norm=plt.Normalize(0, values.max())) lc.set_array(similarities.flatten()) lc.set_linewidths(0.5 * np.ones(len(segments))) ax.add_collection(lc) plt.show()
bsd-3-clause
davidpng/FCS_Database
FlowAnal/FCS_subroutines/p2D_Feature_Extraction.py
1
3520
# -*- coding: utf-8 -*- """ Created on Tue 30 Dec 2014 10:29:41 AM PST This file describes a feature extraction class for N dimensions @author: David Ng, MD """ import os.path import pandas as pd import numpy as np import scipy as sp import h5py import logging import itertools log = logging.getLogger(__name__) class p2D_Feature_Extraction(object): def __init__(self,FCS,bins,**kwargs): """ bins = number of bins per axis Accessiable Parameters type bin_description histogram """ self.type = 'Full' if 'exclude_param' in kwargs: exclude = kwargs['exclude_param'] else: exclude = ['FSC-H','SSC-A','Time'] #generate list of columns to be used columns = [c for c in FCS.data.columns if c not in exclude] #generate a dictionary describing the bins to be used bin_dict = self._Generate_Bin_Dict(columns,bins) self.bin_description = bin_dict self.histogram = self._flattened_2d_histograms(FCS_data=FCS.data, columns=columns, bin_dict=bin_dict,**kwargs) def _flattened_2d_histograms(self,FCS_data,columns,bin_dict,ul=1.0,normalize=True,**kwargs): """ """ feature_space=[] for features in itertools.combinations(columns,2): dim = (bin_dict[features[0]],bin_dict[features[1]]) log.debug(features) histo2d,xbin,ybin = np.histogram2d(FCS_data[features[0]], FCS_data[features[1]], bins=dim, range=[[0,ul],[0,ul]], normed=normalize) #feature_space.extend(np.ravel(1-1/((histo2d*scaling)**0.75+1))) feature_space.extend(np.ravel(histo2d)) return sp.sparse.csr_matrix(feature_space) def _coord2sparse_histogram(self,vector_length,coordinates,normalize=True,**kwargs): """ generates a sparse matrix with normalized histogram counts each bin describes the fraction of total events within it (i.e. < 1) """ output=sp.sparse.lil_matrix((1,vector_length), dtype=np.float32) for i in coordinates: output[0,i]+=1 if normalize: return output/ len(coordinates) else: return output def _Generate_Bin_Dict(self,columns,bins): """ Performs error checking and type converion for bins """ if isinstance(bins,int): bin_dict = pd.Series([bins] * len(columns),index=columns) elif isinstance(bins,list): if len(bins) != len(columns): raise RuntimeWarning("number of bins in the list does not match the number of parameters") else: bin_dict = pd.Series(bins,columns) elif isinstance(bins,dict): if bins.keys() not in columns or columns not in bins.keys(): raise RuntimeWarning("The bin keys do not match the provided columns") else: raise RuntimeWarning("bin dict not implemented") else: raise TypeError("provided bins parameter is not supported") return bin_dict def Return_Coordinates(self,index): """ Returns the bin parameters """ pass
gpl-3.0
costypetrisor/scikit-learn
sklearn/metrics/tests/test_score_objects.py
84
14181
import pickle import numpy as np from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_true from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_not_equal from sklearn.base import BaseEstimator from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score, log_loss, precision_score, recall_score) from sklearn.metrics.cluster import adjusted_rand_score from sklearn.metrics.scorer import (check_scoring, _PredictScorer, _passthrough_scorer) from sklearn.metrics import make_scorer, get_scorer, SCORERS from sklearn.svm import LinearSVC from sklearn.pipeline import make_pipeline from sklearn.cluster import KMeans from sklearn.dummy import DummyRegressor from sklearn.linear_model import Ridge, LogisticRegression from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.datasets import make_blobs from sklearn.datasets import make_classification from sklearn.datasets import make_multilabel_classification from sklearn.datasets import load_diabetes from sklearn.cross_validation import train_test_split, cross_val_score from sklearn.grid_search import GridSearchCV from sklearn.multiclass import OneVsRestClassifier REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error'] CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro', 'roc_auc', 'average_precision', 'precision', 'precision_weighted', 'precision_macro', 'precision_micro', 'recall', 'recall_weighted', 'recall_macro', 'recall_micro', 'log_loss', 'adjusted_rand_score' # not really, but works ] MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples'] class EstimatorWithoutFit(object): """Dummy estimator to test check_scoring""" pass class EstimatorWithFit(BaseEstimator): """Dummy estimator to test check_scoring""" def fit(self, X, y): return self class EstimatorWithFitAndScore(object): """Dummy estimator to test check_scoring""" def fit(self, X, y): return self def score(self, X, y): return 1.0 class EstimatorWithFitAndPredict(object): """Dummy estimator to test check_scoring""" def fit(self, X, y): self.y = y return self def predict(self, X): return self.y class DummyScorer(object): """Dummy scorer that always returns 1.""" def __call__(self, est, X, y): return 1 def test_check_scoring(): # Test all branches of check_scoring estimator = EstimatorWithoutFit() pattern = (r"estimator should a be an estimator implementing 'fit' method," r" .* was passed") assert_raises_regexp(TypeError, pattern, check_scoring, estimator) estimator = EstimatorWithFitAndScore() estimator.fit([[1]], [1]) scorer = check_scoring(estimator) assert_true(scorer is _passthrough_scorer) assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0) estimator = EstimatorWithFitAndPredict() estimator.fit([[1]], [1]) pattern = (r"If no scoring is specified, the estimator passed should have" r" a 'score' method\. 
The estimator .* does not\.") assert_raises_regexp(TypeError, pattern, check_scoring, estimator) scorer = check_scoring(estimator, "accuracy") assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0) estimator = EstimatorWithFit() scorer = check_scoring(estimator, "accuracy") assert_true(isinstance(scorer, _PredictScorer)) estimator = EstimatorWithFit() scorer = check_scoring(estimator, allow_none=True) assert_true(scorer is None) def test_check_scoring_gridsearchcv(): # test that check_scoring works on GridSearchCV and pipeline. # slightly redundant non-regression test. grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]}) scorer = check_scoring(grid, "f1") assert_true(isinstance(scorer, _PredictScorer)) pipe = make_pipeline(LinearSVC()) scorer = check_scoring(pipe, "f1") assert_true(isinstance(scorer, _PredictScorer)) # check that cross_val_score definitely calls the scorer # and doesn't make any assumptions about the estimator apart from having a # fit. scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1], scoring=DummyScorer()) assert_array_equal(scores, 1) def test_make_scorer(): # Sanity check on the make_scorer factory function. f = lambda *args: 0 assert_raises(ValueError, make_scorer, f, needs_threshold=True, needs_proba=True) def test_classification_scores(): # Test classification scorers. X, y = make_blobs(random_state=0, centers=2) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = LinearSVC(random_state=0) clf.fit(X_train, y_train) for prefix, metric in [('f1', f1_score), ('precision', precision_score), ('recall', recall_score)]: score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test) score2 = metric(y_test, clf.predict(X_test), pos_label=None, average='weighted') assert_almost_equal(score1, score2) score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test) score2 = metric(y_test, clf.predict(X_test), pos_label=None, average='macro') assert_almost_equal(score1, score2) score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test) score2 = metric(y_test, clf.predict(X_test), pos_label=None, average='micro') assert_almost_equal(score1, score2) score1 = get_scorer('%s' % prefix)(clf, X_test, y_test) score2 = metric(y_test, clf.predict(X_test), pos_label=1) assert_almost_equal(score1, score2) # test fbeta score that takes an argument scorer = make_scorer(fbeta_score, beta=2) score1 = scorer(clf, X_test, y_test) score2 = fbeta_score(y_test, clf.predict(X_test), beta=2) assert_almost_equal(score1, score2) # test that custom scorer can be pickled unpickled_scorer = pickle.loads(pickle.dumps(scorer)) score3 = unpickled_scorer(clf, X_test, y_test) assert_almost_equal(score1, score3) # smoke test the repr: repr(fbeta_score) def test_regression_scorers(): # Test regression scorers. diabetes = load_diabetes() X, y = diabetes.data, diabetes.target X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = Ridge() clf.fit(X_train, y_train) score1 = get_scorer('r2')(clf, X_test, y_test) score2 = r2_score(y_test, clf.predict(X_test)) assert_almost_equal(score1, score2) def test_thresholded_scorers(): # Test scorers that take thresholds. 
X, y = make_blobs(random_state=0, centers=2) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = LogisticRegression(random_state=0) clf.fit(X_train, y_train) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, clf.decision_function(X_test)) score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]) assert_almost_equal(score1, score2) assert_almost_equal(score1, score3) logscore = get_scorer('log_loss')(clf, X_test, y_test) logloss = log_loss(y_test, clf.predict_proba(X_test)) assert_almost_equal(-logscore, logloss) # same for an estimator without decision_function clf = DecisionTreeClassifier() clf.fit(X_train, y_train) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]) assert_almost_equal(score1, score2) # test with a regressor (no decision_function) reg = DecisionTreeRegressor() reg.fit(X_train, y_train) score1 = get_scorer('roc_auc')(reg, X_test, y_test) score2 = roc_auc_score(y_test, reg.predict(X_test)) assert_almost_equal(score1, score2) # Test that an exception is raised on more than two classes X, y = make_blobs(random_state=0, centers=3) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf.fit(X_train, y_train) assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test) def test_thresholded_scorers_multilabel_indicator_data(): # Test that the scorer work with multilabel-indicator format # for multilabel and multi-output multi-class classifier X, y = make_multilabel_classification(return_indicator=True, allow_unlabeled=False, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Multi-output multi-class predict_proba clf = DecisionTreeClassifier() clf.fit(X_train, y_train) y_proba = clf.predict_proba(X_test) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T) assert_almost_equal(score1, score2) # Multi-output multi-class decision_function # TODO Is there any yet? clf = DecisionTreeClassifier() clf.fit(X_train, y_train) clf._predict_proba = clf.predict_proba clf.predict_proba = None clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)] y_proba = clf.decision_function(X_test) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T) assert_almost_equal(score1, score2) # Multilabel predict_proba clf = OneVsRestClassifier(DecisionTreeClassifier()) clf.fit(X_train, y_train) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, clf.predict_proba(X_test)) assert_almost_equal(score1, score2) # Multilabel decision function clf = OneVsRestClassifier(LinearSVC(random_state=0)) clf.fit(X_train, y_train) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, clf.decision_function(X_test)) assert_almost_equal(score1, score2) def test_unsupervised_scorers(): # Test clustering scorers against gold standard labeling. # We don't have any real unsupervised Scorers yet. X, y = make_blobs(random_state=0, centers=2) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) km = KMeans(n_clusters=3) km.fit(X_train) score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test) score2 = adjusted_rand_score(y_test, km.predict(X_test)) assert_almost_equal(score1, score2) @ignore_warnings def test_raises_on_score_list(): # Test that when a list of scores is returned, we raise proper errors. 
X, y = make_blobs(random_state=0) f1_scorer_no_average = make_scorer(f1_score, average=None) clf = DecisionTreeClassifier() assert_raises(ValueError, cross_val_score, clf, X, y, scoring=f1_scorer_no_average) grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average, param_grid={'max_depth': [1, 2]}) assert_raises(ValueError, grid_search.fit, X, y) @ignore_warnings def test_scorer_sample_weight(): # Test that scorers support sample_weight or raise sensible errors # Unlike the metrics invariance test, in the scorer case it's harder # to ensure that, on the classifier output, weighted and unweighted # scores really should be unequal. X, y = make_classification(random_state=0) _, y_ml = make_multilabel_classification(n_samples=X.shape[0], return_indicator=True, random_state=0) split = train_test_split(X, y, y_ml, random_state=0) X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split sample_weight = np.ones_like(y_test) sample_weight[:10] = 0 # get sensible estimators for each metric sensible_regr = DummyRegressor(strategy='median') sensible_regr.fit(X_train, y_train) sensible_clf = DecisionTreeClassifier(random_state=0) sensible_clf.fit(X_train, y_train) sensible_ml_clf = DecisionTreeClassifier(random_state=0) sensible_ml_clf.fit(X_train, y_ml_train) estimator = dict([(name, sensible_regr) for name in REGRESSION_SCORERS] + [(name, sensible_clf) for name in CLF_SCORERS] + [(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]) for name, scorer in SCORERS.items(): if name in MULTILABEL_ONLY_SCORERS: target = y_ml_test else: target = y_test try: weighted = scorer(estimator[name], X_test, target, sample_weight=sample_weight) ignored = scorer(estimator[name], X_test[10:], target[10:]) unweighted = scorer(estimator[name], X_test, target) assert_not_equal(weighted, unweighted, msg="scorer {0} behaves identically when " "called with sample weights: {1} vs " "{2}".format(name, weighted, unweighted)) assert_almost_equal(weighted, ignored, err_msg="scorer {0} behaves differently when " "ignoring samples and setting sample_weight to" " 0: {1} vs {2}".format(name, weighted, ignored)) except TypeError as e: assert_true("sample_weight" in str(e), "scorer {0} raises unhelpful exception when called " "with sample weights: {1}".format(name, str(e)))
bsd-3-clause
MazamaScience/ispaq
ispaq/updater.py
1
3155
# -*- coding: utf-8 -*- """ Python module containing for updating R packages. :copyright: Mazama Science :license: GNU Lesser General Public License, Version 3 (http://www.gnu.org/copyleft/lesser.html) """ from __future__ import (absolute_import, division, print_function) import pandas as pd from rpy2 import robjects from rpy2 import rinterface from rpy2.robjects import pandas2ri # R Initialization ----------------------------------------------------- # Global R options are set here # Do now show error messages generated inside of the R packages ###robjects.r('options(show.error.messages=FALSE)') # R functions called internally ---------------------------------------- # NOTE: These functions behave exactly the same as the R versions and require # NOTE: R-compatible objects as arguments. _R_install_packages = robjects.r('utils::install.packages') def get_IRIS_package_versions(logger): """ Return a dataframe of version information for IRIS R packages used in ISPAQ. """ IRIS_packages = ['seismicRoll','IRISSeismic','IRISMustangMetrics'] # Get version information for locally installed and CRAN available IRIS_packages r_installed = robjects.r("installed.packages()[c('seismicRoll','IRISSeismic','IRISMustangMetrics'),'Version']") installed_versions = pandas2ri.ri2py(r_installed).tolist() r_available = robjects.r("available.packages()[c('seismicRoll','IRISSeismic','IRISMustangMetrics'),'Version']") cran_versions = pandas2ri.ri2py(r_available).tolist() # Find any 'old' installed packages that available for an upgrade r_old = robjects.r("old.packages()[,'Package']") old = pandas2ri.ri2py(r_old).tolist() # Create a needsUpgrade array upgrade = [False, False, False] for i in range(len(IRIS_packages)): if IRIS_packages[i] in old: upgrade[i] = True # Put information in a dataframe df = pd.DataFrame({'package': IRIS_packages, 'installed': installed_versions, 'CRAN': cran_versions, 'upgrade': upgrade}) # Reorder columns from default alphabetic df = df[['package','installed','CRAN','upgrade']] return(df) def update_IRIS_packages(logger): """ Automatically upate IRIS R packages used in ISPAQ. """ df = get_IRIS_package_versions(logger) packages_to_upgrade = df.package[df.upgrade].tolist() if len(packages_to_upgrade) == 0: logger.info('No packages need updating.\n') else: for package in packages_to_upgrade: try: # TODO: automatic package installation needs to be tested _R_install_packages(package) logger.info('Installed %s' % (package)) except Exception as e: logger.error('Unable to install %s: %s' % (package,e)) # ------------------------------------------------------------------------------ if __name__ == '__main__': import doctest doctest.testmod(exclude_empty=True)
gpl-3.0
petroniocandido/pyFTS
pyFTS/hyperparam/mvfts.py
1
18590
""" Distributed Evolutionary Hyperparameter Optimization (DEHO) for MVFTS variables: A list of dictionaries, where each dictionary contains - name: Variable name - data_label: data label - type: common | seasonal - seasonality: target_variable genotype: A dictionary containing - variables: a list with the selected variables, each instance is the index of a variable in variables - params: a list of dictionaries, where each dictionary contains {mf, npart, partitioner, alpha} """ import numpy as np import pandas as pd import math import time import random import logging from pyFTS.common import Util from pyFTS.benchmarks import Measures from pyFTS.partitioners import Grid, Entropy # , Huarng from pyFTS.common import Membership from pyFTS.models import hofts, ifts, pwfts from pyFTS.hyperparam import Util as hUtil from pyFTS.hyperparam import Evolutionary, random_search as RS from pyFTS.models.multivariate import mvfts, wmvfts, variable from pyFTS.models.seasonal import partitioner as seasonal from pyFTS.models.seasonal.common import DateTime def genotype(vars, params, tparams, f1=None, f2=None): """ Create the individual genotype :param variables: dictionary with explanatory variable names, types, and other parameters :param params: dictionary with variable hyperparameters var: {mf, npart, partitioner, alpha} :param tparams: dictionary with target variable hyperparameters var: {mf, npart, partitioner, alpha} :param f1: accuracy fitness value :param f2: parsimony fitness value :return: the genotype, a dictionary with all hyperparameters """ ind = dict( explanatory_variables=vars, explanatory_params=params, target_params = tparams, f1=f1, f2=f2 ) return ind def random_genotype(**kwargs): """ Create random genotype :return: the genotype, a dictionary with all hyperparameters """ vars = kwargs.get('variables',None) tvar = kwargs.get('target_variable',None) l = len(vars) nvar = np.random.randint(1,l,1) # the number of variables explanatory_variables = np.unique(np.random.randint(0, l, nvar)).tolist() #indexes of the variables explanatory_params = [] for v in explanatory_variables: var = vars[v] param = random_param(var) explanatory_params.append(param) target_params = random_param(tvar) return genotype( explanatory_variables, explanatory_params, target_params ) def random_param(var): if var['type'] == 'common': npart = random.randint(7, 50) else: npart = var['npart'] param = { 'mf': random.randint(1, 4), 'npart': npart, 'partitioner': 1, # random.randint(1, 2), 'alpha': random.uniform(0, .5) } return param def phenotype(individual, train, fts_method, parameters={}, **kwargs): vars = kwargs.get('variables', None) tvar = kwargs.get('target_variable', None) explanatory_vars = [] for ct, vix in enumerate(individual['explanatory_variables']): var = vars[vix] params = individual['explanatory_params'][ct] mf = phenotype_mf(params) partitioner = phenotype_partitioner(params) if var['type'] == 'common': tmp = variable.Variable(var['name'], data_label=var['data_label'], alias=var['name'], partitioner=partitioner, partitioner_specific={'mf': mf}, npart=params['npart'], alpha_cut=params['alpha'], data=train) elif var['type'] == 'seasonal': sp = {'seasonality': var['seasonality'], 'mf': mf } tmp = variable.Variable(var['name'], data_label=var['data_label'], alias=var['name'], partitioner=seasonal.TimeGridPartitioner, partitioner_specific=sp, npart=params['npart'], alpha_cut=params['alpha'], data=train) explanatory_vars.append(tmp) tparams = individual['target_params'] partitioner = 
phenotype_partitioner(tparams) mf = phenotype_mf(tparams) target_var = variable.Variable(tvar['name'], data_label=tvar['data_label'], alias=tvar['name'], partitioner=partitioner, partitioner_specific={'mf': mf}, npart=tparams['npart'], alpha_cut=tparams['alpha'], data=train) explanatory_vars.append(target_var) model = fts_method(explanatory_variables=explanatory_vars, target_variable=target_var, **parameters) model.fit(train, **parameters) return model def phenotype_partitioner(params): if params['partitioner'] == 1: partitioner = Grid.GridPartitioner elif params['partitioner'] == 2: partitioner = Entropy.EntropyPartitioner return partitioner def phenotype_mf(params): if params['mf'] == 1: mf = Membership.trimf elif params['mf'] == 2: mf = Membership.trapmf elif params['mf'] == 3 and params['partitioner'] != 2: mf = Membership.gaussmf else: mf = Membership.trimf return mf def evaluate(dataset, individual, **kwargs): """ Evaluate an individual using a sliding window cross validation over the dataset. :param dataset: Evaluation dataset :param individual: genotype to be tested :param window_size: The length of scrolling window for train/test on dataset :param train_rate: The train/test split ([0,1]) :param increment_rate: The increment of the scrolling window, relative to the window_size ([0,1]) :param parameters: dict with model specific arguments for fit method. :return: a tuple (len_lags, rmse) with the parsimony fitness value and the accuracy fitness value """ import logging from pyFTS.models import hofts, ifts, pwfts from pyFTS.common import Util from pyFTS.benchmarks import Measures from pyFTS.hyperparam.Evolutionary import __measures from pyFTS.hyperparam.mvfts import phenotype from pyFTS.models.multivariate import mvfts, wmvfts, partitioner, variable, cmvfts,grid, granular, common import numpy as np window_size = kwargs.get('window_size', 800) train_rate = kwargs.get('train_rate', .8) increment_rate = kwargs.get('increment_rate', .2) fts_method = kwargs.get('fts_method', wmvfts.WeightedMVFTS) parameters = kwargs.get('parameters',{}) tvar = kwargs.get('target_variable', None) if individual['f1'] is not None and individual['f2'] is not None: return { key: individual[key] for key in __measures } errors = [] lengths = [] kwargs2 = kwargs.copy() kwargs2.pop('fts_method') if 'parameters' in kwargs2: kwargs2.pop('parameters') for count, train, test in Util.sliding_window(dataset, window_size, train=train_rate, inc=increment_rate): try: model = phenotype(individual, train, fts_method=fts_method, parameters=parameters, **kwargs2) forecasts = model.predict(test) rmse = Measures.rmse(test[tvar['data_label']].values[model.max_lag:], forecasts[:-1]) lengths.append(len(model)) errors.append(rmse) except Exception as ex: logging.exception("Error") lengths.append(np.nan) errors.append(np.nan) try: _rmse = np.nanmean(errors) _len = np.nanmean(lengths) f1 = np.nansum([.6 * _rmse, .4 * np.nanstd(errors)]) f2 = np.nansum([.9 * _len, .1 * np.nanstd(lengths)]) return {'f1': f1, 'f2': f2, 'rmse': _rmse, 'size': _len } except Exception as ex: logging.exception("Error") return {'f1': np.inf, 'f2': np.inf, 'rmse': np.inf, 'size': np.inf} def crossover(population, **kwargs): """ Crossover operation between two parents :param population: the original population :return: a genotype """ import random vars = kwargs.get('variables', None) tvar = kwargs.get('target_variable', None) n = len(population) - 1 r1,r2 = 0,0 while r1 == r2: r1 = random.randint(0, n) r2 = random.randint(0, n) if population[r1]['f1'] < 
population[r2]['f1']: best = population[r1] worst = population[r2] else: best = population[r2] worst = population[r1] rnd = random.uniform(0, 1) nvar = len(best['explanatory_variables']) if rnd < .7 else len(worst['explanatory_variables']) explanatory_variables = [] explanatory_params = [] for ct in np.arange(nvar): if ct < len(best['explanatory_variables']) and ct < len(worst['explanatory_variables']): rnd = random.uniform(0, 1) ix = best['explanatory_variables'][ct] if rnd < .7 else worst['explanatory_variables'][ct] elif ct < len(best['explanatory_variables']): ix = best['explanatory_variables'][ct] elif ct < len(worst['explanatory_variables']): ix = worst['explanatory_variables'][ct] if ix in explanatory_variables: continue if ix in best['explanatory_variables'] and ix in worst['explanatory_variables']: bix = best['explanatory_variables'].index(ix) wix = worst['explanatory_variables'].index(ix) param = crossover_variable_params(best['explanatory_params'][bix], worst['explanatory_params'][wix], vars[ix]) elif ix in best['explanatory_variables']: bix = best['explanatory_variables'].index(ix) param = best['explanatory_params'][bix] elif ix in worst['explanatory_variables']: wix = worst['explanatory_variables'].index(ix) param = worst['explanatory_params'][wix] explanatory_variables.append(ix) explanatory_params.append(param) tparams = crossover_variable_params(best['target_params'], worst['target_params'], tvar) descendent = genotype(explanatory_variables, explanatory_params, tparams) return descendent def crossover_variable_params(best, worst, var): if var['type'] == 'common': npart = int(round(.7 * best['npart'] + .3 * worst['npart'])) else: npart = best['npart'] alpha = float(.7 * best['alpha'] + .3 * worst['alpha']) rnd = random.uniform(0, 1) mf = best['mf'] if rnd < .7 else worst['mf'] rnd = random.uniform(0, 1) partitioner = best['partitioner'] if rnd < .7 else worst['partitioner'] param = {'partitioner': partitioner, 'npart': npart, 'alpha': alpha, 'mf': mf} return param def mutation(individual, **kwargs): """ Mutation operator :param individual: an individual genotype :param pmut: individual probability o :return: """ vars = kwargs.get('variables', None) tvar = kwargs.get('target_variable', None) l = len(vars) il = len(individual['explanatory_variables']) rnd = random.uniform(0, 1) if rnd > .9 and il > 1: rnd = random.randint(0, il-1) val = individual['explanatory_variables'][rnd] individual['explanatory_variables'].remove(val) individual['explanatory_params'].pop(rnd) elif rnd < .1 and il < l: rnd = random.randint(0, l-1) while rnd in individual['explanatory_variables']: rnd = random.randint(0, l-1) individual['explanatory_variables'].append(rnd) individual['explanatory_params'].append(random_param(vars[rnd])) for ct in np.arange(len(individual['explanatory_variables'])): rnd = random.uniform(0, 1) if rnd > .5: mutate_variable_params(individual['explanatory_params'][ct], vars[ct]) rnd = random.uniform(0, 1) if rnd > .5: mutate_variable_params(individual['target_params'], tvar) individual['f1'] = None individual['f2'] = None return individual def mutation_random_search(individual, **kwargs): """ Mutation operator :param individual: an individual genotype :param pmut: individual probability o :return: """ import copy new = copy.deepcopy(individual) vars = kwargs.get('variables', None) tvar = kwargs.get('target_variable', None) l = len(vars) il = len(new['explanatory_variables']) # if il > 1: for l in range(il): il = len(new['explanatory_variables']) rnd = random.uniform(0, 1) if 
rnd > .5: rnd = random.randint(0, il-1) if rnd < il and il > 1: val = individual['explanatory_variables'][rnd] new['explanatory_variables'].remove(val) new['explanatory_params'].pop(rnd) else: rnd = random.randint(0, l-1) while rnd in new['explanatory_variables']: rnd = random.randint(0, l-1) new['explanatory_variables'].append(rnd) new['explanatory_params'].append(random_param(vars[rnd])) for ct in np.arange(len(new['explanatory_variables'])): rnd = random.uniform(0, 1) if rnd > .5: mutate_variable_params(new['explanatory_params'][ct], vars[ct]) rnd = random.uniform(0, 1) if rnd > .5: mutate_variable_params(new['target_params'], tvar) new['f1'] = None new['f2'] = None return new def mutate_variable_params(param, var): if var['type']=='common': param['npart'] = min(50, max(3, int(param['npart'] + np.random.normal(0, 4)))) param['alpha'] = min(.5, max(0, param['alpha'] + np.random.normal(0, .5))) param['mf'] = random.randint(1, 4) param['partitioner'] = random.randint(1, 2) def execute(datasetname, dataset, **kwargs): """ Batch execution of Distributed Evolutionary Hyperparameter Optimization (DEHO) for monovariate methods :param datasetname: :param dataset: The time series to optimize the FTS :keyword database_file: :keyword experiments: :keyword distributed: :keyword ngen: An integer value with the maximum number of generations, default value: 30 :keyword mgen: An integer value with the maximum number of generations without improvement to stop, default value 7 :keyword npop: An integer value with the population size, default value: 20 :keyword pcross: A float value between 0 and 1 with the probability of crossover, default: .5 :keyword psel: A float value between 0 and 1 with the probability of selection, default: .5 :keyword pmut: A float value between 0 and 1 with the probability of mutation, default: .3 :keyword fts_method: The MVFTS method to optimize :keyword parameters: dict with model specific arguments for fts_method :keyword elitism: A boolean value indicating if the best individual must always survive to next population :keyword selection_operator: a function that receives the whole population and return a selected individual :keyword window_size: An integer value with the the length of scrolling window for train/test on dataset :keyword train_rate: A float value between 0 and 1 with the train/test split ([0,1]) :keyword increment_rate: A float value between 0 and 1 with the the increment of the scrolling window, relative to the window_size ([0,1]) :keyword collect_statistics: A boolean value indicating to collect statistics for each generation :keyword distributed: A value indicating it the execution will be local and sequential (distributed=False), or parallel and distributed (distributed='dispy' or distributed='spark') :keyword cluster: If distributed='dispy' the list of cluster nodes, else if distributed='spark' it is the master node :return: the best genotype """ experiments = kwargs.get('experiments', 30) distributed = kwargs.get('distributed', False) fts_method = kwargs.get('fts_method', hofts.WeightedHighOrderFTS) shortname = str(fts_method.__module__).split('.')[-1] kwargs['mutation_operator'] = mutation kwargs['crossover_operator'] = crossover kwargs['evaluation_operator'] = evaluate kwargs['random_individual'] = random_genotype if distributed == 'dispy': from pyFTS.distributed import dispy as dUtil import dispy nodes = kwargs.get('nodes', ['127.0.0.1']) cluster, http_server = dUtil.start_dispy_cluster(evaluate, nodes=nodes) kwargs['cluster'] = cluster ret = [] for i 
in np.arange(experiments): print("Experiment {}".format(i)) start = time.time() ret, statistics = Evolutionary.GeneticAlgorithm(dataset, **kwargs) end = time.time() ret['time'] = end - start experiment = {'individual': ret, 'statistics': statistics} ret = process_experiment(shortname, experiment, datasetname) if distributed == 'dispy': dUtil.stop_dispy_cluster(cluster, http_server) return ret def process_experiment(fts_method, result, datasetname): """ Persist the results of an DEHO execution in sqlite database (best hyperparameters) and json file (generation statistics) :param fts_method: :param result: :param datasetname: :param conn: :return: """ log_result(datasetname, fts_method, result['individual']) persist_statistics(datasetname, result['statistics']) return result['individual'] def persist_statistics(datasetname, statistics): import json with open('statistics_{}.json'.format(datasetname), 'w') as file: file.write(json.dumps(statistics)) def log_result(datasetname, fts_method, result): import json with open('result_{}{}.json'.format(fts_method,datasetname), 'a+') as file: file.write(json.dumps(result)) print(result) def random_search(datasetname, dataset, **kwargs): experiments = kwargs.get('experiments', 30) distributed = kwargs.get('distributed', False) fts_method = kwargs.get('fts_method', hofts.WeightedHighOrderFTS) shortname = str(fts_method.__module__).split('.')[-1] kwargs['mutation_operator'] = mutation_random_search kwargs['evaluation_operator'] = evaluate kwargs['random_individual'] = random_genotype ret = [] for i in np.arange(experiments): print("Experiment {}".format(i)) start = time.time() ret, statistics = RS.execute (dataset, **kwargs) end = time.time() ret['time'] = end - start experiment = {'individual': ret, 'statistics': statistics} ret = process_experiment(shortname, experiment, datasetname) return ret
gpl-3.0
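For orientation, a minimal hedged sketch of what `mutate_variable_params` above does to the per-variable hyperparameter dict (`partitioner`, `npart`, `alpha`, `mf`). The import name `deho` for this module is an assumption; only behaviour visible in the code above is relied on.

import deho  # hypothetical import name for the module defined above

param = {'partitioner': 1, 'npart': 10, 'alpha': 0.0, 'mf': 1}
var = {'type': 'common'}   # minimal variable descriptor; only 'type' is read here

deho.mutate_variable_params(param, var)
# npart receives a Gaussian perturbation clipped to [3, 50], alpha is clipped
# to [0, .5], and mf / partitioner are redrawn uniformly at random
print(param)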
smartscheduling/scikit-learn-categorical-tree
examples/covariance/plot_sparse_cov.py
300
5078
""" ====================================== Sparse inverse covariance estimation ====================================== Using the GraphLasso estimator to learn a covariance and sparse precision from a small number of samples. To estimate a probabilistic model (e.g. a Gaussian model), estimating the precision matrix, that is the inverse covariance matrix, is as important as estimating the covariance matrix. Indeed a Gaussian model is parametrized by the precision matrix. To be in favorable recovery conditions, we sample the data from a model with a sparse inverse covariance matrix. In addition, we ensure that the data is not too much correlated (limiting the largest coefficient of the precision matrix) and that there a no small coefficients in the precision matrix that cannot be recovered. In addition, with a small number of observations, it is easier to recover a correlation matrix rather than a covariance, thus we scale the time series. Here, the number of samples is slightly larger than the number of dimensions, thus the empirical covariance is still invertible. However, as the observations are strongly correlated, the empirical covariance matrix is ill-conditioned and as a result its inverse --the empirical precision matrix-- is very far from the ground truth. If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number of samples is small, we need to shrink a lot. As a result, the Ledoit-Wolf precision is fairly close to the ground truth precision, that is not far from being diagonal, but the off-diagonal structure is lost. The l1-penalized estimator can recover part of this off-diagonal structure. It learns a sparse precision. It is not able to recover the exact sparsity pattern: it detects too many non-zero coefficients. However, the highest non-zero coefficients of the l1 estimated correspond to the non-zero coefficients in the ground truth. Finally, the coefficients of the l1 precision estimate are biased toward zero: because of the penalty, they are all smaller than the corresponding ground truth value, as can be seen on the figure. Note that, the color range of the precision matrices is tweaked to improve readability of the figure. The full range of values of the empirical precision is not displayed. The alpha parameter of the GraphLasso setting the sparsity of the model is set by internal cross-validation in the GraphLassoCV. As can be seen on figure 2, the grid to compute the cross-validation score is iteratively refined in the neighborhood of the maximum. 
""" print(__doc__) # author: Gael Varoquaux <[email protected]> # License: BSD 3 clause # Copyright: INRIA import numpy as np from scipy import linalg from sklearn.datasets import make_sparse_spd_matrix from sklearn.covariance import GraphLassoCV, ledoit_wolf import matplotlib.pyplot as plt ############################################################################## # Generate the data n_samples = 60 n_features = 20 prng = np.random.RandomState(1) prec = make_sparse_spd_matrix(n_features, alpha=.98, smallest_coef=.4, largest_coef=.7, random_state=prng) cov = linalg.inv(prec) d = np.sqrt(np.diag(cov)) cov /= d cov /= d[:, np.newaxis] prec *= d prec *= d[:, np.newaxis] X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples) X -= X.mean(axis=0) X /= X.std(axis=0) ############################################################################## # Estimate the covariance emp_cov = np.dot(X.T, X) / n_samples model = GraphLassoCV() model.fit(X) cov_ = model.covariance_ prec_ = model.precision_ lw_cov_, _ = ledoit_wolf(X) lw_prec_ = linalg.inv(lw_cov_) ############################################################################## # Plot the results plt.figure(figsize=(10, 6)) plt.subplots_adjust(left=0.02, right=0.98) # plot the covariances covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_), ('GraphLasso', cov_), ('True', cov)] vmax = cov_.max() for i, (name, this_cov) in enumerate(covs): plt.subplot(2, 4, i + 1) plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r) plt.xticks(()) plt.yticks(()) plt.title('%s covariance' % name) # plot the precisions precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_), ('GraphLasso', prec_), ('True', prec)] vmax = .9 * prec_.max() for i, (name, this_prec) in enumerate(precs): ax = plt.subplot(2, 4, i + 5) plt.imshow(np.ma.masked_equal(this_prec, 0), interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r) plt.xticks(()) plt.yticks(()) plt.title('%s precision' % name) ax.set_axis_bgcolor('.7') # plot the model selection metric plt.figure(figsize=(4, 3)) plt.axes([.2, .15, .75, .7]) plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-') plt.axvline(model.alpha_, color='.5') plt.title('Model selection') plt.ylabel('Cross-validation score') plt.xlabel('alpha') plt.show()
bsd-3-clause
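The docstring above motivates estimating the precision matrix because zeros in it encode conditional independence in a Gaussian model. As a hedged aside (standard formula, not part of the example itself), the estimated precision can be read as partial correlations:

import numpy as np

def partial_correlations(precision):
    # partial_corr[i, j] = -prec[i, j] / sqrt(prec[i, i] * prec[j, j]);
    # exact zeros in the precision therefore map to zero partial correlation
    d = np.sqrt(np.diag(precision))
    partial = -precision / np.outer(d, d)
    np.fill_diagonal(partial, 1.0)
    return partial

# e.g. partial_correlations(model.precision_) after fitting GraphLassoCV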
mblondel/scikit-learn
examples/cluster/plot_agglomerative_clustering_metrics.py
402
4492
""" Agglomerative clustering with different metrics =============================================== Demonstrates the effect of different metrics on the hierarchical clustering. The example is engineered to show the effect of the choice of different metrics. It is applied to waveforms, which can be seen as high-dimensional vector. Indeed, the difference between metrics is usually more pronounced in high dimension (in particular for euclidean and cityblock). We generate data from three groups of waveforms. Two of the waveforms (waveform 1 and waveform 2) are proportional one to the other. The cosine distance is invariant to a scaling of the data, as a result, it cannot distinguish these two waveforms. Thus even with no noise, clustering using this distance will not separate out waveform 1 and 2. We add observation noise to these waveforms. We generate very sparse noise: only 6% of the time points contain noise. As a result, the l1 norm of this noise (ie "cityblock" distance) is much smaller than it's l2 norm ("euclidean" distance). This can be seen on the inter-class distance matrices: the values on the diagonal, that characterize the spread of the class, are much bigger for the Euclidean distance than for the cityblock distance. When we apply clustering to the data, we find that the clustering reflects what was in the distance matrices. Indeed, for the Euclidean distance, the classes are ill-separated because of the noise, and thus the clustering does not separate the waveforms. For the cityblock distance, the separation is good and the waveform classes are recovered. Finally, the cosine distance does not separate at all waveform 1 and 2, thus the clustering puts them in the same cluster. """ # Author: Gael Varoquaux # License: BSD 3-Clause or CC-0 import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import pairwise_distances np.random.seed(0) # Generate waveform data n_features = 2000 t = np.pi * np.linspace(0, 1, n_features) def sqr(x): return np.sign(np.cos(x)) X = list() y = list() for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]): for _ in range(30): phase_noise = .01 * np.random.normal() amplitude_noise = .04 * np.random.normal() additional_noise = 1 - 2 * np.random.rand(n_features) # Make the noise sparse additional_noise[np.abs(additional_noise) < .997] = 0 X.append(12 * ((a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise))) + additional_noise)) y.append(i) X = np.array(X) y = np.array(y) n_clusters = 3 labels = ('Waveform 1', 'Waveform 2', 'Waveform 3') # Plot the ground-truth labelling plt.figure() plt.axes([0, 0, 1, 1]) for l, c, n in zip(range(n_clusters), 'rgb', labels): lines = plt.plot(X[y == l].T, c=c, alpha=.5) lines[0].set_label(n) plt.legend(loc='best') plt.axis('tight') plt.axis('off') plt.suptitle("Ground truth", size=20) # Plot the distances for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): avg_dist = np.zeros((n_clusters, n_clusters)) plt.figure(figsize=(5, 4.5)) for i in range(n_clusters): for j in range(n_clusters): avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j], metric=metric).mean() avg_dist /= avg_dist.max() for i in range(n_clusters): for j in range(n_clusters): plt.text(i, j, '%5.3f' % avg_dist[i, j], verticalalignment='center', horizontalalignment='center') plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2, vmin=0) plt.xticks(range(n_clusters), labels, rotation=45) plt.yticks(range(n_clusters), labels) plt.colorbar() 
plt.suptitle("Interclass %s distances" % metric, size=18) plt.tight_layout() # Plot clustering results for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): model = AgglomerativeClustering(n_clusters=n_clusters, linkage="average", affinity=metric) model.fit(X) plt.figure() plt.axes([0, 0, 1, 1]) for l, c in zip(np.arange(model.n_clusters), 'rgbk'): plt.plot(X[model.labels_ == l].T, c=c, alpha=.5) plt.axis('tight') plt.axis('off') plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20) plt.show()
bsd-3-clause
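The clustering loop above passes the metric name directly to `AgglomerativeClustering`. Equivalently, and only as a hedged sketch, one can precompute the same distance matrix shown in the heat maps and cluster on it, which makes explicit that average-linkage clustering only ever sees pairwise distances:

from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances

D = pairwise_distances(X, metric="cityblock")          # same distances as the heat maps
model = AgglomerativeClustering(n_clusters=3, linkage="average",
                                affinity="precomputed")
model.fit(D)                                           # fit on distances, not raw waveforms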
qifeigit/scikit-learn
examples/calibration/plot_calibration_curve.py
225
5903
""" ============================== Probability Calibration curves ============================== When performing classification one often wants to predict not only the class label, but also the associated probability. This probability gives some kind of confidence on the prediction. This example demonstrates how to display how well calibrated the predicted probabilities are and how to calibrate an uncalibrated classifier. The experiment is performed on an artificial dataset for binary classification with 100.000 samples (1.000 of them are used for model fitting) with 20 features. Of the 20 features, only 2 are informative and 10 are redundant. The first figure shows the estimated probabilities obtained with logistic regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic calibration and sigmoid calibration. The calibration performance is evaluated with Brier score, reported in the legend (the smaller the better). One can observe here that logistic regression is well calibrated while raw Gaussian naive Bayes performs very badly. This is because of the redundant features which violate the assumption of feature-independence and result in an overly confident classifier, which is indicated by the typical transposed-sigmoid curve. Calibration of the probabilities of Gaussian naive Bayes with isotonic regression can fix this issue as can be seen from the nearly diagonal calibration curve. Sigmoid calibration also improves the brier score slightly, albeit not as strongly as the non-parametric isotonic regression. This can be attributed to the fact that we have plenty of calibration data such that the greater flexibility of the non-parametric model can be exploited. The second figure shows the calibration curve of a linear support-vector classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian naive Bayes: the calibration curve has a sigmoid curve, which is typical for an under-confident classifier. In the case of LinearSVC, this is caused by the margin property of the hinge loss, which lets the model focus on hard samples that are close to the decision boundary (the support vectors). Both kinds of calibration can fix this issue and yield nearly identical results. This shows that sigmoid calibration can deal with situations where the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC) but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes). """ print(__doc__) # Author: Alexandre Gramfort <[email protected]> # Jan Hendrik Metzen <[email protected]> # License: BSD Style. import matplotlib.pyplot as plt from sklearn import datasets from sklearn.naive_bayes import GaussianNB from sklearn.svm import LinearSVC from sklearn.linear_model import LogisticRegression from sklearn.metrics import (brier_score_loss, precision_score, recall_score, f1_score) from sklearn.calibration import CalibratedClassifierCV, calibration_curve from sklearn.cross_validation import train_test_split # Create dataset of classification task with many redundant and few # informative features X, y = datasets.make_classification(n_samples=100000, n_features=20, n_informative=2, n_redundant=10, random_state=42) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99, random_state=42) def plot_calibration_curve(est, name, fig_index): """Plot calibration curve for est w/o and with calibration. 
""" # Calibrated with isotonic calibration isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic') # Calibrated with sigmoid calibration sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid') # Logistic regression with no calibration as baseline lr = LogisticRegression(C=1., solver='lbfgs') fig = plt.figure(fig_index, figsize=(10, 10)) ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax2 = plt.subplot2grid((3, 1), (2, 0)) ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated") for clf, name in [(lr, 'Logistic'), (est, name), (isotonic, name + ' + Isotonic'), (sigmoid, name + ' + Sigmoid')]: clf.fit(X_train, y_train) y_pred = clf.predict(X_test) if hasattr(clf, "predict_proba"): prob_pos = clf.predict_proba(X_test)[:, 1] else: # use decision function prob_pos = clf.decision_function(X_test) prob_pos = \ (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min()) clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max()) print("%s:" % name) print("\tBrier: %1.3f" % (clf_score)) print("\tPrecision: %1.3f" % precision_score(y_test, y_pred)) print("\tRecall: %1.3f" % recall_score(y_test, y_pred)) print("\tF1: %1.3f\n" % f1_score(y_test, y_pred)) fraction_of_positives, mean_predicted_value = \ calibration_curve(y_test, prob_pos, n_bins=10) ax1.plot(mean_predicted_value, fraction_of_positives, "s-", label="%s (%1.3f)" % (name, clf_score)) ax2.hist(prob_pos, range=(0, 1), bins=10, label=name, histtype="step", lw=2) ax1.set_ylabel("Fraction of positives") ax1.set_ylim([-0.05, 1.05]) ax1.legend(loc="lower right") ax1.set_title('Calibration plots (reliability curve)') ax2.set_xlabel("Mean predicted value") ax2.set_ylabel("Count") ax2.legend(loc="upper center", ncol=2) plt.tight_layout() # Plot calibration cuve for Gaussian Naive Bayes plot_calibration_curve(GaussianNB(), "Naive Bayes", 1) # Plot calibration cuve for Linear SVC plot_calibration_curve(LinearSVC(), "SVC", 2) plt.show()
bsd-3-clause
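The Brier score reported in the legend is simply the mean squared difference between the predicted probability of the positive class and the 0/1 outcome; a hedged restatement of what `brier_score_loss` computes for binary labels:

import numpy as np

def brier(y_true, prob_pos):
    # lower is better; 0 means perfectly sharp and calibrated probabilities
    return np.mean((prob_pos - y_true) ** 2)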
WoodResourcesGroup/EPIC_AllPowerLabs
LS_example.py
1
1387
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 15 13:32:52 2017

@author: jdlara
"""

import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score

# Load the diabetes dataset
diabetes = datasets.load_diabetes()

# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]

# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]

# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]

# Create linear regression object
regr = linear_model.LinearRegression()

# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)

# Make predictions using the testing set
diabetes_y_pred = regr.predict(diabetes_X_test)

# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % mean_squared_error(diabetes_y_test, diabetes_y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))

# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)

plt.xticks(())
plt.yticks(())

plt.show()
mit
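The "variance score" printed above is the coefficient of determination; a hedged numpy restatement of what `r2_score` returns by default:

import numpy as np

def r2(y_true, y_pred):
    ss_res = np.sum((y_true - y_pred) ** 2)              # residual sum of squares
    ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)     # total sum of squares
    return 1.0 - ss_res / ss_tot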
SAGridOps/SoftwareTests
scikit-learn/classifiersTest.py
1
1066
import time
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier

timer = time.time()

classifiers = [LogisticRegression(), LinearSVC(), KNeighborsClassifier(),
               RandomForestClassifier(), SVC()]

XandY = np.loadtxt('train1000.csv', delimiter=',', skiprows=1)
np.random.shuffle(XandY)
X = XandY[:, 1:]
Y = XandY[:, 0]

nX = np.shape(X)[0]
nTrain = int(0.7 * nX)  # array indices must be integers
XTrain = X[:nTrain, :]
YTrain = Y[:nTrain]
XTest = X[nTrain:, :]
YTest = Y[nTrain:]

for classifier in classifiers:
    clf = classifier
    clf.fit(XTrain, YTrain)
    trainingAccuracy = clf.score(XTrain, YTrain)
    testAccuracy = clf.score(XTest, YTest)
    print str(classifier)
    print 'training accuracy = ' + str(trainingAccuracy)
    print ' test accuracy is = ' + str(testAccuracy)
    print "**********"
    print "\n"
    print "\n"

print("total time = " + str(time.time()-timer) + " secs")
mpl-2.0
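The shuffle-then-slice split above can also be written with scikit-learn's own helper; a hedged alternative sketch (module path as in current scikit-learn):

from sklearn.model_selection import train_test_split

# 70/30 split with shuffling handled by the helper itself
XTrain, XTest, YTrain, YTest = train_test_split(X, Y, train_size=0.7,
                                                random_state=0)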
anntzer/scikit-learn
examples/neighbors/plot_nearest_centroid.py
25
1818
""" =============================== Nearest Centroid Classification =============================== Sample usage of Nearest Centroid classification. It will plot the decision boundaries for each class. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn import datasets from sklearn.neighbors import NearestCentroid n_neighbors = 15 # import some data to play with iris = datasets.load_iris() # we only take the first two features. We could avoid this ugly # slicing by using a two-dim dataset X = iris.data[:, :2] y = iris.target h = .02 # step size in the mesh # Create color maps cmap_light = ListedColormap(['orange', 'cyan', 'cornflowerblue']) cmap_bold = ListedColormap(['darkorange', 'c', 'darkblue']) for shrinkage in [None, .2]: # we create an instance of Neighbours Classifier and fit the data. clf = NearestCentroid(shrink_threshold=shrinkage) clf.fit(X, y) y_pred = clf.predict(X) print(shrinkage, np.mean(y == y_pred)) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z, cmap=cmap_light) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold, edgecolor='k', s=20) plt.title("3-Class classification (shrink_threshold=%r)" % shrinkage) plt.axis('tight') plt.show()
bsd-3-clause
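Without shrinkage, `NearestCentroid` assigns each point to the class whose feature-wise mean is closest; a minimal numpy sketch of that decision rule (Euclidean metric assumed):

import numpy as np

def nearest_centroid_predict(X_train, y_train, X_new):
    classes = np.unique(y_train)
    centroids = np.array([X_train[y_train == c].mean(axis=0) for c in classes])
    # distance from every new point to every class centroid
    d = np.linalg.norm(X_new[:, None, :] - centroids[None, :, :], axis=2)
    return classes[np.argmin(d, axis=1)]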
GoogleCloudPlatform/ml-on-gcp
example_zoo/tensorflow/probability/bayesian_neural_network/trainer/bayesian_neural_network.py
1
13561
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Trains a Bayesian neural network to classify MNIST digits. The architecture is LeNet-5 [1]. #### References [1]: Yann LeCun, Leon Bottou, Yoshua Bengio, and Patrick Haffner. Gradient-based learning applied to document recognition. _Proceedings of the IEEE_, 1998. http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from trainer.tfgfile_wrapper import tfgfile_wrapper import os import warnings # Dependency imports from absl import flags flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.") import matplotlib matplotlib.use("Agg") from matplotlib import figure # pylint: disable=g-import-not-at-top from matplotlib.backends import backend_agg import numpy as np import tensorflow as tf import tensorflow_probability as tfp from tensorflow.contrib.learn.python.learn.datasets import mnist # TODO(b/78137893): Integration tests currently fail with seaborn imports. warnings.simplefilter(action="ignore") try: import seaborn as sns # pylint: disable=g-import-not-at-top HAS_SEABORN = True except ImportError: HAS_SEABORN = False tfd = tfp.distributions IMAGE_SHAPE = [28, 28, 1] flags.DEFINE_float("learning_rate", default=0.001, help="Initial learning rate.") flags.DEFINE_integer("max_steps", default=6000, help="Number of training steps to run.") flags.DEFINE_integer("batch_size", default=128, help="Batch size.") flags.DEFINE_string("data_dir", default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"), "bayesian_neural_network/data"), help="Directory where data is stored (if using real data).") flags.DEFINE_string( "model_dir", default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"), "bayesian_neural_network/"), help="Directory to put the model's fit.") flags.DEFINE_integer("viz_steps", default=400, help="Frequency at which save visualizations.") flags.DEFINE_integer("num_monte_carlo", default=50, help="Network draws to compute predictive probabilities.") flags.DEFINE_bool("fake_data", default=None, help="If true, uses fake data. Defaults to real data.") FLAGS = flags.FLAGS @tfgfile_wrapper def plot_weight_posteriors(names, qm_vals, qs_vals, fname): """Save a PNG plot with histograms of weight means and stddevs. Args: names: A Python `iterable` of `str` variable names. qm_vals: A Python `iterable`, the same length as `names`, whose elements are Numpy `array`s, of any shape, containing posterior means of weight varibles. qs_vals: A Python `iterable`, the same length as `names`, whose elements are Numpy `array`s, of any shape, containing posterior standard deviations of weight varibles. fname: Python `str` filename to save the plot to. 
""" fig = figure.Figure(figsize=(6, 3)) canvas = backend_agg.FigureCanvasAgg(fig) ax = fig.add_subplot(1, 2, 1) for n, qm in zip(names, qm_vals): sns.distplot(qm.flatten(), ax=ax, label=n) ax.set_title("weight means") ax.set_xlim([-1.5, 1.5]) ax.legend() ax = fig.add_subplot(1, 2, 2) for n, qs in zip(names, qs_vals): sns.distplot(qs.flatten(), ax=ax) ax.set_title("weight stddevs") ax.set_xlim([0, 1.]) fig.tight_layout() canvas.print_figure(fname, format="png") print("saved {}".format(fname)) @tfgfile_wrapper def plot_heldout_prediction(input_vals, probs, fname, n=10, title=""): """Save a PNG plot visualizing posterior uncertainty on heldout data. Args: input_vals: A `float`-like Numpy `array` of shape `[num_heldout] + IMAGE_SHAPE`, containing heldout input images. probs: A `float`-like Numpy array of shape `[num_monte_carlo, num_heldout, num_classes]` containing Monte Carlo samples of class probabilities for each heldout sample. fname: Python `str` filename to save the plot to. n: Python `int` number of datapoints to vizualize. title: Python `str` title for the plot. """ fig = figure.Figure(figsize=(9, 3*n)) canvas = backend_agg.FigureCanvasAgg(fig) for i in range(n): ax = fig.add_subplot(n, 3, 3*i + 1) ax.imshow(input_vals[i, :].reshape(IMAGE_SHAPE[:-1]), interpolation="None") ax = fig.add_subplot(n, 3, 3*i + 2) for prob_sample in probs: sns.barplot(np.arange(10), prob_sample[i, :], alpha=0.1, ax=ax) ax.set_ylim([0, 1]) ax.set_title("posterior samples") ax = fig.add_subplot(n, 3, 3*i + 3) sns.barplot(np.arange(10), np.mean(probs[:, i, :], axis=0), ax=ax) ax.set_ylim([0, 1]) ax.set_title("predictive probs") fig.suptitle(title) fig.tight_layout() canvas.print_figure(fname, format="png") print("saved {}".format(fname)) def build_input_pipeline(mnist_data, batch_size, heldout_size): """Build an Iterator switching between train and heldout data.""" # Build an iterator over training batches. training_dataset = tf.data.Dataset.from_tensor_slices( (mnist_data.train.images, np.int32(mnist_data.train.labels))) training_batches = training_dataset.shuffle( 50000, reshuffle_each_iteration=True).repeat().batch(batch_size) training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches) # Build a iterator over the heldout set with batch_size=heldout_size, # i.e., return the entire heldout set as a constant. heldout_dataset = tf.data.Dataset.from_tensor_slices( (mnist_data.validation.images, np.int32(mnist_data.validation.labels))) heldout_frozen = (heldout_dataset.take(heldout_size). repeat().batch(heldout_size)) heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_frozen) # Combine these into a feedable iterator that can switch between training # and validation inputs. 
handle = tf.compat.v1.placeholder(tf.string, shape=[]) feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle( handle, training_batches.output_types, training_batches.output_shapes) images, labels = feedable_iterator.get_next() return images, labels, handle, training_iterator, heldout_iterator def build_fake_data(num_examples=10): """Build fake MNIST-style data for unit testing.""" class Dummy(object): pass num_examples = 10 mnist_data = Dummy() mnist_data.train = Dummy() mnist_data.train.images = np.float32(np.random.randn( num_examples, *IMAGE_SHAPE)) mnist_data.train.labels = np.int32(np.random.permutation( np.arange(num_examples))) mnist_data.train.num_examples = num_examples mnist_data.validation = Dummy() mnist_data.validation.images = np.float32(np.random.randn( num_examples, *IMAGE_SHAPE)) mnist_data.validation.labels = np.int32(np.random.permutation( np.arange(num_examples))) mnist_data.validation.num_examples = num_examples return mnist_data def main(argv): del argv # unused if tf.io.gfile.exists(FLAGS.model_dir): tf.compat.v1.logging.warning( "Warning: deleting old log directory at {}".format(FLAGS.model_dir)) tf.io.gfile.rmtree(FLAGS.model_dir) tf.io.gfile.makedirs(FLAGS.model_dir) if FLAGS.fake_data: mnist_data = build_fake_data() else: mnist_data = mnist.read_data_sets(FLAGS.data_dir, reshape=False) (images, labels, handle, training_iterator, heldout_iterator) = build_input_pipeline( mnist_data, FLAGS.batch_size, mnist_data.validation.num_examples) # Build a Bayesian LeNet5 network. We use the Flipout Monte Carlo estimator # for the convolution and fully-connected layers: this enables lower # variance stochastic gradients than naive reparameterization. with tf.compat.v1.name_scope("bayesian_neural_net", values=[images]): neural_net = tf.keras.Sequential([ tfp.layers.Convolution2DFlipout(6, kernel_size=5, padding="SAME", activation=tf.nn.relu), tf.keras.layers.MaxPooling2D(pool_size=[2, 2], strides=[2, 2], padding="SAME"), tfp.layers.Convolution2DFlipout(16, kernel_size=5, padding="SAME", activation=tf.nn.relu), tf.keras.layers.MaxPooling2D(pool_size=[2, 2], strides=[2, 2], padding="SAME"), tfp.layers.Convolution2DFlipout(120, kernel_size=5, padding="SAME", activation=tf.nn.relu), tf.keras.layers.Flatten(), tfp.layers.DenseFlipout(84, activation=tf.nn.relu), tfp.layers.DenseFlipout(10) ]) logits = neural_net(images) labels_distribution = tfd.Categorical(logits=logits) # Compute the -ELBO as the loss, averaged over the batch size. neg_log_likelihood = -tf.reduce_mean( input_tensor=labels_distribution.log_prob(labels)) kl = sum(neural_net.losses) / mnist_data.train.num_examples elbo_loss = neg_log_likelihood + kl # Build metrics for evaluation. Predictions are formed from a single forward # pass of the probabilistic layers. They are cheap but noisy predictions. predictions = tf.argmax(input=logits, axis=1) accuracy, accuracy_update_op = tf.compat.v1.metrics.accuracy( labels=labels, predictions=predictions) # Extract weight posterior statistics for layers with weight distributions # for later visualization. 
names = [] qmeans = [] qstds = [] for i, layer in enumerate(neural_net.layers): try: q = layer.kernel_posterior except AttributeError: continue names.append("Layer {}".format(i)) qmeans.append(q.mean()) qstds.append(q.stddev()) with tf.compat.v1.name_scope("train"): optimizer = tf.compat.v1.train.AdamOptimizer( learning_rate=FLAGS.learning_rate) train_op = optimizer.minimize(elbo_loss) init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer()) with tf.compat.v1.Session() as sess: sess.run(init_op) # Run the training loop. train_handle = sess.run(training_iterator.string_handle()) heldout_handle = sess.run(heldout_iterator.string_handle()) for step in range(FLAGS.max_steps): _ = sess.run([train_op, accuracy_update_op], feed_dict={handle: train_handle}) if step % 100 == 0: loss_value, accuracy_value = sess.run( [elbo_loss, accuracy], feed_dict={handle: train_handle}) print("Step: {:>3d} Loss: {:.3f} Accuracy: {:.3f}".format( step, loss_value, accuracy_value)) if (step+1) % FLAGS.viz_steps == 0: # Compute log prob of heldout set by averaging draws from the model: # p(heldout | train) = int_model p(heldout|model) p(model|train) # ~= 1/n * sum_{i=1}^n p(heldout | model_i) # where model_i is a draw from the posterior p(model|train). probs = np.asarray([sess.run((labels_distribution.probs), feed_dict={handle: heldout_handle}) for _ in range(FLAGS.num_monte_carlo)]) mean_probs = np.mean(probs, axis=0) image_vals, label_vals = sess.run((images, labels), feed_dict={handle: heldout_handle}) heldout_lp = np.mean(np.log(mean_probs[np.arange(mean_probs.shape[0]), label_vals.flatten()])) print(" ... Held-out nats: {:.3f}".format(heldout_lp)) qm_vals, qs_vals = sess.run((qmeans, qstds)) if HAS_SEABORN: plot_weight_posteriors(names, qm_vals, qs_vals, fname=os.path.join( FLAGS.model_dir, "step{:05d}_weights.png".format(step))) plot_heldout_prediction(image_vals, probs, fname=os.path.join( FLAGS.model_dir, "step{:05d}_pred.png".format(step)), title="mean heldout logprob {:.2f}" .format(heldout_lp)) if __name__ == "__main__": tf.compat.v1.app.run()
apache-2.0
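The held-out score in the training loop implements the Monte Carlo average written in the comment (p(heldout | train) ~ 1/n sum_i p(heldout | model_i)); a hedged numpy restatement of that step, with `probs` shaped `[num_monte_carlo, num_heldout, num_classes]` as in the script:

import numpy as np

def heldout_log_prob(probs, labels):
    # average class probabilities over posterior draws, then take the
    # log-probability of the true label for each held-out example
    mean_probs = probs.mean(axis=0)                    # [num_heldout, num_classes]
    picked = mean_probs[np.arange(mean_probs.shape[0]), labels]
    return np.log(picked).mean()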
cogmission/nupic.research
projects/sequence_prediction/continuous_sequence/run_tm_model_compare_sdr_cla_classifier.py
2
18776
## ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import csv import importlib from optparse import OptionParser import matplotlib.gridspec as gridspec from matplotlib import rcParams import matplotlib.pyplot as plt import pandas as pd from errorMetrics import * from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder from nupic.frameworks.opf.metrics import MetricSpec from nupic.frameworks.opf.modelfactory import ModelFactory from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager from nupic.frameworks.opf import metrics from htmresearch.frameworks.opf.clamodel_custom import CLAModel_custom import nupic_output from htmresearch.algorithms.sdr_classifier import SDRClassifier from plot import computeLikelihood, plotAccuracy rcParams.update({'figure.autolayout': True}) plt.ion() DATA_DIR = "./data" MODEL_PARAMS_DIR = "./model_params" def getMetricSpecs(predictedField, stepsAhead=5): _METRIC_SPECS = ( MetricSpec(field=predictedField, metric='multiStep', inferenceElement='multiStepBestPredictions', params={'errorMetric': 'negativeLogLikelihood', 'window': 1000, 'steps': stepsAhead}), MetricSpec(field=predictedField, metric='multiStep', inferenceElement='multiStepBestPredictions', params={'errorMetric': 'nrmse', 'window': 1000, 'steps': stepsAhead}), ) return _METRIC_SPECS def createModel(modelParams): model = ModelFactory.create(modelParams) model.enableInference({"predictedField": predictedField}) return model def getModelParamsFromName(dataSet): if (dataSet == "nyc_taxi" or dataSet == "nyc_taxi_perturb" or dataSet == "nyc_taxi_perturb_baseline"): dataSet = "nyc_taxi" importName = "model_params.%s_model_params" % ( dataSet.replace(" ", "_").replace("-", "_") ) print "Importing model params from %s" % importName try: importedModelParams = importlib.import_module(importName).MODEL_PARAMS except ImportError: raise Exception("No model params exist for '%s'. Run swarm first!" % dataSet) return importedModelParams def _getArgs(): parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]" "\n\nCompare TM performance with trivial predictor using " "model outputs in prediction directory " "and outputting results to result directory.") parser.add_option("-d", "--dataSet", type=str, default='nyc_taxi', dest="dataSet", help="DataSet Name, choose from rec-center-hourly, nyc_taxi") parser.add_option("-p", "--plot", default=False, dest="plot", help="Set to True to plot result") parser.add_option("--stepsAhead", help="How many steps ahead to predict. 
[default: %default]", default=5, type=int) parser.add_option("--noise", type=float, default=0, dest="noise", help="Percent of random noise at each step") (options, remainder) = parser.parse_args() print options return options, remainder def getInputRecord(df, predictedField, i, noise=0): value = float(df[predictedField][i]) if noise > 0: noiseValue = np.random.normal(scale=(value * noise)) value += noiseValue value = max(0, value) value = min(40000, value) print " time {}, value {} noise {}".format(df["timeofday"][i], value, noiseValue) inputRecord = { predictedField: value, "timeofday": float(df["timeofday"][i]), "dayofweek": float(df["dayofweek"][i]), } return inputRecord def printTPRegionParams(tpregion): """ Note: assumes we are using TemporalMemory/TPShim in the TPRegion """ tm = tpregion.getSelf()._tfdr print "------------PY TemporalMemory Parameters ------------------" print "numberOfCols =", tm.columnDimensions print "cellsPerColumn =", tm.cellsPerColumn print "minThreshold =", tm.minThreshold print "activationThreshold =", tm.activationThreshold print "newSynapseCount =", tm.maxNewSynapseCount print "initialPerm =", tm.initialPermanence print "connectedPerm =", tm.connectedPermanence print "permanenceInc =", tm.permanenceIncrement print "permanenceDec =", tm.permanenceDecrement print "predictedSegmentDecrement=", tm.predictedSegmentDecrement print def runMultiplePass(df, model, nMultiplePass, nTrain): """ run CLA model through data record 0:nTrain nMultiplePass passes """ predictedField = model.getInferenceArgs()['predictedField'] print "run TM through the train data multiple times" for nPass in xrange(nMultiplePass): for j in xrange(nTrain): inputRecord = getInputRecord(df, predictedField, j) result = model.run(inputRecord) if j % 100 == 0: print " pass %i, record %i" % (nPass, j) # reset temporal memory model._getTPRegion().getSelf()._tfdr.reset() return model def runMultiplePassSPonly(df, model, nMultiplePass, nTrain): """ run CLA model SP through data record 0:nTrain nMultiplePass passes """ predictedField = model.getInferenceArgs()['predictedField'] print "run TM through the train data multiple times" for nPass in xrange(nMultiplePass): for j in xrange(nTrain): inputRecord = getInputRecord(df, predictedField, j) model._sensorCompute(inputRecord) model._spCompute() if j % 400 == 0: print " pass %i, record %i" % (nPass, j) return model def getDateFormatAndPredictedField(dataSet): if dataSet == "rec-center-hourly": DATE_FORMAT = "%m/%d/%y %H:%M" # '7/2/10 0:00' predictedField = "kw_energy_consumption" elif (dataSet == "nyc_taxi" or dataSet == "nyc_taxi_perturb" or dataSet == "nyc_taxi_perturb_baseline"): DATE_FORMAT = '%Y-%m-%d %H:%M:%S' predictedField = "passenger_count" else: raise RuntimeError("un recognized dataset") return (DATE_FORMAT, predictedField) if __name__ == "__main__": (_options, _args) = _getArgs() dataSet = _options.dataSet plot = _options.plot noise = _options.noise print "Noise Amount: ", noise DATE_FORMAT, predictedField = getDateFormatAndPredictedField(dataSet) modelParams = getModelParamsFromName(dataSet) modelParams['modelParams']['clParams']['steps'] = str(_options.stepsAhead) # use customized CLA model print "Creating model from %s..." 
% dataSet model = CLAModel_custom(**modelParams['modelParams']) model.enableInference({"predictedField": predictedField}) model.enableLearning() model._spLearningEnabled = True model._tpLearningEnabled = True printTPRegionParams(model._getTPRegion()) inputData = "%s/%s.csv" % (DATA_DIR, dataSet.replace(" ", "_")) sensor = model._getSensorRegion() encoderList = sensor.getSelf().encoder.getEncoderList() if sensor.getSelf().disabledEncoder is not None: classifier_encoder = sensor.getSelf().disabledEncoder.getEncoderList() classifier_encoder = classifier_encoder[0] else: classifier_encoder = None # initialize new SDR classifier numTMcells = model._getTPRegion().getSelf()._tfdr.numberOfCells() sdrClassifier = SDRClassifier(steps=[5], alpha=0.005) _METRIC_SPECS = getMetricSpecs(predictedField, stepsAhead=_options.stepsAhead) metric = metrics.getModule(_METRIC_SPECS[0]) metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(), model.getInferenceType()) if plot: plotCount = 1 plotHeight = max(plotCount * 3, 6) fig = plt.figure(figsize=(14, plotHeight)) gs = gridspec.GridSpec(plotCount, 1) plt.title(predictedField) plt.ylabel('Data') plt.xlabel('Timed') plt.tight_layout() plt.ion() print "Load dataset: ", dataSet df = pd.read_csv(inputData, header=0, skiprows=[1, 2]) nMultiplePass = 5 nTrain = 5000 print " run SP through the first %i samples %i passes " % ( nMultiplePass, nTrain) model = runMultiplePassSPonly(df, model, nMultiplePass, nTrain) model._spLearningEnabled = False maxBucket = classifier_encoder.n - classifier_encoder.w + 1 likelihoodsVecAll = np.zeros((maxBucket, len(df))) likelihoodsVecAllNN = np.zeros((maxBucket, len(df))) predictionNstep = None timeStep = [] actualData = [] patternNZTrack = [] predictData = np.zeros((_options.stepsAhead, 0)) predictDataCLA = [] predictDataNN = [] negLLTrack = [] activeCellNum = [] predCellNum = [] predictedActiveColumnsNum = [] trueBucketIndex = [] sp = model._getSPRegion().getSelf()._sfdr spActiveCellsCount = np.zeros(sp.getColumnDimensions()) if noise > 0: datasetName = dataSet + "noise_{:.2f}".format(noise) else: datasetName = dataSet output = nupic_output.NuPICFileOutput([datasetName]) for i in xrange(len(df)): inputRecord = getInputRecord(df, predictedField, i, noise) tp = model._getTPRegion() tm = tp.getSelf()._tfdr prePredictiveCells = tm.predictiveCells prePredictiveColumn = np.array(list(prePredictiveCells)) / tm.cellsPerColumn # run model on the input Record result = model.run(inputRecord) # record and analyze the result trueBucketIndex.append( model._getClassifierInputRecord(inputRecord).bucketIndex) tp = model._getTPRegion() tm = tp.getSelf()._tfdr tpOutput = tm.infActiveState['t'] predictiveCells = tm.predictiveCells predCellNum.append(len(predictiveCells)) predColumn = np.array(list(predictiveCells)) / tm.cellsPerColumn patternNZ = tpOutput.reshape(-1).nonzero()[0] activeColumn = patternNZ / tm.cellsPerColumn activeCellNum.append(len(patternNZ)) predictedActiveColumns = np.intersect1d(prePredictiveColumn, activeColumn) predictedActiveColumnsNum.append(len(predictedActiveColumns)) # fed input to the new classifier classification = {'bucketIdx': result.classifierInput.bucketIndex, 'actValue': result.classifierInput.dataRow} nnRetval = sdrClassifier.compute(i, patternNZ, classification, learn=True, infer=True) nnPrediction = nnRetval['actualValues'][np.argmax(nnRetval[5])] predictDataNN.append(nnPrediction) output.write([i], [inputRecord[predictedField]], [float(nnPrediction)]) likelihoodsVecAllNN[0:len(nnRetval[5]), i] = 
nnRetval[5] result.metrics = metricsManager.update(result) negLL = result.metrics["multiStepBestPredictions:multiStep:" "errorMetric='negativeLogLikelihood':steps=%d:window=1000:" "field=%s" % (_options.stepsAhead, predictedField)] if i % 100 == 0 and i > 0: negLL = result.metrics["multiStepBestPredictions:multiStep:" "errorMetric='negativeLogLikelihood':steps=%d:window=1000:" "field=%s" % (_options.stepsAhead, predictedField)] nrmse = result.metrics["multiStepBestPredictions:multiStep:" "errorMetric='nrmse':steps=%d:window=1000:" "field=%s" % (_options.stepsAhead, predictedField)] numActiveCell = np.mean(activeCellNum[-100:]) numPredictiveCells = np.mean(predCellNum[-100:]) numCorrectPredicted = np.mean(predictedActiveColumnsNum[-100:]) print "After %i records, %d-step negLL=%f nrmse=%f ActiveCell %f PredCol %f CorrectPredCol %f" % \ (i, _options.stepsAhead, negLL, nrmse, numActiveCell, numPredictiveCells, numCorrectPredicted) bucketLL = \ result.inferences['multiStepBucketLikelihoods'][_options.stepsAhead] likelihoodsVec = np.zeros((maxBucket,)) if bucketLL is not None: for (k, v) in bucketLL.items(): likelihoodsVec[k] = v timeStep.append(i) actualData.append(inputRecord[predictedField]) predictDataCLA.append( result.inferences['multiStepBestPredictions'][_options.stepsAhead]) negLLTrack.append(negLL) likelihoodsVecAll[0:len(likelihoodsVec), i] = likelihoodsVec if plot and i > 500: # prepare data for display if i > 100: timeStepDisplay = timeStep[-500:-_options.stepsAhead] actualDataDisplay = actualData[-500 + _options.stepsAhead:] predictDataMLDisplay = predictDataCLA[-500:-_options.stepsAhead] predictDataNNDisplay = predictDataNN[-500:-_options.stepsAhead] likelihoodDisplay = likelihoodsVecAll[:, i - 499:i - _options.stepsAhead + 1] likelihoodDisplayNN = likelihoodsVecAllNN[:, i - 499:i - _options.stepsAhead + 1] xl = [(i) - 500, (i)] else: timeStepDisplay = timeStep actualDataDisplay = actualData predictDataMLDisplay = predictDataCLA predictDataNNDisplay = predictDataNN likelihoodDisplayNN = likelihoodsVecAllNN[:, :i + 1] likelihoodDisplay = likelihoodsVecAll[:, :i + 1] xl = [0, (i)] plt.figure(2) plt.clf() plt.imshow(likelihoodDisplay, extent=(timeStepDisplay[0], timeStepDisplay[-1], 0, 40000), interpolation='nearest', aspect='auto', origin='lower', cmap='Reds') plt.plot(timeStepDisplay, actualDataDisplay, 'k', label='Data') # plt.plot(timeStepDisplay, predictDataMLDisplay, 'b', label='Best Prediction') plt.xlim(xl) plt.xlabel('Time') plt.ylabel('Prediction') plt.title('TM, useTimeOfDay=' + str( True) + ' ' + dataSet + ' test neg LL = ' + str(negLL)) plt.draw() plt.figure(3) plt.clf() plt.imshow(likelihoodDisplayNN, extent=(timeStepDisplay[0], timeStepDisplay[-1], 0, 40000), interpolation='nearest', aspect='auto', origin='lower', cmap='Reds') plt.plot(timeStepDisplay, actualDataDisplay, 'k', label='Data') # plt.plot(timeStepDisplay, predictDataNNDisplay, 'b', label='Best Prediction') plt.xlim(xl) plt.xlabel('Time') plt.ylabel('Prediction') plt.title('TM, useTimeOfDay=' + str( True) + ' ' + dataSet + ' test neg LL = ' + str(negLL)) plt.draw() output.close() shiftedPredDataCLA = np.roll(np.array(predictDataCLA), _options.stepsAhead) shiftedPredDataNN = np.roll(np.array(predictDataNN), _options.stepsAhead) nTest = len(actualData) - nTrain - _options.stepsAhead NRMSECLA = NRMSE(actualData[nTrain:nTrain + nTest], shiftedPredDataCLA[nTrain:nTrain + nTest]) NRMSENN = NRMSE(actualData[nTrain:nTrain + nTest], shiftedPredDataNN[nTrain:nTrain + nTest]) MAPECLA = MAPE(actualData[nTrain:nTrain + 
nTest], shiftedPredDataCLA[nTrain:nTrain + nTest]) MAPENN = MAPE(actualData[nTrain:nTrain + nTest], shiftedPredDataNN[nTrain:nTrain + nTest]) print "NRMSE on test data, CLA: ", NRMSECLA print "NRMSE on test data, NN: ", NRMSENN # calculate neg-likelihood encoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True) truth = np.roll(actualData, -5) predictions = np.transpose(likelihoodsVecAll) negLLCLA = computeLikelihood(predictions, truth, encoder) negLLCLA[:5904] = np.nan predictions = np.transpose(likelihoodsVecAllNN) negLLNN = computeLikelihood(predictions, truth, encoder) negLLNN[:5904] = np.nan # save predicted distribution for likelihood calculation np.save('./result/' + datasetName + 'TMprediction.npy', predictions) np.save('./result/' + datasetName + 'TMtruth.npy', truth) plt.figure() plotAccuracy((negLLCLA, range(len(negLLCLA))), truth, window=480, errorType='negLL') plotAccuracy((negLLNN, range(len(negLLNN))), truth, window=480, errorType='negLL') # Compare NN classifier and CLA classifier plt.figure() shiftedActualData = np.roll(np.array(actualData), -_options.stepsAhead) plt.plot(shiftedActualData) plt.plot(predictDataNN) plt.plot(predictDataCLA) plt.legend(['True', 'NN', 'CLA']) plt.xlim([16600, 17000]) fig, ax = plt.subplots(nrows=1, ncols=3) inds = np.arange(2) ax1 = ax[0] width = 0.5 ax1.bar(inds, [NRMSECLA, NRMSENN], width=width) ax1.set_xticks(inds+width/2) ax1.set_ylabel('NRMSE') ax1.set_xlim([inds[0]-width*.6, inds[-1]+width*1.4]) ax1.set_xticklabels(('CLA', 'NN')) ax2 = ax[1] ax2.bar(inds, [MAPECLA, MAPENN], width=width) ax2.set_xticks(inds+width/2) ax2.set_ylabel('MAPE') ax2.set_xlim([inds[0]-width*.6, inds[-1]+width*1.4]) ax2.set_xticklabels(('CLA', 'NN')) ax3 = ax[2] ax3.bar(inds, [np.nanmean(negLLCLA), np.nanmean(negLLNN)], width=width) ax3.set_xticks(inds+width/2) ax3.set_ylabel('negLL') ax3.set_xlim([inds[0]-width*.6, inds[-1]+width*1.4]) ax3.set_xticklabels(('CLA', 'NN')) # Plot Example Prediction dataSet = 'nyc_taxi' filePath = './data/' + dataSet + '.csv' data = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['datetime', 'value', 'timeofday', 'dayofweek']) xaxis_datetime = pd.to_datetime(data['datetime']) plt.figure() plt.plot(xaxis_datetime, actualData) plt.plot(xaxis_datetime, predictDataNN) plt.legend(['Ground Truth', 'HTM prediction']) plt.ylabel(' Taxi Passenger Count') plt.xlim([xaxis_datetime[16600], xaxis_datetime[17000]]) plt.savefig('result/ExamplePredictionsHTM.pdf')
agpl-3.0
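The comparison above relies on `NRMSE` and `MAPE` imported from the project's `errorMetrics` module, whose exact normalization is not shown here; as a hedged reference, the conventional definitions are roughly:

import numpy as np

def nrmse(truth, prediction):
    # RMSE normalized by the standard deviation of the truth (one common convention)
    truth, prediction = np.asarray(truth), np.asarray(prediction)
    return np.sqrt(np.mean((prediction - truth) ** 2)) / np.std(truth)

def mape(truth, prediction):
    # mean absolute percentage error
    truth, prediction = np.asarray(truth), np.asarray(prediction)
    return np.mean(np.abs(prediction - truth) / np.abs(truth))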
JackKelly/neuralnilm_prototype
scripts/e232.py
2
6618
from __future__ import print_function, division import matplotlib matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer from lasagne.nonlinearities import sigmoid, rectify from lasagne.objectives import crossentropy, mse from lasagne.init import Uniform, Normal from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer from lasagne.updates import nesterov_momentum from functools import partial import os from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff from neuralnilm.experiment import run_experiment from neuralnilm.net import TrainingError import __main__ NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" SAVE_PLOT_INTERVAL = 250 GRADIENT_STEPS = 100 """ e103 Discovered that bottom layer is hardly changing. So will try just a single lstm layer e104 standard init lower learning rate e106 lower learning rate to 0.001 e108 is e107 but with batch size of 5 e109 Normal(1) for BLSTM e110 * Back to Uniform(5) for BLSTM * Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f RESULTS: Seems to run fine again! e111 * Try with nntools head * peepholes=False RESULTS: appears to be working well. Haven't seen a NaN, even with training rate of 0.1 e112 * n_seq_per_batch = 50 e114 * Trying looking at layer by layer training again. * Start with single BLSTM layer e115 * Learning rate = 1 e116 * Standard inits e117 * Uniform(1) init e119 * Learning rate 10 # Result: didn't work well! e120 * init: Normal(1) * not as good as Uniform(5) e121 * Uniform(25) e122 * Just 10 cells * Uniform(5) e125 * Pre-train lower layers e128 * Add back all 5 appliances * Seq length 1500 * skip_prob = 0.7 e129 * max_input_power = None * 2nd layer has Uniform(5) * pre-train bottom layer for 2000 epochs * add third layer at 4000 epochs e131 e138 * Trying to replicate e82 and then break it ;) e140 diff e141 conv1D layer has Uniform(1), as does 2nd BLSTM layer e142 diff AND power e144 diff and power and max power is 5900 e145 Uniform(25) for first layer e146 gradient clip and use peepholes e147 * try again with new code e148 * learning rate 0.1 e150 * Same as e149 but without peepholes and using BLSTM not BBLSTM e151 * Max pooling 171 lower learning rate 172 even lower learning rate 173 slightly higher learning rate! 
175 same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs 176 new cost function 177 another new cost func (this one avoids NaNs) skip prob 0.7 10x higher learning rate 178 refactored cost func (functionally equiv to 177) 0.1x learning rate e180 * mse e181 * back to scaled cost * different architecture: - convd1 at input (2x) - then 3 LSTM layers, each with a 2x conv in between - no diff input e189 * divide dominant appliance power * mse 217 no peepholes 218 don't clip gradient lag=64 219 back to lag=32 try all 5 appliances (with max input = 500) """ # def scaled_cost(x, t): # raw_cost = (x - t) ** 2 # energy_per_seq = t.sum(axis=1) # energy_per_batch = energy_per_seq.sum(axis=1) # energy_per_batch = energy_per_batch.reshape((-1, 1)) # normaliser = energy_per_seq / energy_per_batch # cost = raw_cost.mean(axis=1) * (1 - normaliser) # return cost.mean() from theano.ifelse import ifelse import theano.tensor as T THRESHOLD = 0 def scaled_cost(x, t): sq_error = (x - t) ** 2 def mask_and_mean_sq_error(mask): masked_sq_error = sq_error[mask.nonzero()] mean = masked_sq_error.mean() mean = ifelse(T.isnan(mean), 0.0, mean) return mean above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD) below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD) return (above_thresh_mean + below_thresh_mean) / 2.0 def exp_a(name): # global source # source = RealApplianceSource( # filename='/data/dk3810/ukdale.h5', # appliances=[ # ['fridge freezer', 'fridge', 'freezer'], # 'hair straighteners', # 'television', # 'dish washer', # ['washer dryer', 'washing machine'] # ], # max_appliance_powers=None,#[500] * 5, # on_power_thresholds=[5] * 5, # max_input_power=2500, # min_on_durations=[60, 60, 60, 1800, 1800], # min_off_durations=[12, 12, 12, 1800, 600], # window=("2013-06-01", "2014-07-01"), # seq_length=1500, # output_one_appliance=False, # boolean_targets=False, # train_buildings=[1], # validation_buildings=[1], # skip_probability=0.7, # n_seq_per_batch=25, # # subsample_target=4, # # input_padding=0, # include_diff=False, # clip_appliance_power=False, # lag=0 # ) net = Net( experiment_name=name, source=source, save_plot_interval=1000, loss_function=scaled_cost, updates=partial(nesterov_momentum, learning_rate=0.0001), layers_config=[ { 'type': DenseLayer, 'num_units': 200, 'nonlinearity': sigmoid, 'W': Uniform(5), 'b': Uniform(5) }, { 'type': DenseLayer, 'num_units': 50, 'nonlinearity': sigmoid, 'W': Uniform(1), 'b': Uniform(1) }, { 'type': DenseLayer, 'num_units': source.n_outputs, 'nonlinearity': None, 'W': Uniform(25) } ] ) return net def init_experiment(experiment): full_exp_name = NAME + experiment func_call = 'exp_{:s}(full_exp_name)'.format(experiment) print("***********************************") print("Preparing", full_exp_name, "...") net = eval(func_call) return net def main(): for experiment in list('a'): full_exp_name = NAME + experiment path = os.path.join(PATH, full_exp_name) try: net = init_experiment(experiment) run_experiment(net, path, epochs=None) except KeyboardInterrupt: break except TrainingError as exception: print("EXCEPTION:", exception) except Exception as exception: raise print("EXCEPTION:", exception) import ipdb; ipdb.set_trace() if __name__ == "__main__": main()
mit
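`scaled_cost` above averages the squared error separately over above-threshold (appliance on) and below-threshold samples so that mostly-zero targets do not dominate the loss; a hedged numpy restatement of the same idea:

import numpy as np

def scaled_cost_np(x, t, threshold=0.0):
    sq_error = (x - t) ** 2

    def masked_mean(mask):
        vals = sq_error[mask]
        return vals.mean() if vals.size else 0.0   # mirrors the ifelse NaN guard

    return 0.5 * (masked_mean(t > threshold) + masked_mean(t <= threshold))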
abitofalchemy/ScientificImpactPrediction
procjson.py
1
4282
# -*- coding: utf-8 -*- __author__ = 'Sal Aguinaga' __license__ = "GPL" __version__ = "0.1.0" __email__ = "[email protected]" import pprint as pp import pandas as pd import numpy as np import re import json, time, sys, csv from HTMLParser import HTMLParser import sys, os, argparse import traceback import time, datetime import ast import glob import matplotlib matplotlib.use('pdf') import matplotlib.pyplot as plt plt.style.use('ggplot') ## http://stackoverflow.com/questions/23531608/how-do-i-save-streaming-tweets-in-json-via-tweepy ## https://www.airpair.com/python/posts/top-mistakes-python-big-data-analytics def level1_json_proc(in_json_fname=""): if not in_json_fname: print 'Not a valid filename' return tweets = pd.DataFrame() mozsprint_data = [] tweet_links = [] for in_file in glob.glob(in_json_fname+"iso*json"): print '-- working with:', in_file, '-'*20 with open(in_file) as f: for j,line in enumerate(f): rawtweet="" try: rawtweet = ast.literal_eval(line.strip('\r\n')) except Exception, e: # print "this line" try: rawtweet = json.loads(line.strip('\r\n')) except Exception, e: print "!!",str(e) mozsprint_data.append(rawtweet) # print len(mozsprint_data) # break # Create the dataframe we will use tweets = pd.DataFrame() # We want to know when a tweet was sent tweets['created_at'] = map(lambda tweet: time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y')), mozsprint_data) # Who is the tweet owner tweets['user'] = map(lambda tweet: tweet['user']['screen_name'], mozsprint_data) # How many follower this user has # tweets['user_followers_count'] = map(lambda tweet: tweet['user']['followers_count'], mozsprint_data) # What is the tweet's content tweets['text'] = map(lambda tweet: tweet['text'].encode('utf-8'), mozsprint_data) # Get Hyperlinks lnks = re.findall(r'(https?://\S+)', tt) # tweet_links.append(map(lambda tweet: re.findall(r'(https?://\S+)', tweet['text'].encode('utf-8')), mozsprint_data)) # If available what is the language the tweet is written in# # tweets['lang'] = map(lambda tweet: tweet['lang'], mozsprint_data) # # If available, where was the tweet sent from ? 
# tweets['Location'] = map(lambda tweet: tweet['place']['country'] if tweet['place'] != None else None, mozsprint_data) # # How many times this tweet was retweeted and favorited # tweets['retweet_count'] = map(lambda tweet: tweet['retweet_count'], mozsprint_data) # tweets['favorite_count'] = map(lambda tweet: tweet['favorite_count'], mozsprint_data) # ts = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(rtweets['created_at'],'%a %b %d %H:%M:%S +0000 %Y')) # df = pd.DataFrame([[rtweets['user']['screen_name']],[ rtweets['text']]], index=ts) # df = pd.to_datetime([rtweets['user']['screen_name'], rtweets['text']], format='%d%b%Y:%H:%M:%S.%f') # Trim DF omit duplicates tweets = tweets.drop_duplicates() # Get links if in tweet tmpar= map(lambda tweet: re.findall(r'(https?://\S+)', tweet), tweets['text']) from itertools import chain tmps = list(chain.from_iterable(tmpar)) # Save the trimmed tweet links np.savetxt('Results/tweets_hyperlinks.tsv',tmps,fmt="%s", delimiter='\t') df = pd.DataFrame(tweets['created_at'].value_counts(), columns=['number_tweets']) df['date'] = df.index days = [item.split(" ")[0] for item in df['date'].values] df['days'] = days grouped_tweets = df[['days', 'number_tweets']].groupby('days') tweet_growth = grouped_tweets.sum() tweet_growth['days']= tweet_growth.index print tweet_growth.head() tweet_growth['number_tweets'].to_csv('Results/tweet_countxdate.tsv', sep='\t', header=True) if 0: tweet_growth.plot(kind='bar') plt.savefig('outfig', bb_inches='tight') # print df return def get_parser(): parser = argparse.ArgumentParser(description='procjson') parser.add_argument('jsonfile', metavar='JSONFILE', help='Input file.') parser.add_argument('--version', action='version', version=__version__) return parser def main(): parser = get_parser() args = vars(parser.parse_args()) level1_json_proc(args['jsonfile']) if not args['jsonfile']: parser.print_help() os._exit(1) if __name__=='__main__': main() print 'Done'
mit
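For reference, a minimal standalone sketch of the timestamp normalisation the script above applies to Twitter's created_at field; the sample string is hypothetical, only the format codes come from the script.

import time

# Twitter-style timestamp as found in a tweet's created_at field (hypothetical sample).
raw = "Wed Jun 01 12:34:56 +0000 2016"
parsed = time.strptime(raw, '%a %b %d %H:%M:%S +0000 %Y')
print(time.strftime('%Y-%m-%d %H:%M:%S', parsed))  # 2016-06-01 12:34:56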
spallavolu/scikit-learn
examples/ensemble/plot_gradient_boosting_regression.py
227
2520
""" ============================ Gradient Boosting regression ============================ Demonstrate Gradient Boosting on the Boston housing dataset. This example fits a Gradient Boosting model with least squares loss and 500 regression trees of depth 4. """ print(__doc__) # Author: Peter Prettenhofer <[email protected]> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn import datasets from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error ############################################################################### # Load data boston = datasets.load_boston() X, y = shuffle(boston.data, boston.target, random_state=13) X = X.astype(np.float32) offset = int(X.shape[0] * 0.9) X_train, y_train = X[:offset], y[:offset] X_test, y_test = X[offset:], y[offset:] ############################################################################### # Fit regression model params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1, 'learning_rate': 0.01, 'loss': 'ls'} clf = ensemble.GradientBoostingRegressor(**params) clf.fit(X_train, y_train) mse = mean_squared_error(y_test, clf.predict(X_test)) print("MSE: %.4f" % mse) ############################################################################### # Plot training deviance # compute test set deviance test_score = np.zeros((params['n_estimators'],), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): test_score[i] = clf.loss_(y_test, y_pred) plt.figure(figsize=(12, 6)) plt.subplot(1, 2, 1) plt.title('Deviance') plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-', label='Training Set Deviance') plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-', label='Test Set Deviance') plt.legend(loc='upper right') plt.xlabel('Boosting Iterations') plt.ylabel('Deviance') ############################################################################### # Plot feature importance feature_importance = clf.feature_importances_ # make importances relative to max importance feature_importance = 100.0 * (feature_importance / feature_importance.max()) sorted_idx = np.argsort(feature_importance) pos = np.arange(sorted_idx.shape[0]) + .5 plt.subplot(1, 2, 2) plt.barh(pos, feature_importance[sorted_idx], align='center') plt.yticks(pos, boston.feature_names[sorted_idx]) plt.xlabel('Relative Importance') plt.title('Variable Importance') plt.show()
bsd-3-clause
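A hedged sketch of the same staged-evaluation idea as the example above, on synthetic data rather than the Boston housing set, and using staged_predict instead of the internal loss_ attribute.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error

# Synthetic regression data in place of the Boston housing set.
X, y = make_regression(n_samples=500, n_features=10, noise=5.0, random_state=0)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]

est = GradientBoostingRegressor(n_estimators=200, max_depth=3,
                                learning_rate=0.05, random_state=0)
est.fit(X_train, y_train)

# Test-set MSE after each boosting stage, analogous to the staged deviance curve above.
staged_mse = [mean_squared_error(y_test, y_pred) for y_pred in est.staged_predict(X_test)]
print("final MSE: %.3f (best stage: %d)" % (staged_mse[-1], int(np.argmin(staged_mse)) + 1))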
dfm/celerite
paper/figures/sho.py
3
2042
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import division, print_function import numpy as np import matplotlib.pyplot as plt from celerite.plot_setup import setup, get_figsize np.random.seed(42) setup(auto=True) def sho_psd(Q, x): x2 = x*x return 1.0 / ((x2 - 1)**2 + x2 / Q**2) def sho_acf(Q, tau): t = np.abs(tau) if np.allclose(Q, 0.5): return np.exp(-t) * (1.0 + t) b = 1.0 / np.sqrt(4*Q**2 - 1) c = 0.5 / Q d = 0.5 * np.sqrt(4*Q**2 - 1) / Q return np.exp(-c * t) * (np.cos(d*t)+b*np.sin(d*t)) def lorentz_psd(Q, x): return Q**2 * (1.0 / ((x - 1)**2 * (2*Q)**2 + 1) + 1.0 / ((x + 1)**2 * (2*Q)**2 + 1)) def lorentz_acf(Q, tau): t = np.abs(tau) return np.exp(-0.5*t/Q) * np.cos(t) fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=get_figsize(1, 3)) x = 10**np.linspace(-1.1, 1.1, 5000) tau = np.linspace(0, 20, 1000) for i, (Q_name, Q) in enumerate( [("1/2", 0.5), ("1/\\sqrt{2}", 1./np.sqrt(2)), ("2", 2.0), ("10", 10.0)]): l, = ax1.plot(x, sho_psd(Q, x), label="$Q = {0}$".format(Q_name), lw=1.5) c = l.get_color() ax2.plot(tau, sho_acf(Q, tau), label="$Q = {0}$".format(Q_name), lw=1.5, color=c) K = sho_acf(Q, tau[:, None] - tau[None, :]) y = np.random.multivariate_normal(np.zeros(len(tau)), K, size=3) ax3.axhline(-5*i, color="k", lw=0.75) ax3.plot(tau, -5*i + (y - np.mean(y, axis=1)[:, None]).T, color=c, lw=1) ax1.plot(x, lorentz_psd(10.0, x), "--k") ax2.plot(tau, lorentz_acf(10.0, tau), "--k") ax1.set_xscale("log") ax1.set_yscale("log") ax1.set_xlim(x.min(), x.max()) ax1.set_ylim(2e-4, 200.0) ax1.legend(loc=3, fontsize=11) ax1.set_xlabel("$\omega/\omega_0$") ax1.set_ylabel("$S(\omega) / S(0)$") ax2.set_xlim(tau.min(), tau.max()) ax2.set_ylim(-1.1, 1.1) ax2.set_xlabel("$\omega_0\,\\tau$") ax2.set_ylabel("$k(\\tau) / k(0)$") ax3.set_xlim(0, 20) ax3.set_yticklabels([]) ax3.set_xlabel("$\omega_0\,t$") fig.savefig("sho.pdf", bbox_inches="tight")
mit
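The closed-form PSD and ACF used in the figure script above can be checked on their own; a minimal numpy sketch (the two functions are copied from the script) confirming that the oscillatory ACF branch approaches the critically damped (1 + t) exp(-t) form as Q approaches 1/2, and that the PSD peaks at omega/omega_0 = 1 with height Q squared.

import numpy as np

def sho_psd(Q, x):
    # Normalised PSD of a stochastically driven, damped simple harmonic oscillator.
    x2 = x * x
    return 1.0 / ((x2 - 1) ** 2 + x2 / Q ** 2)

def sho_acf(Q, tau):
    # Matching autocorrelation function; Q = 1/2 is the critically damped case.
    t = np.abs(tau)
    if np.allclose(Q, 0.5):
        return np.exp(-t) * (1.0 + t)
    b = 1.0 / np.sqrt(4 * Q ** 2 - 1)
    c = 0.5 / Q
    d = 0.5 * np.sqrt(4 * Q ** 2 - 1) / Q
    return np.exp(-c * t) * (np.cos(d * t) + b * np.sin(d * t))

tau = np.linspace(0.0, 10.0, 101)
# The two branches should nearly agree just above the critical value Q = 1/2.
print(np.max(np.abs(sho_acf(0.501, tau) - sho_acf(0.5, tau))))
# The PSD peak sits at x = omega/omega_0 = 1 with height Q**2.
print(sho_psd(2.0, 1.0))  # 4.0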
mbayon/TFG-MachineLearning
vbig/lib/python2.7/site-packages/pandas/tests/indexes/period/test_construction.py
6
19404
import pytest import numpy as np import pandas as pd import pandas.util.testing as tm import pandas.core.indexes.period as period from pandas.compat import lrange, PY3, text_type, lmap from pandas import (Period, PeriodIndex, period_range, offsets, date_range, Series, Index) class TestPeriodIndex(object): def setup_method(self, method): pass def test_construction_base_constructor(self): # GH 13664 arr = [pd.Period('2011-01', freq='M'), pd.NaT, pd.Period('2011-03', freq='M')] tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr)) tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr))) arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')] tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr)) tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr))) arr = [pd.Period('2011-01', freq='M'), pd.NaT, pd.Period('2011-03', freq='D')] tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object)) tm.assert_index_equal(pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)) def test_constructor_use_start_freq(self): # GH #1118 p = Period('4/2/2012', freq='B') index = PeriodIndex(start=p, periods=10) expected = PeriodIndex(start='4/2/2012', periods=10, freq='B') tm.assert_index_equal(index, expected) def test_constructor_field_arrays(self): # GH #1264 years = np.arange(1990, 2010).repeat(4)[2:-2] quarters = np.tile(np.arange(1, 5), 20)[2:-2] index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC') expected = period_range('1990Q3', '2009Q2', freq='Q-DEC') tm.assert_index_equal(index, expected) index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC') tm.assert_numpy_array_equal(index.asi8, index2.asi8) index = PeriodIndex(year=years, quarter=quarters) tm.assert_index_equal(index, expected) years = [2007, 2007, 2007] months = [1, 2] pytest.raises(ValueError, PeriodIndex, year=years, month=months, freq='M') pytest.raises(ValueError, PeriodIndex, year=years, month=months, freq='2M') pytest.raises(ValueError, PeriodIndex, year=years, month=months, freq='M', start=Period('2007-01', freq='M')) years = [2007, 2007, 2007] months = [1, 2, 3] idx = PeriodIndex(year=years, month=months, freq='M') exp = period_range('2007-01', periods=3, freq='M') tm.assert_index_equal(idx, exp) def test_constructor_U(self): # U was used as undefined period pytest.raises(ValueError, period_range, '2007-1-1', periods=500, freq='X') def test_constructor_nano(self): idx = period_range(start=Period(ordinal=1, freq='N'), end=Period(ordinal=4, freq='N'), freq='N') exp = PeriodIndex([Period(ordinal=1, freq='N'), Period(ordinal=2, freq='N'), Period(ordinal=3, freq='N'), Period(ordinal=4, freq='N')], freq='N') tm.assert_index_equal(idx, exp) def test_constructor_arrays_negative_year(self): years = np.arange(1960, 2000, dtype=np.int64).repeat(4) quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40) pindex = PeriodIndex(year=years, quarter=quarters) tm.assert_index_equal(pindex.year, pd.Index(years)) tm.assert_index_equal(pindex.quarter, pd.Index(quarters)) def test_constructor_invalid_quarters(self): pytest.raises(ValueError, PeriodIndex, year=lrange(2000, 2004), quarter=lrange(4), freq='Q-DEC') def test_constructor_corner(self): pytest.raises(ValueError, PeriodIndex, periods=10, freq='A') start = Period('2007', freq='A-JUN') end = Period('2010', freq='A-DEC') pytest.raises(ValueError, PeriodIndex, start=start, end=end) pytest.raises(ValueError, PeriodIndex, start=start) pytest.raises(ValueError, PeriodIndex, end=end) result = period_range('2007-01', 
periods=10.5, freq='M') exp = period_range('2007-01', periods=10, freq='M') tm.assert_index_equal(result, exp) def test_constructor_fromarraylike(self): idx = period_range('2007-01', periods=20, freq='M') # values is an array of Period, thus can retrieve freq tm.assert_index_equal(PeriodIndex(idx.values), idx) tm.assert_index_equal(PeriodIndex(list(idx.values)), idx) pytest.raises(ValueError, PeriodIndex, idx._values) pytest.raises(ValueError, PeriodIndex, list(idx._values)) pytest.raises(TypeError, PeriodIndex, data=Period('2007', freq='A')) result = PeriodIndex(iter(idx)) tm.assert_index_equal(result, idx) result = PeriodIndex(idx) tm.assert_index_equal(result, idx) result = PeriodIndex(idx, freq='M') tm.assert_index_equal(result, idx) result = PeriodIndex(idx, freq=offsets.MonthEnd()) tm.assert_index_equal(result, idx) assert result.freq, 'M' result = PeriodIndex(idx, freq='2M') tm.assert_index_equal(result, idx.asfreq('2M')) assert result.freq, '2M' result = PeriodIndex(idx, freq=offsets.MonthEnd(2)) tm.assert_index_equal(result, idx.asfreq('2M')) assert result.freq, '2M' result = PeriodIndex(idx, freq='D') exp = idx.asfreq('D', 'e') tm.assert_index_equal(result, exp) def test_constructor_datetime64arr(self): vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64) vals = vals.view(np.dtype('M8[us]')) pytest.raises(ValueError, PeriodIndex, vals, freq='D') def test_constructor_dtype(self): # passing a dtype with a tz should localize idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]') exp = PeriodIndex(['2013-01', '2013-03'], freq='M') tm.assert_index_equal(idx, exp) assert idx.dtype == 'period[M]' idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]') exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D') tm.assert_index_equal(idx, exp) assert idx.dtype == 'period[3D]' # if we already have a freq and its not the same, then asfreq # (not changed) idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D') res = PeriodIndex(idx, dtype='period[M]') exp = PeriodIndex(['2013-01', '2013-01'], freq='M') tm.assert_index_equal(res, exp) assert res.dtype == 'period[M]' res = PeriodIndex(idx, freq='M') tm.assert_index_equal(res, exp) assert res.dtype == 'period[M]' msg = 'specified freq and dtype are different' with tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex(['2011-01'], freq='M', dtype='period[D]') def test_constructor_empty(self): idx = pd.PeriodIndex([], freq='M') assert isinstance(idx, PeriodIndex) assert len(idx) == 0 assert idx.freq == 'M' with tm.assert_raises_regex(ValueError, 'freq not specified'): pd.PeriodIndex([]) def test_constructor_pi_nat(self): idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT, Period('2011-01', freq='M')]) exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT, Period('2011-01', freq='M')])) tm.assert_index_equal(idx, exp) idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'), Period('2011-01', freq='M')]) exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) idx = PeriodIndex(np.array([pd.NaT, pd.NaT, Period('2011-01', freq='M'), Period('2011-01', freq='M')])) tm.assert_index_equal(idx, exp) idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) with tm.assert_raises_regex(ValueError, 'freq not specified'): PeriodIndex([pd.NaT, pd.NaT]) with tm.assert_raises_regex(ValueError, 'freq not specified'): 
PeriodIndex(np.array([pd.NaT, pd.NaT])) with tm.assert_raises_regex(ValueError, 'freq not specified'): PeriodIndex(['NaT', 'NaT']) with tm.assert_raises_regex(ValueError, 'freq not specified'): PeriodIndex(np.array(['NaT', 'NaT'])) def test_constructor_incompat_freq(self): msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)" with tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex([Period('2011-01', freq='M'), pd.NaT, Period('2011-01', freq='D')]) with tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT, Period('2011-01', freq='D')])) # first element is pd.NaT with tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex([pd.NaT, Period('2011-01', freq='M'), Period('2011-01', freq='D')]) with tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'), Period('2011-01', freq='D')])) def test_constructor_mixed(self): idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')]) exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')]) exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT, '2012-01-01']) exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D') tm.assert_index_equal(idx, exp) def test_constructor_simple_new(self): idx = period_range('2007-01', name='p', periods=2, freq='M') result = idx._simple_new(idx, 'p', freq=idx.freq) tm.assert_index_equal(result, idx) result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq) tm.assert_index_equal(result, idx) result = idx._simple_new([pd.Period('2007-01', freq='M'), pd.Period('2007-02', freq='M')], 'p', freq=idx.freq) tm.assert_index_equal(result, idx) result = idx._simple_new(np.array([pd.Period('2007-01', freq='M'), pd.Period('2007-02', freq='M')]), 'p', freq=idx.freq) tm.assert_index_equal(result, idx) def test_constructor_simple_new_empty(self): # GH13079 idx = PeriodIndex([], freq='M', name='p') result = idx._simple_new(idx, name='p', freq='M') tm.assert_index_equal(result, idx) def test_constructor_floats(self): # GH13079 for floats in [[1.1, 2.1], np.array([1.1, 2.1])]: with pytest.raises(TypeError): pd.PeriodIndex._simple_new(floats, freq='M') with pytest.raises(TypeError): pd.PeriodIndex(floats, freq='M') def test_constructor_nat(self): pytest.raises(ValueError, period_range, start='NaT', end='2011-01-01', freq='M') pytest.raises(ValueError, period_range, start='2011-01-01', end='NaT', freq='M') def test_constructor_year_and_quarter(self): year = pd.Series([2001, 2002, 2003]) quarter = year - 2000 idx = PeriodIndex(year=year, quarter=quarter) strs = ['%dQ%d' % t for t in zip(quarter, year)] lops = list(map(Period, strs)) p = PeriodIndex(lops) tm.assert_index_equal(p, idx) def test_constructor_freq_mult(self): # GH #7811 for func in [PeriodIndex, period_range]: # must be the same, but for sure... 
pidx = func(start='2014-01', freq='2M', periods=4) expected = PeriodIndex(['2014-01', '2014-03', '2014-05', '2014-07'], freq='2M') tm.assert_index_equal(pidx, expected) pidx = func(start='2014-01-02', end='2014-01-15', freq='3D') expected = PeriodIndex(['2014-01-02', '2014-01-05', '2014-01-08', '2014-01-11', '2014-01-14'], freq='3D') tm.assert_index_equal(pidx, expected) pidx = func(end='2014-01-01 17:00', freq='4H', periods=3) expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00', '2014-01-01 17:00'], freq='4H') tm.assert_index_equal(pidx, expected) msg = ('Frequency must be positive, because it' ' represents span: -1M') with tm.assert_raises_regex(ValueError, msg): PeriodIndex(['2011-01'], freq='-1M') msg = ('Frequency must be positive, because it' ' represents span: 0M') with tm.assert_raises_regex(ValueError, msg): PeriodIndex(['2011-01'], freq='0M') msg = ('Frequency must be positive, because it' ' represents span: 0M') with tm.assert_raises_regex(ValueError, msg): period_range('2011-01', periods=3, freq='0M') def test_constructor_freq_mult_dti_compat(self): import itertools mults = [1, 2, 3, 4, 5] freqs = ['A', 'M', 'D', 'T', 'S'] for mult, freq in itertools.product(mults, freqs): freqstr = str(mult) + freq pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10) expected = date_range(start='2014-04-01', freq=freqstr, periods=10).to_period(freqstr) tm.assert_index_equal(pidx, expected) def test_constructor_freq_combined(self): for freq in ['1D1H', '1H1D']: pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq) expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'], freq='25H') for freq, func in zip(['1D1H', '1H1D'], [PeriodIndex, period_range]): pidx = func(start='2016-01-01', periods=2, freq=freq) expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'], freq='25H') tm.assert_index_equal(pidx, expected) def test_constructor(self): pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') assert len(pi) == 9 pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009') assert len(pi) == 4 * 9 pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') assert len(pi) == 12 * 9 pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009') assert len(pi) == 365 * 9 + 2 pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009') assert len(pi) == 261 * 9 pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00') assert len(pi) == 365 * 24 pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59') assert len(pi) == 24 * 60 pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59') assert len(pi) == 24 * 60 * 60 start = Period('02-Apr-2005', 'B') i1 = PeriodIndex(start=start, periods=20) assert len(i1) == 20 assert i1.freq == start.freq assert i1[0] == start end_intv = Period('2006-12-31', 'W') i1 = PeriodIndex(end=end_intv, periods=10) assert len(i1) == 10 assert i1.freq == end_intv.freq assert i1[-1] == end_intv end_intv = Period('2006-12-31', '1w') i2 = PeriodIndex(end=end_intv, periods=10) assert len(i1) == len(i2) assert (i1 == i2).all() assert i1.freq == i2.freq end_intv = Period('2006-12-31', ('w', 1)) i2 = PeriodIndex(end=end_intv, periods=10) assert len(i1) == len(i2) assert (i1 == i2).all() assert i1.freq == i2.freq end_intv = Period('2005-05-01', 'B') i1 = PeriodIndex(start=start, end=end_intv) # infer freq from first element i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')]) assert len(i2) == 2 assert i2[0] == end_intv i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')])) assert 
len(i2) == 2 assert i2[0] == end_intv # Mixed freq should fail vals = [end_intv, Period('2006-12-31', 'w')] pytest.raises(ValueError, PeriodIndex, vals) vals = np.array(vals) pytest.raises(ValueError, PeriodIndex, vals) def test_constructor_error(self): start = Period('02-Apr-2005', 'B') end_intv = Period('2006-12-31', ('w', 1)) msg = 'Start and end must have same freq' with tm.assert_raises_regex(ValueError, msg): PeriodIndex(start=start, end=end_intv) msg = 'Must specify 2 of start, end, periods' with tm.assert_raises_regex(ValueError, msg): PeriodIndex(start=start) def test_recreate_from_data(self): for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']: org = PeriodIndex(start='2001/04/01', freq=o, periods=1) idx = PeriodIndex(org.values, freq=o) tm.assert_index_equal(idx, org) def test_map_with_string_constructor(self): raw = [2005, 2007, 2009] index = PeriodIndex(raw, freq='A') types = str, if PY3: # unicode types += text_type, for t in types: expected = Index(lmap(t, raw)) res = index.map(t) # should return an Index assert isinstance(res, Index) # preserve element types assert all(isinstance(resi, t) for resi in res) # lastly, values should compare equal tm.assert_index_equal(res, expected) class TestSeriesPeriod(object): def setup_method(self, method): self.series = Series(period_range('2000-01-01', periods=10, freq='D')) def test_constructor_cant_cast_period(self): with pytest.raises(TypeError): Series(period_range('2000-01-01', periods=10, freq='D'), dtype=float) def test_constructor_cast_object(self): s = Series(period_range('1/1/2000', periods=10), dtype=object) exp = Series(period_range('1/1/2000', periods=10)) tm.assert_series_equal(s, exp)
mit
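A short standalone sketch of one constructor behaviour exercised above (building a quarterly PeriodIndex from year/quarter arrays). It assumes a pandas version contemporary with the test; the field-based constructor was deprecated in later releases.

import numpy as np
import pandas as pd

# As in test_constructor_field_arrays: quarterly index from year/quarter fields.
years = np.repeat(np.arange(1990, 1993), 4)
quarters = np.tile(np.arange(1, 5), 3)
idx = pd.PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
print(idx[:4])

# The equivalent range built directly.
print(pd.period_range('1990Q1', '1992Q4', freq='Q-DEC').equals(idx))  # True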
PatrickOReilly/scikit-learn
sklearn/feature_selection/variance_threshold.py
123
2572
# Author: Lars Buitinck # License: 3-clause BSD import numpy as np from ..base import BaseEstimator from .base import SelectorMixin from ..utils import check_array from ..utils.sparsefuncs import mean_variance_axis from ..utils.validation import check_is_fitted class VarianceThreshold(BaseEstimator, SelectorMixin): """Feature selector that removes all low-variance features. This feature selection algorithm looks only at the features (X), not the desired outputs (y), and can thus be used for unsupervised learning. Read more in the :ref:`User Guide <variance_threshold>`. Parameters ---------- threshold : float, optional Features with a training-set variance lower than this threshold will be removed. The default is to keep all features with non-zero variance, i.e. remove the features that have the same value in all samples. Attributes ---------- variances_ : array, shape (n_features,) Variances of individual features. Examples -------- The following dataset has integer features, two of which are the same in every sample. These are removed with the default setting for threshold:: >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]] >>> selector = VarianceThreshold() >>> selector.fit_transform(X) array([[2, 0], [1, 4], [1, 1]]) """ def __init__(self, threshold=0.): self.threshold = threshold def fit(self, X, y=None): """Learn empirical variances from X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Sample vectors from which to compute variances. y : any Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- self """ X = check_array(X, ('csr', 'csc'), dtype=np.float64) if hasattr(X, "toarray"): # sparse matrix _, self.variances_ = mean_variance_axis(X, axis=0) else: self.variances_ = np.var(X, axis=0) if np.all(self.variances_ <= self.threshold): msg = "No feature in X meets the variance threshold {0:.5f}" if X.shape[0] == 1: msg += " (X contains only one sample)" raise ValueError(msg.format(self.threshold)) return self def _get_support_mask(self): check_is_fitted(self, 'variances_') return self.variances_ > self.threshold
bsd-3-clause
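A hedged usage sketch for the selector above, following the Boolean-feature example from the scikit-learn docs: drop features that are constant in more than roughly 80% of samples by thresholding on the Bernoulli variance p(1 - p).

from sklearn.feature_selection import VarianceThreshold

# Boolean features; the first column is 0 in 5 of the 6 samples.
X = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]

# Keep features whose variance exceeds that of a Bernoulli variable with p = 0.8.
sel = VarianceThreshold(threshold=0.8 * (1 - 0.8))
print(sel.fit_transform(X))
# The first column (variance 5/36 ~ 0.139 < 0.16) is removed; the other two are kept.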
gxxjjj/QuantEcon.py
quantecon/arma.py
7
9906
""" Filename: arma.py Authors: Doc-Jin Jang, Jerry Choi, Thomas Sargent, John Stachurski Provides functions for working with and visualizing scalar ARMA processes. """ import numpy as np from numpy import conj, pi import matplotlib.pyplot as plt from scipy.signal import dimpulse, freqz, dlsim # == Ignore unnecessary warnings concerning casting complex variables back to # floats == # import warnings warnings.filterwarnings('ignore') class ARMA(object): r""" This class represents scalar ARMA(p, q) processes. If phi and theta are scalars, then the model is understood to be .. math:: X_t = \phi X_{t-1} + \epsilon_t + \theta \epsilon_{t-1} where :math:`epsilon_t` is a white noise process with standard deviation :math:`sigma`. If phi and theta are arrays or sequences, then the interpretation is the ARMA(p, q) model .. math:: X_t = \phi_1 X_{t-1} + ... + \phi_p X_{t-p} + \epsilon_t + \theta_1 \epsilon_{t-1} + ... + \theta_q \epsilon_{t-q} where * :math:`\phi = (\phi_1, \phi_2,..., \phi_p)` * :math:`\theta = (\theta_1, \theta_2,..., \theta_q)` * :math:`\sigma` is a scalar, the standard deviation of the white noise Parameters ---------- phi : scalar or iterable or array_like(float) Autocorrelation values for the autocorrelated variable. See above for explanation. theta : scalar or iterable or array_like(float) Autocorrelation values for the white noise of the model. See above for explanation sigma : scalar(float) The standard deviation of the white noise Attributes ---------- phi, theta, sigma : see Parmeters ar_poly : array_like(float) The polynomial form that is needed by scipy.signal to do the processing we desire. Corresponds with the phi values ma_poly : array_like(float) The polynomial form that is needed by scipy.signal to do the processing we desire. Corresponds with the theta values """ def __init__(self, phi, theta=0, sigma=1): self._phi, self._theta = phi, theta self.sigma = sigma self.set_params() def __repr__(self): m = "ARMA(phi=%s, theta=%s, sigma=%s)" return m % (self.phi, self.theta, self.sigma) def __str__(self): m = "An ARMA({p}, {q}) process" p = np.asarray(self.phi).size q = np.asarray(self.theta).size return m.format(p=p, q=q) # Special latex print method for working in notebook def _repr_latex_(self): m = r"$X_t = " phi = np.atleast_1d(self.phi) theta = np.atleast_1d(self.theta) rhs = "" for (tm, phi_p) in enumerate(phi): # don't include terms if they are equal to zero if abs(phi_p) > 1e-12: rhs += r"%+g X_{t-%i}" % (phi_p, tm+1) if rhs[0] == "+": rhs = rhs[1:] # remove initial `+` if phi_1 was positive rhs += r" + \epsilon_t" for (tm, th_q) in enumerate(theta): # don't include terms if they are equal to zero if abs(th_q) > 1e-12: rhs += r"%+g \epsilon_{t-%i}" % (th_q, tm+1) return m + rhs + "$" @property def phi(self): return self._phi @phi.setter def phi(self, new_value): self._phi = new_value self.set_params() @property def theta(self): return self._theta @theta.setter def theta(self, new_value): self._theta = new_value self.set_params() def set_params(self): r""" Internally, scipy.signal works with systems of the form .. math:: ar_{poly}(L) X_t = ma_{poly}(L) \epsilon_t where L is the lag operator. To match this, we set .. math:: ar_{poly} = (1, -\phi_1, -\phi_2,..., -\phi_p) ma_{poly} = (1, \theta_1, \theta_2,..., \theta_q) In addition, ar_poly must be at least as long as ma_poly. This can be achieved by padding it out with zeros when required. 
""" # === set up ma_poly === # ma_poly = np.asarray(self._theta) self.ma_poly = np.insert(ma_poly, 0, 1) # The array (1, theta) # === set up ar_poly === # if np.isscalar(self._phi): ar_poly = np.array(-self._phi) else: ar_poly = -np.asarray(self._phi) self.ar_poly = np.insert(ar_poly, 0, 1) # The array (1, -phi) # === pad ar_poly with zeros if required === # if len(self.ar_poly) < len(self.ma_poly): temp = np.zeros(len(self.ma_poly) - len(self.ar_poly)) self.ar_poly = np.hstack((self.ar_poly, temp)) def impulse_response(self, impulse_length=30): """ Get the impulse response corresponding to our model. Returns ------- psi : array_like(float) psi[j] is the response at lag j of the impulse response. We take psi[0] as unity. """ sys = self.ma_poly, self.ar_poly, 1 times, psi = dimpulse(sys, n=impulse_length) psi = psi[0].flatten() # Simplify return value into flat array return psi def spectral_density(self, two_pi=True, res=1200): r""" Compute the spectral density function. The spectral density is the discrete time Fourier transform of the autocovariance function. In particular, .. math:: f(w) = \sum_k \gamma(k) exp(-ikw) where gamma is the autocovariance function and the sum is over the set of all integers. Parameters ---------- two_pi : Boolean, optional Compute the spectral density function over [0, pi] if two_pi is False and [0, 2 pi] otherwise. Default value is True res : scalar or array_like(int), optional(default=1200) If res is a scalar then the spectral density is computed at `res` frequencies evenly spaced around the unit circle, but if res is an array then the function computes the response at the frequencies given by the array Returns ------- w : array_like(float) The normalized frequencies at which h was computed, in radians/sample spect : array_like(float) The frequency response """ w, h = freqz(self.ma_poly, self.ar_poly, worN=res, whole=two_pi) spect = h * conj(h) * self.sigma**2 return w, spect def autocovariance(self, num_autocov=16): """ Compute the autocovariance function from the ARMA parameters over the integers range(num_autocov) using the spectral density and the inverse Fourier transform. Parameters ---------- num_autocov : scalar(int), optional(default=16) The number of autocovariances to calculate """ spect = self.spectral_density()[1] acov = np.fft.ifft(spect).real # num_autocov should be <= len(acov) / 2 return acov[:num_autocov] def simulation(self, ts_length=90): """ Compute a simulated sample path assuming Gaussian shocks. 
Parameters ---------- ts_length : scalar(int), optional(default=90) Number of periods to simulate for Returns ------- vals : array_like(float) A simulation of the model that corresponds to this class """ sys = self.ma_poly, self.ar_poly, 1 u = np.random.randn(ts_length, 1) * self.sigma vals = dlsim(sys, u)[1] return vals.flatten() def plot_impulse_response(self, ax=None, show=True): if show: fig, ax = plt.subplots() ax.set_title('Impulse response') yi = self.impulse_response() ax.stem(list(range(len(yi))), yi) ax.set_xlim(xmin=(-0.5)) ax.set_ylim(min(yi)-0.1, max(yi)+0.1) ax.set_xlabel('time') ax.set_ylabel('response') if show: plt.show() def plot_spectral_density(self, ax=None, show=True): if show: fig, ax = plt.subplots() ax.set_title('Spectral density') w, spect = self.spectral_density(two_pi=False) ax.semilogy(w, spect) ax.set_xlim(0, pi) ax.set_ylim(0, np.max(spect)) ax.set_xlabel('frequency') ax.set_ylabel('spectrum') if show: plt.show() def plot_autocovariance(self, ax=None, show=True): if show: fig, ax = plt.subplots() ax.set_title('Autocovariance') acov = self.autocovariance() ax.stem(list(range(len(acov))), acov) ax.set_xlim(-0.5, len(acov) - 0.5) ax.set_xlabel('time') ax.set_ylabel('autocovariance') if show: plt.show() def plot_simulation(self, ax=None, show=True): if show: fig, ax = plt.subplots() ax.set_title('Sample path') x_out = self.simulation() ax.plot(x_out) ax.set_xlabel('time') ax.set_ylabel('state space') if show: plt.show() def quad_plot(self): """ Plots the impulse response, spectral_density, autocovariance, and one realization of the process. """ num_rows, num_cols = 2, 2 fig, axes = plt.subplots(num_rows, num_cols, figsize=(12, 8)) plt.subplots_adjust(hspace=0.4) plot_functions = [self.plot_impulse_response, self.plot_spectral_density, self.plot_autocovariance, self.plot_simulation] for plot_func, ax in zip(plot_functions, axes.flatten()): plot_func(ax, show=False) plt.show()
bsd-3-clause
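A minimal usage sketch for the ARMA class defined above, assuming a QuantEcon.py install that exposes ARMA at the package top level.

import numpy as np
from quantecon import ARMA

# X_t = 0.5 X_{t-1} + eps_t + 0.2 eps_{t-1}, with unit-variance white noise.
lp = ARMA(phi=0.5, theta=0.2, sigma=1.0)

psi = lp.impulse_response(impulse_length=10)   # psi[0] is unity by construction
acov = lp.autocovariance(num_autocov=5)        # first few autocovariances
x = lp.simulation(ts_length=200)               # one Gaussian sample path

print(psi[:4])
print(acov[:3])
print(x.shape)  # (200,)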
ZhangXinNan/tensorflow
tensorflow/python/estimator/inputs/queues/feeding_functions.py
20
19127
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper functions for enqueuing data from arrays and pandas `DataFrame`s.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import random import types as tp import numpy as np import six from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.summary import summary from tensorflow.python.training import queue_runner try: # pylint: disable=g-import-not-at-top import pandas as pd HAS_PANDAS = True except IOError: # Pandas writes a temporary file during import. If it fails, don't use pandas. HAS_PANDAS = False except ImportError: HAS_PANDAS = False def _fill_array(arr, seq, fillvalue=0): """Recursively fills padded arr with elements from seq. If length of seq is less than arr padded length, fillvalue used. Args: arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len]. seq: Non-padded list of data samples of shape [batch_size, ..., padded_dim(None)] fillvalue: Default fillvalue to use. """ if arr.ndim == 1: try: len_ = len(seq) except TypeError: len_ = 0 arr[:len_] = seq arr[len_:] = fillvalue else: for subarr, subseq in six.moves.zip_longest(arr, seq, fillvalue=()): _fill_array(subarr, subseq, fillvalue) def _pad_if_needed(batch_key_item, fillvalue=0): """ Returns padded batch. Args: batch_key_item: List of data samples of any type with shape [batch_size, ..., padded_dim(None)]. fillvalue: Default fillvalue to use. Returns: Padded with zeros tensor of same type and shape [batch_size, ..., max_padded_dim_len]. Raises: ValueError if data samples have different shapes (except last padded dim). """ shapes = [ seq.shape[:-1] if len(seq.shape) > 0 else -1 for seq in batch_key_item ] if not all(shapes[0] == x for x in shapes): raise ValueError("Array shapes must match.") last_length = [ seq.shape[-1] if len(seq.shape) > 0 else 0 for seq in batch_key_item ] if all([x == last_length[0] for x in last_length]): return batch_key_item batch_size = len(batch_key_item) max_sequence_length = max(last_length) result_batch = np.zeros( shape=[batch_size] + list(shapes[0]) + [max_sequence_length], dtype=batch_key_item[0].dtype) _fill_array(result_batch, batch_key_item, fillvalue) return result_batch def _get_integer_indices_for_next_batch(batch_indices_start, batch_size, epoch_end, array_length, current_epoch, total_epochs): """Returns the integer indices for next batch. 
If total epochs is not None and current epoch is the final epoch, the end index of the next batch should not exceed the `epoch_end` (i.e., the final batch might not have size `batch_size` to avoid overshooting the last epoch). Args: batch_indices_start: Integer, the index to start next batch. batch_size: Integer, size of batches to return. epoch_end: Integer, the end index of the epoch. The epoch could start from a random position, so `epoch_end` provides the end index for that. array_length: Integer, the length of the array. current_epoch: Integer, the epoch number has been emitted. total_epochs: Integer or `None`, the total number of epochs to emit. If `None` will run forever. Returns: A tuple of a list with integer indices for next batch and `current_epoch` value after the next batch. Raises: OutOfRangeError if `current_epoch` is not less than `total_epochs`. """ if total_epochs is not None and current_epoch >= total_epochs: raise errors.OutOfRangeError(None, None, "Already emitted %s epochs." % current_epoch) batch_indices_end = batch_indices_start + batch_size batch_indices = [ j % array_length for j in range(batch_indices_start, batch_indices_end) ] epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end] current_epoch += len(epoch_end_indices) if total_epochs is None or current_epoch < total_epochs: return (batch_indices, current_epoch) # Now we might have emitted more data for expected epochs. Need to trim. final_epoch_end_inclusive = epoch_end_indices[ -(current_epoch - total_epochs + 1)] batch_indices = batch_indices[:final_epoch_end_inclusive + 1] return (batch_indices, total_epochs) class _ArrayFeedFn(object): """Creates feed dictionaries from numpy arrays.""" def __init__(self, placeholders, array, batch_size, random_start=False, seed=None, num_epochs=None): if len(placeholders) != 2: raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format( len(placeholders))) self._placeholders = placeholders self._array = array self._max = len(array) self._batch_size = batch_size self._num_epochs = num_epochs self._epoch = 0 random.seed(seed) self._trav = random.randrange(self._max) if random_start else 0 self._epoch_end = (self._trav - 1) % self._max def __call__(self): integer_indexes, self._epoch = _get_integer_indices_for_next_batch( batch_indices_start=self._trav, batch_size=self._batch_size, epoch_end=self._epoch_end, array_length=self._max, current_epoch=self._epoch, total_epochs=self._num_epochs) self._trav = (integer_indexes[-1] + 1) % self._max return { self._placeholders[0]: integer_indexes, self._placeholders[1]: self._array[integer_indexes] } class _OrderedDictNumpyFeedFn(object): """Creates feed dictionaries from `OrderedDict`s of numpy arrays.""" def __init__(self, placeholders, ordered_dict_of_arrays, batch_size, random_start=False, seed=None, num_epochs=None): if len(placeholders) != len(ordered_dict_of_arrays) + 1: raise ValueError("Expected {} placeholders; got {}.".format( len(ordered_dict_of_arrays), len(placeholders))) self._index_placeholder = placeholders[0] self._col_placeholders = placeholders[1:] self._ordered_dict_of_arrays = ordered_dict_of_arrays self._max = len(next(iter(ordered_dict_of_arrays.values()))) for _, v in ordered_dict_of_arrays.items(): if len(v) != self._max: raise ValueError("Array lengths must match.") self._batch_size = batch_size self._num_epochs = num_epochs self._epoch = 0 random.seed(seed) self._trav = random.randrange(self._max) if random_start else 0 self._epoch_end = (self._trav - 1) % self._max def 
__call__(self): integer_indexes, self._epoch = _get_integer_indices_for_next_batch( batch_indices_start=self._trav, batch_size=self._batch_size, epoch_end=self._epoch_end, array_length=self._max, current_epoch=self._epoch, total_epochs=self._num_epochs) self._trav = (integer_indexes[-1] + 1) % self._max feed_dict = {self._index_placeholder: integer_indexes} cols = [ column[integer_indexes] for column in self._ordered_dict_of_arrays.values() ] feed_dict.update(dict(zip(self._col_placeholders, cols))) return feed_dict class _PandasFeedFn(object): """Creates feed dictionaries from pandas `DataFrames`.""" def __init__(self, placeholders, dataframe, batch_size, random_start=False, seed=None, num_epochs=None): if len(placeholders) != len(dataframe.columns) + 1: raise ValueError("Expected {} placeholders; got {}.".format( len(dataframe.columns) + 1, len(placeholders))) self._index_placeholder = placeholders[0] self._col_placeholders = placeholders[1:] self._dataframe = dataframe self._max = len(dataframe) self._batch_size = batch_size self._num_epochs = num_epochs self._epoch = 0 random.seed(seed) self._trav = random.randrange(self._max) if random_start else 0 self._epoch_end = (self._trav - 1) % self._max def __call__(self): integer_indexes, self._epoch = _get_integer_indices_for_next_batch( batch_indices_start=self._trav, batch_size=self._batch_size, epoch_end=self._epoch_end, array_length=self._max, current_epoch=self._epoch, total_epochs=self._num_epochs) self._trav = (integer_indexes[-1] + 1) % self._max result = self._dataframe.iloc[integer_indexes] cols = [result[col].values for col in result.columns] feed_dict = dict(zip(self._col_placeholders, cols)) feed_dict[self._index_placeholder] = result.index.values return feed_dict class _GeneratorFeedFn(object): """Creates feed dictionaries from `Generator` of `dicts` of numpy arrays.""" def __init__(self, placeholders, generator, batch_size, random_start=False, seed=None, num_epochs=None, pad_value=None): first_sample = next(generator()) if len(placeholders) != len(first_sample): raise ValueError("Expected {} placeholders; got {}.".format( len(first_sample), len(placeholders))) self._keys = sorted(list(first_sample.keys())) self._col_placeholders = placeholders self._generator_function = generator self._iterator = generator() self._batch_size = batch_size self._num_epochs = num_epochs self._epoch = 0 self._pad_value = pad_value random.seed(seed) def __call__(self): if self._num_epochs and self._epoch >= self._num_epochs: raise errors.OutOfRangeError(None, None, "Already emitted %s epochs." 
% self._epoch) list_dict = {} list_dict_size = 0 while list_dict_size < self._batch_size: try: data_row = next(self._iterator) except StopIteration: self._epoch += 1 self._iterator = self._generator_function() data_row = next(self._iterator) for index, key in enumerate(self._keys): if key not in data_row.keys(): raise KeyError("key mismatch between dicts emitted by GenFun " "Expected {} keys; got {}".format( self._keys, data_row.keys())) list_dict.setdefault(self._col_placeholders[index], list()).append( data_row[key]) list_dict_size += 1 if self._pad_value is not None: feed_dict = { key: np.asarray(_pad_if_needed(item, self._pad_value)) for key, item in list(list_dict.items()) } else: feed_dict = { key: np.asarray(item) for key, item in list(list_dict.items()) } return feed_dict def _enqueue_data(data, capacity, shuffle=False, min_after_dequeue=None, num_threads=1, seed=None, name="enqueue_input", enqueue_size=1, num_epochs=None, pad_value=None): """Creates a queue filled from a numpy array or pandas `DataFrame`. Returns a queue filled with the rows of the given (`OrderedDict` of) array or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued `Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of) numpy arrays, the first enqueued `Tensor` contains the row number. Args: data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read into the queue. capacity: the capacity of the queue. shuffle: whether or not to shuffle the rows of the array. min_after_dequeue: minimum number of elements that can remain in the queue after a dequeue operation. Only used when `shuffle` is true. If not set, defaults to `capacity` / 4. num_threads: number of threads used for reading and enqueueing. seed: used to seed shuffling and reader starting points. name: a scope name identifying the data. enqueue_size: the number of rows to enqueue per step. num_epochs: limit enqueuing to a specified number of epochs, if provided. pad_value: default value for dynamic padding of data samples, if provided. Returns: A queue filled with the rows of the given (`OrderedDict` of) array or `DataFrame`. Raises: TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy arrays, a numpy `ndarray`, or a generator producing these. NotImplementedError: padding and shuffling data at the same time. NotImplementedError: padding usage with non generator data type. 
""" with ops.name_scope(name): if isinstance(data, np.ndarray): types = [dtypes.int64, dtypes.as_dtype(data.dtype)] queue_shapes = [(), data.shape[1:]] get_feed_fn = _ArrayFeedFn elif isinstance(data, collections.OrderedDict): types = [dtypes.int64 ] + [dtypes.as_dtype(col.dtype) for col in data.values()] queue_shapes = [()] + [col.shape[1:] for col in data.values()] get_feed_fn = _OrderedDictNumpyFeedFn elif isinstance(data, tp.FunctionType): x_first_el = six.next(data()) x_first_keys = sorted(x_first_el.keys()) x_first_values = [x_first_el[key] for key in x_first_keys] types = [dtypes.as_dtype(col.dtype) for col in x_first_values] queue_shapes = [col.shape for col in x_first_values] get_feed_fn = _GeneratorFeedFn elif HAS_PANDAS and isinstance(data, pd.DataFrame): types = [ dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes) ] queue_shapes = [() for _ in types] get_feed_fn = _PandasFeedFn else: raise TypeError( "data must be either a numpy array or pandas DataFrame if pandas is " "installed; got {}".format(type(data).__name__)) pad_data = pad_value is not None if pad_data and get_feed_fn is not _GeneratorFeedFn: raise NotImplementedError( "padding is only available with generator usage") if shuffle and pad_data: raise NotImplementedError( "padding and shuffling data at the same time is not implemented") # TODO(jamieas): TensorBoard warnings for all warnings below once available. if num_threads > 1 and num_epochs is not None: logging.warning( "enqueue_data was called with num_epochs and num_threads > 1. " "num_epochs is applied per thread, so this will produce more " "epochs than you probably intend. " "If you want to limit epochs, use one thread.") if shuffle and num_threads > 1 and num_epochs is not None: logging.warning( "enqueue_data was called with shuffle=True, num_threads > 1, and " "num_epochs. This will create multiple threads, all reading the " "array/dataframe in order adding to the same shuffling queue; the " "results will likely not be sufficiently shuffled.") if not shuffle and num_threads > 1: logging.warning( "enqueue_data was called with shuffle=False and num_threads > 1. " "This will create multiple threads, all reading the " "array/dataframe in order. If you want examples read in order, use" " one thread; if you want multiple threads, enable shuffling.") if shuffle: min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else min_after_dequeue) queue = data_flow_ops.RandomShuffleQueue( capacity, min_after_dequeue, dtypes=types, shapes=queue_shapes, seed=seed) elif pad_data: min_after_dequeue = 0 # just for the summary text queue_shapes = list( map(lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x, queue_shapes)) queue = data_flow_ops.PaddingFIFOQueue( capacity, dtypes=types, shapes=queue_shapes) else: min_after_dequeue = 0 # just for the summary text queue = data_flow_ops.FIFOQueue( capacity, dtypes=types, shapes=queue_shapes) enqueue_ops = [] feed_fns = [] for i in range(num_threads): # Note the placeholders have no shapes, so they will accept any # enqueue_size. enqueue_many below will break them up. 
placeholders = [array_ops.placeholder(t) for t in types] enqueue_ops.append(queue.enqueue_many(placeholders)) seed_i = None if seed is None else (i + 1) * seed if not pad_data: feed_fns.append( get_feed_fn( placeholders, data, enqueue_size, random_start=shuffle, seed=seed_i, num_epochs=num_epochs)) else: feed_fns.append( get_feed_fn( placeholders, data, enqueue_size, random_start=shuffle, seed=seed_i, num_epochs=num_epochs, pad_value=pad_value)) runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns) queue_runner.add_queue_runner(runner) full = ( math_ops.cast( math_ops.maximum(0, queue.size() - min_after_dequeue), dtypes.float32) * (1. / (capacity - min_after_dequeue))) # Note that name contains a '/' at the end so we intentionally do not place # a '/' after %s below. summary_name = ("queue/%sfraction_over_%d_of_%d_full" % (queue.name, min_after_dequeue, capacity - min_after_dequeue)) summary.scalar(summary_name, full) return queue
apache-2.0
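The _fill_array/_pad_if_needed helpers above pad ragged batches out to the longest sample before enqueueing. A numpy-only sketch of the same idea for 1-D samples (not the TensorFlow implementation itself):

import numpy as np

def pad_batch(batch, fillvalue=0):
    # Pad a list of 1-D arrays of differing lengths out to the longest one.
    max_len = max(len(seq) for seq in batch)
    out = np.full((len(batch), max_len), fillvalue, dtype=np.asarray(batch[0]).dtype)
    for row, seq in zip(out, batch):
        row[:len(seq)] = seq
    return out

batch = [np.array([1, 2, 3]), np.array([4]), np.array([5, 6])]
print(pad_batch(batch))
# [[1 2 3]
#  [4 0 0]
#  [5 6 0]]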
zymsys/sms-tools
software/models_interface/sprModel_function.py
18
3422
# function to call the main analysis/synthesis functions in software/models/sprModel.py import numpy as np import matplotlib.pyplot as plt import os, sys from scipy.signal import get_window sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/')) import utilFunctions as UF import sprModel as SPR import stft as STFT def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80, minSineDur=0.02, maxnSines=150, freqDevOffset=10, freqDevSlope=0.001): """ inputFile: input sound file (monophonic with sampling rate of 44100) window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris) M: analysis window size N: fft size (power of two, bigger or equal than M) t: magnitude threshold of spectral peaks minSineDur: minimum duration of sinusoidal tracks maxnSines: maximum number of parallel sinusoids freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0 freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation """ # size of fft used in synthesis Ns = 512 # hop size (has to be 1/4 of Ns) H = 128 # read input sound (fs, x) = UF.wavread(inputFile) # compute analysis window w = get_window(window, M) # perform sinusoidal plus residual analysis tfreq, tmag, tphase, xr = SPR.sprModelAnal(x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope) # compute spectrogram of residual mXr, pXr = STFT.stftAnal(xr, fs, w, N, H) # sum sinusoids and residual y, ys = SPR.sprModelSynth(tfreq, tmag, tphase, xr, Ns, H, fs) # output sound file (monophonic with sampling rate of 44100) outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel_sines.wav' outputFileResidual = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel_residual.wav' outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel.wav' # write sounds files for sinusoidal, residual, and the sum UF.wavwrite(ys, fs, outputFileSines) UF.wavwrite(xr, fs, outputFileResidual) UF.wavwrite(y, fs, outputFile) # create figure to show plots plt.figure(figsize=(12, 9)) # frequency range to plot maxplotfreq = 5000.0 # plot the input sound plt.subplot(3,1,1) plt.plot(np.arange(x.size)/float(fs), x) plt.axis([0, x.size/float(fs), min(x), max(x)]) plt.ylabel('amplitude') plt.xlabel('time (sec)') plt.title('input sound: x') # plot the magnitude spectrogram of residual plt.subplot(3,1,2) maxplotbin = int(N*maxplotfreq/fs) numFrames = int(mXr[:,0].size) frmTime = H*np.arange(numFrames)/float(fs) binFreq = np.arange(maxplotbin+1)*float(fs)/N plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1])) plt.autoscale(tight=True) # plot the sinusoidal frequencies on top of the residual spectrogram if (tfreq.shape[1] > 0): tracks = tfreq*np.less(tfreq, maxplotfreq) tracks[tracks<=0] = np.nan plt.plot(frmTime, tracks, color='k') plt.title('sinusoidal tracks + residual spectrogram') plt.autoscale(tight=True) # plot the output sound plt.subplot(3,1,3) plt.plot(np.arange(y.size)/float(fs), y) plt.axis([0, y.size/float(fs), min(y), max(y)]) plt.ylabel('amplitude') plt.xlabel('time (sec)') plt.title('output sound: y') plt.tight_layout() plt.show() if __name__ == "__main__": main()
agpl-3.0
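A hedged usage sketch for the wrapper above. It assumes an sms-tools checkout with its bundled sounds/ directory, run from software/models_interface so the module imports resolve; the window and threshold values are illustrative changes from the defaults.

# Run sinusoidal-plus-residual analysis/synthesis with a Blackman window and a lower peak threshold.
import sprModel_function as SPRF  # the module above (assumes sms-tools is on sys.path)

SPRF.main(inputFile='../../sounds/bendir.wav', window='blackman',
          M=2001, N=2048, t=-90, minSineDur=0.02,
          maxnSines=150, freqDevOffset=10, freqDevSlope=0.001)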
phdowling/scikit-learn
examples/ensemble/plot_gradient_boosting_oob.py
230
4762
""" ====================================== Gradient Boosting Out-of-Bag estimates ====================================== Out-of-bag (OOB) estimates can be a useful heuristic to estimate the "optimal" number of boosting iterations. OOB estimates are almost identical to cross-validation estimates but they can be computed on-the-fly without the need for repeated model fitting. OOB estimates are only available for Stochastic Gradient Boosting (i.e. ``subsample < 1.0``), the estimates are derived from the improvement in loss based on the examples not included in the bootstrap sample (the so-called out-of-bag examples). The OOB estimator is a pessimistic estimator of the true test loss, but remains a fairly good approximation for a small number of trees. The figure shows the cumulative sum of the negative OOB improvements as a function of the boosting iteration. As you can see, it tracks the test loss for the first hundred iterations but then diverges in a pessimistic way. The figure also shows the performance of 3-fold cross validation which usually gives a better estimate of the test loss but is computationally more demanding. """ print(__doc__) # Author: Peter Prettenhofer <[email protected]> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn.cross_validation import KFold from sklearn.cross_validation import train_test_split # Generate data (adapted from G. Ridgeway's gbm example) n_samples = 1000 random_state = np.random.RandomState(13) x1 = random_state.uniform(size=n_samples) x2 = random_state.uniform(size=n_samples) x3 = random_state.randint(0, 4, size=n_samples) p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3))) y = random_state.binomial(1, p, size=n_samples) X = np.c_[x1, x2, x3] X = X.astype(np.float32) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=9) # Fit classifier with out-of-bag estimates params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5, 'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3} clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) acc = clf.score(X_test, y_test) print("Accuracy: {:.4f}".format(acc)) n_estimators = params['n_estimators'] x = np.arange(n_estimators) + 1 def heldout_score(clf, X_test, y_test): """compute deviance scores on ``X_test`` and ``y_test``. 
""" score = np.zeros((n_estimators,), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): score[i] = clf.loss_(y_test, y_pred) return score def cv_estimate(n_folds=3): cv = KFold(n=X_train.shape[0], n_folds=n_folds) cv_clf = ensemble.GradientBoostingClassifier(**params) val_scores = np.zeros((n_estimators,), dtype=np.float64) for train, test in cv: cv_clf.fit(X_train[train], y_train[train]) val_scores += heldout_score(cv_clf, X_train[test], y_train[test]) val_scores /= n_folds return val_scores # Estimate best n_estimator using cross-validation cv_score = cv_estimate(3) # Compute best n_estimator for test data test_score = heldout_score(clf, X_test, y_test) # negative cumulative sum of oob improvements cumsum = -np.cumsum(clf.oob_improvement_) # min loss according to OOB oob_best_iter = x[np.argmin(cumsum)] # min loss according to test (normalize such that first loss is 0) test_score -= test_score[0] test_best_iter = x[np.argmin(test_score)] # min loss according to cv (normalize such that first loss is 0) cv_score -= cv_score[0] cv_best_iter = x[np.argmin(cv_score)] # color brew for the three curves oob_color = list(map(lambda x: x / 256.0, (190, 174, 212))) test_color = list(map(lambda x: x / 256.0, (127, 201, 127))) cv_color = list(map(lambda x: x / 256.0, (253, 192, 134))) # plot curves and vertical lines for best iterations plt.plot(x, cumsum, label='OOB loss', color=oob_color) plt.plot(x, test_score, label='Test loss', color=test_color) plt.plot(x, cv_score, label='CV loss', color=cv_color) plt.axvline(x=oob_best_iter, color=oob_color) plt.axvline(x=test_best_iter, color=test_color) plt.axvline(x=cv_best_iter, color=cv_color) # add three vertical lines to xticks xticks = plt.xticks() xticks_pos = np.array(xticks[0].tolist() + [oob_best_iter, cv_best_iter, test_best_iter]) xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) + ['OOB', 'CV', 'Test']) ind = np.argsort(xticks_pos) xticks_pos = xticks_pos[ind] xticks_label = xticks_label[ind] plt.xticks(xticks_pos, xticks_label) plt.legend(loc='upper right') plt.ylabel('normalized loss') plt.xlabel('number of iterations') plt.show()
bsd-3-clause
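A compact sketch of the OOB heuristic from the example above: with subsample < 1.0, the negative cumulative sum of oob_improvement_ gives an on-the-fly estimate of the best number of boosting iterations (synthetic data used here for illustration).

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=1000, random_state=0)
clf = GradientBoostingClassifier(n_estimators=300, subsample=0.5,
                                 learning_rate=0.05, random_state=0)
clf.fit(X, y)

# Negative cumulative sum of OOB improvements; its minimum is the OOB estimate
# of the optimal number of boosting iterations.
cumsum = -np.cumsum(clf.oob_improvement_)
print("OOB-estimated best iteration: %d" % (int(np.argmin(cumsum)) + 1))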
Ziqi-Li/bknqgis
pandas/pandas/tests/indexes/datetimes/test_ops.py
4
51790
import pytz import pytest import dateutil import warnings import numpy as np from datetime import timedelta from itertools import product import pandas as pd import pandas._libs.tslib as tslib import pandas.util.testing as tm from pandas.errors import PerformanceWarning from pandas.core.indexes.datetimes import cdate_range from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta, date_range, TimedeltaIndex, _np_version_under1p10, Index, datetime, Float64Index, offsets, bdate_range) from pandas.tseries.offsets import BMonthEnd, CDay, BDay from pandas.tests.test_base import Ops START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) class TestDatetimeIndexOps(Ops): tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore', 'dateutil/US/Pacific'] def setup_method(self, method): super(TestDatetimeIndexOps, self).setup_method(method) mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)) self.is_valid_objs = [o for o in self.objs if mask(o)] self.not_valid_objs = [o for o in self.objs if not mask(o)] def test_ops_properties(self): f = lambda x: isinstance(x, DatetimeIndex) self.check_ops_properties(DatetimeIndex._field_ops, f) self.check_ops_properties(DatetimeIndex._object_ops, f) self.check_ops_properties(DatetimeIndex._bool_ops, f) def test_ops_properties_basic(self): # sanity check that the behavior didn't change # GH7206 for op in ['year', 'day', 'second', 'weekday']: pytest.raises(TypeError, lambda x: getattr(self.dt_series, op)) # attribute access should still work! s = Series(dict(year=2000, month=1, day=10)) assert s.year == 2000 assert s.month == 1 assert s.day == 10 pytest.raises(AttributeError, lambda: s.weekday) def test_asobject_tolist(self): idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx') expected_list = [Timestamp('2013-01-31'), Timestamp('2013-02-28'), Timestamp('2013-03-31'), Timestamp('2013-04-30')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject assert isinstance(result, Index) assert result.dtype == object tm.assert_index_equal(result, expected) assert result.name == expected.name assert idx.tolist() == expected_list idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo') expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'), Timestamp('2013-02-28', tz='Asia/Tokyo'), Timestamp('2013-03-31', tz='Asia/Tokyo'), Timestamp('2013-04-30', tz='Asia/Tokyo')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject assert isinstance(result, Index) assert result.dtype == object tm.assert_index_equal(result, expected) assert result.name == expected.name assert idx.tolist() == expected_list idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, datetime(2013, 1, 4)], name='idx') expected_list = [Timestamp('2013-01-01'), Timestamp('2013-01-02'), pd.NaT, Timestamp('2013-01-04')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject assert isinstance(result, Index) assert result.dtype == object tm.assert_index_equal(result, expected) assert result.name == expected.name assert idx.tolist() == expected_list def test_minmax(self): for tz in self.tz: # monotonic idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], tz=tz) assert idx1.is_monotonic # non-monotonic idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03', '2011-01-02', pd.NaT], tz=tz) assert not idx2.is_monotonic for idx in [idx1, idx2]: assert idx.min() == Timestamp('2011-01-01', tz=tz) 
assert idx.max() == Timestamp('2011-01-03', tz=tz) assert idx.argmin() == 0 assert idx.argmax() == 2 for op in ['min', 'max']: # Return NaT obj = DatetimeIndex([]) assert pd.isna(getattr(obj, op)()) obj = DatetimeIndex([pd.NaT]) assert pd.isna(getattr(obj, op)()) obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT]) assert pd.isna(getattr(obj, op)()) def test_numpy_minmax(self): dr = pd.date_range(start='2016-01-15', end='2016-01-20') assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D') assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D') errmsg = "the 'out' parameter is not supported" tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0) tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0) assert np.argmin(dr) == 0 assert np.argmax(dr) == 5 if not _np_version_under1p10: errmsg = "the 'out' parameter is not supported" tm.assert_raises_regex( ValueError, errmsg, np.argmin, dr, out=0) tm.assert_raises_regex( ValueError, errmsg, np.argmax, dr, out=0) def test_round(self): for tz in self.tz: rng = pd.date_range(start='2016-01-01', periods=5, freq='30Min', tz=tz) elt = rng[1] expected_rng = DatetimeIndex([ Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'), ]) expected_elt = expected_rng[1] tm.assert_index_equal(rng.round(freq='H'), expected_rng) assert elt.round(freq='H') == expected_elt msg = pd.tseries.frequencies._INVALID_FREQ_ERROR with tm.assert_raises_regex(ValueError, msg): rng.round(freq='foo') with tm.assert_raises_regex(ValueError, msg): elt.round(freq='foo') msg = "<MonthEnd> is a non-fixed frequency" tm.assert_raises_regex(ValueError, msg, rng.round, freq='M') tm.assert_raises_regex(ValueError, msg, elt.round, freq='M') # GH 14440 & 15578 index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz) result = index.round('ms') expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz) tm.assert_index_equal(result, expected) for freq in ['us', 'ns']: tm.assert_index_equal(index, index.round(freq)) index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz) result = index.round('ms') expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz) tm.assert_index_equal(result, expected) index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031']) result = index.round('10ns') expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030']) tm.assert_index_equal(result, expected) with tm.assert_produces_warning(): ts = '2016-10-17 12:00:00.001501031' pd.DatetimeIndex([ts]).round('1010ns') def test_repeat_range(self): rng = date_range('1/1/2000', '1/1/2001') result = rng.repeat(5) assert result.freq is None assert len(result) == 5 * len(rng) for tz in self.tz: index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz) exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-02', '2001-01-02'], tz=tz) for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) assert res.freq is None index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz) exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-03', '2001-01-03'], tz=tz) for res in [index.repeat(2), np.repeat(index, 2)]: tm.assert_index_equal(res, exp) assert res.freq is None index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'], tz=tz) exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01', 'NaT', 'NaT', 'NaT', '2003-01-01', 
'2003-01-01', '2003-01-01'], tz=tz) for res in [index.repeat(3), np.repeat(index, 3)]: tm.assert_index_equal(res, exp) assert res.freq is None def test_repeat(self): reps = 2 msg = "the 'axis' parameter is not supported" for tz in self.tz: rng = pd.date_range(start='2016-01-01', periods=2, freq='30Min', tz=tz) expected_rng = DatetimeIndex([ Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'), Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'), ]) res = rng.repeat(reps) tm.assert_index_equal(res, expected_rng) assert res.freq is None tm.assert_index_equal(np.repeat(rng, reps), expected_rng) tm.assert_raises_regex(ValueError, msg, np.repeat, rng, reps, axis=1) def test_representation(self): idx = [] idx.append(DatetimeIndex([], freq='D')) idx.append(DatetimeIndex(['2011-01-01'], freq='D')) idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')) idx.append(DatetimeIndex( ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')) idx.append(DatetimeIndex( ['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00' ], freq='H', tz='Asia/Tokyo')) idx.append(DatetimeIndex( ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern')) idx.append(DatetimeIndex( ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC')) exp = [] exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""") exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', " "freq='D')") exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], " "dtype='datetime64[ns]', freq='D')") exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " "dtype='datetime64[ns]', freq='D')") exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', " "'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']" ", dtype='datetime64[ns, Asia/Tokyo]', freq='H')") exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', " "'2011-01-01 10:00:00-05:00', 'NaT'], " "dtype='datetime64[ns, US/Eastern]', freq=None)") exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', " "'2011-01-01 10:00:00+00:00', 'NaT'], " "dtype='datetime64[ns, UTC]', freq=None)""") with pd.option_context('display.width', 300): for indx, expected in zip(idx, exp): for func in ['__repr__', '__unicode__', '__str__']: result = getattr(indx, func)() assert result == expected def test_representation_to_series(self): idx1 = DatetimeIndex([], freq='D') idx2 = DatetimeIndex(['2011-01-01'], freq='D') idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = DatetimeIndex( ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo') idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern') idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15']) exp1 = """Series([], dtype: datetime64[ns])""" exp2 = """0 2011-01-01 dtype: datetime64[ns]""" exp3 = """0 2011-01-01 1 2011-01-02 dtype: datetime64[ns]""" exp4 = """0 2011-01-01 1 2011-01-02 2 2011-01-03 dtype: datetime64[ns]""" exp5 = """0 2011-01-01 09:00:00+09:00 1 2011-01-01 10:00:00+09:00 2 2011-01-01 11:00:00+09:00 dtype: datetime64[ns, Asia/Tokyo]""" exp6 = """0 2011-01-01 09:00:00-05:00 1 2011-01-01 10:00:00-05:00 2 NaT dtype: datetime64[ns, US/Eastern]""" exp7 = """0 2011-01-01 09:00:00 1 2011-01-02 10:15:00 dtype: datetime64[ns]""" with pd.option_context('display.width', 300): for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7], 
[exp1, exp2, exp3, exp4, exp5, exp6, exp7]): result = repr(Series(idx)) assert result == expected def test_summary(self): # GH9116 idx1 = DatetimeIndex([], freq='D') idx2 = DatetimeIndex(['2011-01-01'], freq='D') idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = DatetimeIndex( ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo') idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern') exp1 = """DatetimeIndex: 0 entries Freq: D""" exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01 Freq: D""" exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02 Freq: D""" exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03 Freq: D""" exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 " "to 2011-01-01 11:00:00+09:00\n" "Freq: H") exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6], [exp1, exp2, exp3, exp4, exp5, exp6]): result = idx.summary() assert result == expected def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], ['day', 'day', 'day', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond']): for tz in self.tz: idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz) assert idx.resolution == expected def test_union(self): for tz in self.tz: # union rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz) rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz) rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other3 = pd.DatetimeIndex([], tz=tz) expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3)]: result_union = rng.union(other) tm.assert_index_equal(result_union, expected) def test_add_iadd(self): for tz in self.tz: # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) result = rng + delta expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz) tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) # int rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) result = rng + 1 expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(result, expected) rng += 1 tm.assert_index_equal(rng, expected) idx = DatetimeIndex(['2011-01-01', '2011-01-02']) msg = "cannot add a datelike to a DatetimeIndex" with tm.assert_raises_regex(TypeError, msg): idx + Timestamp('2011-01-01') with tm.assert_raises_regex(TypeError, msg): Timestamp('2011-01-01') + idx def test_add_dti_dti(self): # previously performed setop (deprecated in 0.16.0), now raises # TypeError (GH14164) dti = date_range('20130101', periods=3) dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern') with pytest.raises(TypeError): dti + dti with pytest.raises(TypeError): dti_tz + dti_tz with pytest.raises(TypeError): dti_tz + dti with pytest.raises(TypeError): dti + dti_tz def 
test_difference(self): for tz in self.tz: # diff rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz) rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other3 = pd.DatetimeIndex([], tz=tz) expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3)]: result_diff = rng.difference(other) tm.assert_index_equal(result_diff, expected) def test_sub_isub(self): for tz in self.tz: # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz) result = rng - delta tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) # int rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) result = rng - 1 expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(result, expected) rng -= 1 tm.assert_index_equal(rng, expected) def test_sub_dti_dti(self): # previously performed setop (deprecated in 0.16.0), now changed to # return subtraction -> TimeDeltaIndex (GH ...) dti = date_range('20130101', periods=3) dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern') dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC') expected = TimedeltaIndex([0, 0, 0]) result = dti - dti tm.assert_index_equal(result, expected) result = dti_tz - dti_tz tm.assert_index_equal(result, expected) with pytest.raises(TypeError): dti_tz - dti with pytest.raises(TypeError): dti - dti_tz with pytest.raises(TypeError): dti_tz - dti_tz2 # isub dti -= dti tm.assert_index_equal(dti, expected) # different length raises ValueError dti1 = date_range('20130101', periods=3) dti2 = date_range('20130101', periods=4) with pytest.raises(ValueError): dti1 - dti2 # NaN propagation dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03']) dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan]) expected = TimedeltaIndex(['1 days', np.nan, np.nan]) result = dti2 - dti1 tm.assert_index_equal(result, expected) def test_sub_period(self): # GH 13078 # not supported, check TypeError p = pd.Period('2011-01-01', freq='D') for freq in [None, 'D']: idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq) with pytest.raises(TypeError): idx - p with pytest.raises(TypeError): p - idx def test_comp_nat(self): left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]) right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]) for l, r in [(left, right), (left.asobject, right.asobject)]: result = l == r expected = np.array([False, False, True]) tm.assert_numpy_array_equal(result, expected) result = l != r expected = np.array([True, True, False]) tm.assert_numpy_array_equal(result, expected) expected = np.array([False, False, False]) tm.assert_numpy_array_equal(l == pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT == r, expected) expected = np.array([True, True, True]) tm.assert_numpy_array_equal(l != pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT != l, expected) expected 
= np.array([False, False, False]) tm.assert_numpy_array_equal(l < pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT > l, expected) def test_value_counts_unique(self): # GH 7735 for tz in self.tz: idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10) # create repeated values, 'n'th element is repeated by n+1 times idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz) exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz) expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(idx.unique(), expected) idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz) exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz) expected = Series([3, 2], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(), expected) exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz) expected = Series([3, 2, 1], index=exp_idx) for obj in [idx, Series(idx)]: tm.assert_series_equal(obj.value_counts(dropna=False), expected) tm.assert_index_equal(idx.unique(), exp_idx) def test_nonunique_contains(self): # GH 9512 for idx in map(DatetimeIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1], ['2015', '2015', '2016'], ['2015', '2015', '2014'])): assert idx[0] in idx def test_order(self): # with freq idx1 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D', name='idx') idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo', name='tzidx') for idx in [idx1, idx2]: ordered = idx.sort_values() tm.assert_index_equal(ordered, idx) assert ordered.freq == idx.freq ordered = idx.sort_values(ascending=False) expected = idx[::-1] tm.assert_index_equal(ordered, expected) assert ordered.freq == expected.freq assert ordered.freq.n == -1 ordered, indexer = idx.sort_values(return_indexer=True) tm.assert_index_equal(ordered, idx) tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False) assert ordered.freq == idx.freq ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) expected = idx[::-1] tm.assert_index_equal(ordered, expected) tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False) assert ordered.freq == expected.freq assert ordered.freq.n == -1 # without freq for tz in self.tz: idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], tz=tz, name='idx1') exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], tz=tz, name='idx1') idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], tz=tz, name='idx2') exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], tz=tz, name='idx2') idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05', '2011-01-02', pd.NaT], tz=tz, name='idx3') exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03', '2011-01-05'], tz=tz, name='idx3') for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]: ordered = idx.sort_values() tm.assert_index_equal(ordered, expected) assert ordered.freq is None ordered = idx.sort_values(ascending=False) tm.assert_index_equal(ordered, expected[::-1]) assert ordered.freq is None ordered, indexer = 
idx.sort_values(return_indexer=True) tm.assert_index_equal(ordered, expected) exp = np.array([0, 4, 3, 1, 2]) tm.assert_numpy_array_equal(indexer, exp, check_dtype=False) assert ordered.freq is None ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) tm.assert_index_equal(ordered, expected[::-1]) exp = np.array([2, 1, 3, 4, 0]) tm.assert_numpy_array_equal(indexer, exp, check_dtype=False) assert ordered.freq is None def test_getitem(self): idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx') for idx in [idx1, idx2]: result = idx[0] assert result == Timestamp('2011-01-01', tz=idx.tz) result = idx[0:5] expected = pd.date_range('2011-01-01', '2011-01-05', freq='D', tz=idx.tz, name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx[0:10:2] expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D', tz=idx.tz, name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx[-20:-5:3] expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D', tz=idx.tz, name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx[4::-1] expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03', '2011-01-02', '2011-01-01'], freq='-1D', tz=idx.tz, name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq def test_drop_duplicates_metadata(self): # GH 10115 idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') result = idx.drop_duplicates() tm.assert_index_equal(idx, result) assert idx.freq == result.freq idx_dup = idx.append(idx) assert idx_dup.freq is None # freq is reset result = idx_dup.drop_duplicates() tm.assert_index_equal(idx, result) assert result.freq is None def test_drop_duplicates(self): # to check Index/Series compat base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') idx = base.append(base[:5]) res = idx.drop_duplicates() tm.assert_index_equal(res, base) res = Series(idx).drop_duplicates() tm.assert_series_equal(res, Series(base)) res = idx.drop_duplicates(keep='last') exp = base[5:].append(base[:5]) tm.assert_index_equal(res, exp) res = Series(idx).drop_duplicates(keep='last') tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36))) res = idx.drop_duplicates(keep=False) tm.assert_index_equal(res, base[5:]) res = Series(idx).drop_duplicates(keep=False) tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31))) def test_take(self): # GH 10295 idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx') for idx in [idx1, idx2]: result = idx.take([0]) assert result == Timestamp('2011-01-01', tz=idx.tz) result = idx.take([0, 1, 2]) expected = pd.date_range('2011-01-01', '2011-01-03', freq='D', tz=idx.tz, name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx.take([0, 2, 4]) expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D', tz=idx.tz, name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx.take([7, 4, 1]) expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D', tz=idx.tz, name='idx') tm.assert_index_equal(result, expected) assert result.freq == expected.freq result = idx.take([3, 2, 5]) expected = DatetimeIndex(['2011-01-04', '2011-01-03', '2011-01-06'], 
freq=None, tz=idx.tz, name='idx') tm.assert_index_equal(result, expected) assert result.freq is None result = idx.take([-3, 2, 5]) expected = DatetimeIndex(['2011-01-29', '2011-01-03', '2011-01-06'], freq=None, tz=idx.tz, name='idx') tm.assert_index_equal(result, expected) assert result.freq is None def test_take_invalid_kwargs(self): idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') indices = [1, 6, 5, 9, 10, 13, 15, 3] msg = r"take\(\) got an unexpected keyword argument 'foo'" tm.assert_raises_regex(TypeError, msg, idx.take, indices, foo=2) msg = "the 'out' parameter is not supported" tm.assert_raises_regex(ValueError, msg, idx.take, indices, out=indices) msg = "the 'mode' parameter is not supported" tm.assert_raises_regex(ValueError, msg, idx.take, indices, mode='clip') def test_infer_freq(self): # GH 11018 for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D', '-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']: idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10) result = pd.DatetimeIndex(idx.asi8, freq='infer') tm.assert_index_equal(idx, result) assert result.freq == freq def test_nat_new(self): idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x') result = idx._nat_new() exp = pd.DatetimeIndex([pd.NaT] * 5, name='x') tm.assert_index_equal(result, exp) result = idx._nat_new(box=False) exp = np.array([tslib.iNaT] * 5, dtype=np.int64) tm.assert_numpy_array_equal(result, exp) def test_shift(self): # GH 9903 for tz in self.tz: idx = pd.DatetimeIndex([], name='xxx', tz=tz) tm.assert_index_equal(idx.shift(0, freq='H'), idx) tm.assert_index_equal(idx.shift(3, freq='H'), idx) idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00' '2011-01-01 12:00'], name='xxx', tz=tz) tm.assert_index_equal(idx.shift(0, freq='H'), idx) exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00' '2011-01-01 15:00'], name='xxx', tz=tz) tm.assert_index_equal(idx.shift(3, freq='H'), exp) exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00' '2011-01-01 09:00'], name='xxx', tz=tz) tm.assert_index_equal(idx.shift(-3, freq='H'), exp) def test_nat(self): assert pd.DatetimeIndex._na_value is pd.NaT assert pd.DatetimeIndex([])._na_value is pd.NaT for tz in [None, 'US/Eastern', 'UTC']: idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz) assert idx._can_hold_na tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) assert not idx.hasnans tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp)) idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz) assert idx._can_hold_na tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) assert idx.hasnans tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp)) def test_equals(self): # GH 13107 for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']: idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT']) assert idx.equals(idx) assert idx.equals(idx.copy()) assert idx.equals(idx.asobject) assert idx.asobject.equals(idx) assert idx.asobject.equals(idx.asobject) assert not idx.equals(list(idx)) assert not idx.equals(pd.Series(idx)) idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'], tz='US/Pacific') assert not idx.equals(idx2) assert not idx.equals(idx2.copy()) assert not idx.equals(idx2.asobject) assert not idx.asobject.equals(idx2) assert not idx.equals(list(idx2)) assert not idx.equals(pd.Series(idx2)) # same internal, different tz idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific') tm.assert_numpy_array_equal(idx.asi8, 
idx3.asi8) assert not idx.equals(idx3) assert not idx.equals(idx3.copy()) assert not idx.equals(idx3.asobject) assert not idx.asobject.equals(idx3) assert not idx.equals(list(idx3)) assert not idx.equals(pd.Series(idx3)) class TestDateTimeIndexToJulianDate(object): def test_1700(self): r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5, 2345901.5]) r2 = date_range(start=Timestamp('1710-10-01'), periods=5, freq='D').to_julian_date() assert isinstance(r2, Float64Index) tm.assert_index_equal(r1, r2) def test_2000(self): r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5, 2451605.5]) r2 = date_range(start=Timestamp('2000-02-27'), periods=5, freq='D').to_julian_date() assert isinstance(r2, Float64Index) tm.assert_index_equal(r1, r2) def test_hour(self): r1 = Float64Index( [2451601.5, 2451601.5416666666666666, 2451601.5833333333333333, 2451601.625, 2451601.6666666666666666]) r2 = date_range(start=Timestamp('2000-02-27'), periods=5, freq='H').to_julian_date() assert isinstance(r2, Float64Index) tm.assert_index_equal(r1, r2) def test_minute(self): r1 = Float64Index( [2451601.5, 2451601.5006944444444444, 2451601.5013888888888888, 2451601.5020833333333333, 2451601.5027777777777777]) r2 = date_range(start=Timestamp('2000-02-27'), periods=5, freq='T').to_julian_date() assert isinstance(r2, Float64Index) tm.assert_index_equal(r1, r2) def test_second(self): r1 = Float64Index( [2451601.5, 2451601.500011574074074, 2451601.5000231481481481, 2451601.5000347222222222, 2451601.5000462962962962]) r2 = date_range(start=Timestamp('2000-02-27'), periods=5, freq='S').to_julian_date() assert isinstance(r2, Float64Index) tm.assert_index_equal(r1, r2) # GH 10699 @pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex], [tm.assert_series_equal, tm.assert_index_equal])) def test_datetime64_with_DateOffset(klass, assert_func): s = klass(date_range('2000-01-01', '2000-01-31'), name='a') result = s + pd.DateOffset(years=1) result2 = pd.DateOffset(years=1) + s exp = klass(date_range('2001-01-01', '2001-01-31'), name='a') assert_func(result, exp) assert_func(result2, exp) result = s - pd.DateOffset(years=1) exp = klass(date_range('1999-01-01', '1999-01-31'), name='a') assert_func(result, exp) s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), pd.Timestamp('2000-02-15', tz='US/Central')], name='a') result = s + pd.offsets.Day() result2 = pd.offsets.Day() + s exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'), Timestamp('2000-02-16', tz='US/Central')], name='a') assert_func(result, exp) assert_func(result2, exp) s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), pd.Timestamp('2000-02-15', tz='US/Central')], name='a') result = s + pd.offsets.MonthEnd() result2 = pd.offsets.MonthEnd() + s exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'), Timestamp('2000-02-29', tz='US/Central')], name='a') assert_func(result, exp) assert_func(result2, exp) # array of offsets - valid for Series only if klass is Series: with tm.assert_produces_warning(PerformanceWarning): s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')]) result = s + Series([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]) exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29') ]) assert_func(result, exp) # same offset result = s + Series([pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]) exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')]) assert_func(result, exp) s = klass([Timestamp('2000-01-05 00:15:00'), Timestamp('2000-01-31 00:23:00'), 
Timestamp('2000-01-01'), Timestamp('2000-03-31'), Timestamp('2000-02-29'), Timestamp('2000-12-31'), Timestamp('2000-05-15'), Timestamp('2001-06-15')]) # DateOffset relativedelta fastpath relative_kwargs = [('years', 2), ('months', 5), ('days', 3), ('hours', 5), ('minutes', 10), ('seconds', 2), ('microseconds', 5)] for i, kwd in enumerate(relative_kwargs): op = pd.DateOffset(**dict([kwd])) assert_func(klass([x + op for x in s]), s + op) assert_func(klass([x - op for x in s]), s - op) op = pd.DateOffset(**dict(relative_kwargs[:i + 1])) assert_func(klass([x + op for x in s]), s + op) assert_func(klass([x - op for x in s]), s - op) # assert these are equal on a piecewise basis offsets = ['YearBegin', ('YearBegin', {'month': 5}), 'YearEnd', ('YearEnd', {'month': 5}), 'MonthBegin', 'MonthEnd', 'SemiMonthEnd', 'SemiMonthBegin', 'Week', ('Week', {'weekday': 3}), 'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin', 'CustomBusinessDay', 'CDay', 'CBMonthEnd', 'CBMonthBegin', 'BMonthBegin', 'BMonthEnd', 'BusinessHour', 'BYearBegin', 'BYearEnd', 'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}), ('FY5253Quarter', {'qtr_with_extra_week': 1, 'startingMonth': 1, 'weekday': 2, 'variation': 'nearest'}), ('FY5253', {'weekday': 0, 'startingMonth': 2, 'variation': 'nearest'}), ('WeekOfMonth', {'weekday': 2, 'week': 2}), 'Easter', ('DateOffset', {'day': 4}), ('DateOffset', {'month': 5})] with warnings.catch_warnings(record=True): for normalize in (True, False): for do in offsets: if isinstance(do, tuple): do, kwargs = do else: do = do kwargs = {} for n in [0, 5]: if (do in ['WeekOfMonth', 'LastWeekOfMonth', 'FY5253Quarter', 'FY5253'] and n == 0): continue op = getattr(pd.offsets, do)(n, normalize=normalize, **kwargs) assert_func(klass([x + op for x in s]), s + op) assert_func(klass([x - op for x in s]), s - op) assert_func(klass([op + x for x in s]), op + s) @pytest.mark.parametrize('years,months', product([-1, 0, 1], [-2, 0, 2])) def test_shift_months(years, months): s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'), Timestamp('2000-01-31 00:23:00'), Timestamp('2000-01-01'), Timestamp('2000-02-29'), Timestamp('2000-12-31')]) actual = DatetimeIndex(tslib.shift_months(s.asi8, years * 12 + months)) expected = DatetimeIndex([x + offsets.DateOffset( years=years, months=months) for x in s]) tm.assert_index_equal(actual, expected) class TestBusinessDatetimeIndex(object): def setup_method(self, method): self.rng = bdate_range(START, END) def test_comparison(self): d = self.rng[10] comp = self.rng > d assert comp[11] assert not comp[9] def test_pickle_unpickle(self): unpickled = tm.round_trip_pickle(self.rng) assert unpickled.offset is not None def test_copy(self): cp = self.rng.copy() repr(cp) tm.assert_index_equal(cp, self.rng) def test_repr(self): # only really care that it works repr(self.rng) def test_getitem(self): smaller = self.rng[:5] exp = DatetimeIndex(self.rng.view(np.ndarray)[:5]) tm.assert_index_equal(smaller, exp) assert smaller.offset == self.rng.offset sliced = self.rng[::5] assert sliced.offset == BDay() * 5 fancy_indexed = self.rng[[4, 3, 2, 1, 0]] assert len(fancy_indexed) == 5 assert isinstance(fancy_indexed, DatetimeIndex) assert fancy_indexed.freq is None # 32-bit vs. 
64-bit platforms assert self.rng[4] == self.rng[np.int_(4)] def test_getitem_matplotlib_hackaround(self): values = self.rng[:, None] expected = self.rng.values[:, None] tm.assert_numpy_array_equal(values, expected) def test_shift(self): shifted = self.rng.shift(5) assert shifted[0] == self.rng[5] assert shifted.offset == self.rng.offset shifted = self.rng.shift(-5) assert shifted[5] == self.rng[0] assert shifted.offset == self.rng.offset shifted = self.rng.shift(0) assert shifted[0] == self.rng[0] assert shifted.offset == self.rng.offset rng = date_range(START, END, freq=BMonthEnd()) shifted = rng.shift(1, freq=BDay()) assert shifted[0] == rng[0] + BDay() def test_summary(self): self.rng.summary() self.rng[2:2].summary() def test_summary_pytz(self): bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary() def test_summary_dateutil(self): bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary() def test_equals(self): assert not self.rng.equals(list(self.rng)) def test_identical(self): t1 = self.rng.copy() t2 = self.rng.copy() assert t1.identical(t2) # name t1 = t1.rename('foo') assert t1.equals(t2) assert not t1.identical(t2) t2 = t2.rename('foo') assert t1.identical(t2) # freq t2v = Index(t2.values) assert t1.equals(t2v) assert not t1.identical(t2v) class TestCustomDatetimeIndex(object): def setup_method(self, method): self.rng = cdate_range(START, END) def test_comparison(self): d = self.rng[10] comp = self.rng > d assert comp[11] assert not comp[9] def test_copy(self): cp = self.rng.copy() repr(cp) tm.assert_index_equal(cp, self.rng) def test_repr(self): # only really care that it works repr(self.rng) def test_getitem(self): smaller = self.rng[:5] exp = DatetimeIndex(self.rng.view(np.ndarray)[:5]) tm.assert_index_equal(smaller, exp) assert smaller.offset == self.rng.offset sliced = self.rng[::5] assert sliced.offset == CDay() * 5 fancy_indexed = self.rng[[4, 3, 2, 1, 0]] assert len(fancy_indexed) == 5 assert isinstance(fancy_indexed, DatetimeIndex) assert fancy_indexed.freq is None # 32-bit vs. 64-bit platforms assert self.rng[4] == self.rng[np.int_(4)] def test_getitem_matplotlib_hackaround(self): values = self.rng[:, None] expected = self.rng.values[:, None] tm.assert_numpy_array_equal(values, expected) def test_shift(self): shifted = self.rng.shift(5) assert shifted[0] == self.rng[5] assert shifted.offset == self.rng.offset shifted = self.rng.shift(-5) assert shifted[5] == self.rng[0] assert shifted.offset == self.rng.offset shifted = self.rng.shift(0) assert shifted[0] == self.rng[0] assert shifted.offset == self.rng.offset # PerformanceWarning with warnings.catch_warnings(record=True): rng = date_range(START, END, freq=BMonthEnd()) shifted = rng.shift(1, freq=CDay()) assert shifted[0] == rng[0] + CDay() def test_pickle_unpickle(self): unpickled = tm.round_trip_pickle(self.rng) assert unpickled.offset is not None def test_summary(self): self.rng.summary() self.rng[2:2].summary() def test_summary_pytz(self): cdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary() def test_summary_dateutil(self): cdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary() def test_equals(self): assert not self.rng.equals(list(self.rng))
gpl-2.0
Jorge-C/bipy
doc/sphinxext/numpydoc/numpydoc/plot_directive.py
89
20530
""" A special directive for generating a matplotlib plot. .. warning:: This is a hacked version of plot_directive.py from Matplotlib. It's very much subject to change! Usage ----- Can be used like this:: .. plot:: examples/example.py .. plot:: import matplotlib.pyplot as plt plt.plot([1,2,3], [4,5,6]) .. plot:: A plotting example: >>> import matplotlib.pyplot as plt >>> plt.plot([1,2,3], [4,5,6]) The content is interpreted as doctest formatted if it has a line starting with ``>>>``. The ``plot`` directive supports the options format : {'python', 'doctest'} Specify the format of the input include-source : bool Whether to display the source code. Default can be changed in conf.py and the ``image`` directive options ``alt``, ``height``, ``width``, ``scale``, ``align``, ``class``. Configuration options --------------------- The plot directive has the following configuration options: plot_include_source Default value for the include-source option plot_pre_code Code that should be executed before each plot. plot_basedir Base directory, to which plot:: file names are relative to. (If None or empty, file names are relative to the directoly where the file containing the directive is.) plot_formats File formats to generate. List of tuples or strings:: [(suffix, dpi), suffix, ...] that determine the file format and the DPI. For entries whose DPI was omitted, sensible defaults are chosen. plot_html_show_formats Whether to show links to the files in HTML. TODO ---- * Refactor Latex output; now it's plain images, but it would be nice to make them appear side-by-side, or in floats. """ from __future__ import division, absolute_import, print_function import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback import sphinx if sys.version_info[0] >= 3: from io import StringIO else: from io import StringIO import warnings warnings.warn("A plot_directive module is also available under " "matplotlib.sphinxext; expect this numpydoc.plot_directive " "module to be deprecated after relevant features have been " "integrated there.", FutureWarning, stacklevel=2) #------------------------------------------------------------------------------ # Registration hook #------------------------------------------------------------------------------ def setup(app): setup.app = app setup.config = app.config setup.confdir = app.confdir app.add_config_value('plot_pre_code', '', True) app.add_config_value('plot_include_source', False, True) app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) app.add_config_value('plot_basedir', None, True) app.add_config_value('plot_html_show_formats', True, True) app.add_directive('plot', plot_directive, True, (0, 1, False), **plot_directive_options) #------------------------------------------------------------------------------ # plot:: directive #------------------------------------------------------------------------------ from docutils.parsers.rst import directives from docutils import nodes def plot_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): return run(arguments, content, options, state_machine, state, lineno) plot_directive.__doc__ = __doc__ def _option_boolean(arg): if not arg or not arg.strip(): # no argument given, assume used as a flag return True elif arg.strip().lower() in ('no', '0', 'false'): return False elif arg.strip().lower() in ('yes', '1', 'true'): return True else: raise ValueError('"%s" unknown boolean' % arg) def _option_format(arg): return directives.choice(arg, ('python', 'lisp')) 
def _option_align(arg): return directives.choice(arg, ("top", "middle", "bottom", "left", "center", "right")) plot_directive_options = {'alt': directives.unchanged, 'height': directives.length_or_unitless, 'width': directives.length_or_percentage_or_unitless, 'scale': directives.nonnegative_int, 'align': _option_align, 'class': directives.class_option, 'include-source': _option_boolean, 'format': _option_format, } #------------------------------------------------------------------------------ # Generating output #------------------------------------------------------------------------------ from docutils import nodes, utils try: # Sphinx depends on either Jinja or Jinja2 import jinja2 def format_template(template, **kw): return jinja2.Template(template).render(**kw) except ImportError: import jinja def format_template(template, **kw): return jinja.from_string(template, **kw) TEMPLATE = """ {{ source_code }} {{ only_html }} {% if source_link or (html_show_formats and not multi_image) %} ( {%- if source_link -%} `Source code <{{ source_link }}>`__ {%- endif -%} {%- if html_show_formats and not multi_image -%} {%- for img in images -%} {%- for fmt in img.formats -%} {%- if source_link or not loop.first -%}, {% endif -%} `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ {%- endfor -%} {%- endfor -%} {%- endif -%} ) {% endif %} {% for img in images %} .. figure:: {{ build_dir }}/{{ img.basename }}.png {%- for option in options %} {{ option }} {% endfor %} {% if html_show_formats and multi_image -%} ( {%- for fmt in img.formats -%} {%- if not loop.first -%}, {% endif -%} `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ {%- endfor -%} ) {%- endif -%} {% endfor %} {{ only_latex }} {% for img in images %} .. image:: {{ build_dir }}/{{ img.basename }}.pdf {% endfor %} """ class ImageFile(object): def __init__(self, basename, dirname): self.basename = basename self.dirname = dirname self.formats = [] def filename(self, format): return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) def filenames(self): return [self.filename(fmt) for fmt in self.formats] def run(arguments, content, options, state_machine, state, lineno): if arguments and content: raise RuntimeError("plot:: directive can't have both args and content") document = state_machine.document config = document.settings.env.config options.setdefault('include-source', config.plot_include_source) # determine input rst_file = document.attributes['source'] rst_dir = os.path.dirname(rst_file) if arguments: if not config.plot_basedir: source_file_name = os.path.join(rst_dir, directives.uri(arguments[0])) else: source_file_name = os.path.join(setup.confdir, config.plot_basedir, directives.uri(arguments[0])) code = open(source_file_name, 'r').read() output_base = os.path.basename(source_file_name) else: source_file_name = rst_file code = textwrap.dedent("\n".join(map(str, content))) counter = document.attributes.get('_plot_counter', 0) + 1 document.attributes['_plot_counter'] = counter base, ext = os.path.splitext(os.path.basename(source_file_name)) output_base = '%s-%d.py' % (base, counter) base, source_ext = os.path.splitext(output_base) if source_ext in ('.py', '.rst', '.txt'): output_base = base else: source_ext = '' # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames output_base = output_base.replace('.', '-') # is it in doctest format? 
is_doctest = contains_doctest(code) if 'format' in options: if options['format'] == 'python': is_doctest = False else: is_doctest = True # determine output directory name fragment source_rel_name = relpath(source_file_name, setup.confdir) source_rel_dir = os.path.dirname(source_rel_name) while source_rel_dir.startswith(os.path.sep): source_rel_dir = source_rel_dir[1:] # build_dir: where to place output files (temporarily) build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), 'plot_directive', source_rel_dir) if not os.path.exists(build_dir): os.makedirs(build_dir) # output_dir: final location in the builder's directory dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, source_rel_dir)) # how to link to files from the RST file dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), source_rel_dir).replace(os.path.sep, '/') build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') source_link = dest_dir_link + '/' + output_base + source_ext # make figures try: results = makefig(code, source_file_name, build_dir, output_base, config) errors = [] except PlotError as err: reporter = state.memo.reporter sm = reporter.system_message( 2, "Exception occurred in plotting %s: %s" % (output_base, err), line=lineno) results = [(code, [])] errors = [sm] # generate output restructuredtext total_lines = [] for j, (code_piece, images) in enumerate(results): if options['include-source']: if is_doctest: lines = [''] lines += [row.rstrip() for row in code_piece.split('\n')] else: lines = ['.. code-block:: python', ''] lines += [' %s' % row.rstrip() for row in code_piece.split('\n')] source_code = "\n".join(lines) else: source_code = "" opts = [':%s: %s' % (key, val) for key, val in list(options.items()) if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] only_html = ".. only:: html" only_latex = ".. only:: latex" if j == 0: src_link = source_link else: src_link = None result = format_template( TEMPLATE, dest_dir=dest_dir_link, build_dir=build_dir_link, source_link=src_link, multi_image=len(images) > 1, only_html=only_html, only_latex=only_latex, options=opts, images=images, source_code=source_code, html_show_formats=config.plot_html_show_formats) total_lines.extend(result.split("\n")) total_lines.extend("\n") if total_lines: state_machine.insert_input(total_lines, source=source_file_name) # copy image files to builder's output directory if not os.path.exists(dest_dir): os.makedirs(dest_dir) for code_piece, images in results: for img in images: for fn in img.filenames(): shutil.copyfile(fn, os.path.join(dest_dir, os.path.basename(fn))) # copy script (if necessary) if source_file_name == rst_file: target_name = os.path.join(dest_dir, output_base + source_ext) f = open(target_name, 'w') f.write(unescape_doctest(code)) f.close() return errors #------------------------------------------------------------------------------ # Run code and capture figures #------------------------------------------------------------------------------ import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.image as image from matplotlib import _pylab_helpers import exceptions def contains_doctest(text): try: # check if it's valid Python as-is compile(text, '<string>', 'exec') return False except SyntaxError: pass r = re.compile(r'^\s*>>>', re.M) m = r.search(text) return bool(m) def unescape_doctest(text): """ Extract code from a piece of text, which contains either Python code or doctests. 
""" if not contains_doctest(text): return text code = "" for line in text.split("\n"): m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) if m: code += m.group(2) + "\n" elif line.strip(): code += "# " + line.strip() + "\n" else: code += "\n" return code def split_code_at_show(text): """ Split code at plt.show() """ parts = [] is_doctest = contains_doctest(text) part = [] for line in text.split("\n"): if (not is_doctest and line.strip() == 'plt.show()') or \ (is_doctest and line.strip() == '>>> plt.show()'): part.append(line) parts.append("\n".join(part)) part = [] else: part.append(line) if "\n".join(part).strip(): parts.append("\n".join(part)) return parts class PlotError(RuntimeError): pass def run_code(code, code_path, ns=None): # Change the working directory to the directory of the example, so # it can get at its data files, if any. pwd = os.getcwd() old_sys_path = list(sys.path) if code_path is not None: dirname = os.path.abspath(os.path.dirname(code_path)) os.chdir(dirname) sys.path.insert(0, dirname) # Redirect stdout stdout = sys.stdout sys.stdout = StringIO() # Reset sys.argv old_sys_argv = sys.argv sys.argv = [code_path] try: try: code = unescape_doctest(code) if ns is None: ns = {} if not ns: exec(setup.config.plot_pre_code, ns) exec(code, ns) except (Exception, SystemExit) as err: raise PlotError(traceback.format_exc()) finally: os.chdir(pwd) sys.argv = old_sys_argv sys.path[:] = old_sys_path sys.stdout = stdout return ns #------------------------------------------------------------------------------ # Generating figures #------------------------------------------------------------------------------ def out_of_date(original, derived): """ Returns True if derivative is out-of-date wrt original, both of which are full file paths. """ return (not os.path.exists(derived) or os.stat(derived).st_mtime < os.stat(original).st_mtime) def makefig(code, code_path, output_dir, output_base, config): """ Run a pyplot script *code* and save the images under *output_dir* with file names derived from *output_base* """ # -- Parse format list default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} formats = [] for fmt in config.plot_formats: if isinstance(fmt, str): formats.append((fmt, default_dpi.get(fmt, 80))) elif type(fmt) in (tuple, list) and len(fmt)==2: formats.append((str(fmt[0]), int(fmt[1]))) else: raise PlotError('invalid image format "%r" in plot_formats' % fmt) # -- Try to determine if all images already exist code_pieces = split_code_at_show(code) # Look for single-figure output files first all_exists = True img = ImageFile(output_base, output_dir) for format, dpi in formats: if out_of_date(code_path, img.filename(format)): all_exists = False break img.formats.append(format) if all_exists: return [(code, [img])] # Then look for multi-figure output files results = [] all_exists = True for i, code_piece in enumerate(code_pieces): images = [] for j in range(1000): img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) for format, dpi in formats: if out_of_date(code_path, img.filename(format)): all_exists = False break img.formats.append(format) # assume that if we have one, we have them all if not all_exists: all_exists = (j > 0) break images.append(img) if not all_exists: break results.append((code_piece, images)) if all_exists: return results # -- We didn't find the files, so build them results = [] ns = {} for i, code_piece in enumerate(code_pieces): # Clear between runs plt.close('all') # Run code run_code(code_piece, code_path, ns) # Collect images images = [] fig_managers 
= _pylab_helpers.Gcf.get_all_fig_managers() for j, figman in enumerate(fig_managers): if len(fig_managers) == 1 and len(code_pieces) == 1: img = ImageFile(output_base, output_dir) else: img = ImageFile("%s_%02d_%02d" % (output_base, i, j), output_dir) images.append(img) for format, dpi in formats: try: figman.canvas.figure.savefig(img.filename(format), dpi=dpi) except exceptions.BaseException as err: raise PlotError(traceback.format_exc()) img.formats.append(format) # Results results.append((code_piece, images)) return results #------------------------------------------------------------------------------ # Relative pathnames #------------------------------------------------------------------------------ try: from os.path import relpath except ImportError: # Copied from Python 2.7 if 'posix' in sys.builtin_module_names: def relpath(path, start=os.path.curdir): """Return a relative version of a path""" from os.path import sep, curdir, join, abspath, commonprefix, \ pardir if not path: raise ValueError("no path specified") start_list = abspath(start).split(sep) path_list = abspath(path).split(sep) # Work out how much of the filepath is shared by start and path. i = len(commonprefix([start_list, path_list])) rel_list = [pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return curdir return join(*rel_list) elif 'nt' in sys.builtin_module_names: def relpath(path, start=os.path.curdir): """Return a relative version of a path""" from os.path import sep, curdir, join, abspath, commonprefix, \ pardir, splitunc if not path: raise ValueError("no path specified") start_list = abspath(start).split(sep) path_list = abspath(path).split(sep) if start_list[0].lower() != path_list[0].lower(): unc_path, rest = splitunc(path) unc_start, rest = splitunc(start) if bool(unc_path) ^ bool(unc_start): raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" % (path, start)) else: raise ValueError("path is on drive %s, start on drive %s" % (path_list[0], start_list[0])) # Work out how much of the filepath is shared by start and path. for i in range(min(len(start_list), len(path_list))): if start_list[i].lower() != path_list[i].lower(): break else: i += 1 rel_list = [pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return curdir return join(*rel_list) else: raise RuntimeError("Unsupported platform (no relpath available!)")
bsd-3-clause
jluttine/bayespy
doc/source/conf.py
1
11116
# -*- coding: utf-8 -*- # # BayesPy documentation build configuration file, created by # sphinx-quickstart on Mon Aug 27 12:22:11 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os ON_RTD = os.environ.get('READTHEDOCS') == 'True' # Use some dummy modules on Read the Docs because they are not available # (requires some C libraries) # http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules if ON_RTD: from unittest.mock import MagicMock MOCK_MODULES = ['h5py'] sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES) # -- General configuration ----------------------------------------------------- import bayespy as bp # Use the 'Read the Docs' theme html_theme = 'sphinx_rtd_theme' # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.doctest', 'numpydoc', 'matplotlib.sphinxext.plot_directive', 'sphinx.ext.autosummary', 'sphinxcontrib.tikz', 'sphinxcontrib.bayesnet', 'sphinxcontrib.bibtex', 'nbsphinx', ] # Image format for math imgmath_image_format = 'svg' # Choose the image processing ‹suite›, either 'Netpbm', 'pdf2svg', 'GhostScript', 'ImageMagick' ('Netpbm' by default): # If you want your documentation to be built on http://readthedocs.org, you have to choose GhostScript. # All suites produce png images, excepted 'pdf2svg' which produces svg. if ON_RTD: tikz_proc_suite = 'GhostScript' else: tikz_proc_suite = 'pdf2svg' if ON_RTD: # For some reason, RTD needs these to be set explicitly although they # should have default values math_number_all = False numpydoc_show_class_members = False # Include TODOs in the documentation? todo_include_todos = True # Generate autosummary stub pages automatically # Or manually: sphinx-autogen -o source/generated source/*.rst #autosummary_generate = False import glob autosummary_generate = glob.glob("*.rst") + glob.glob("*/*.rst") + glob.glob("*/*/*.rst") + glob.glob("*/*/*/*.rst") # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = "BayesPy" copyright = bp.__copyright__ # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = bp.__version__ # The full version, including alpha/beta/rc tags. release = bp.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ '**.ipynb_checkpoints' ] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # Sphinx-TikZ extension tikz_latex_preamble = r""" \usepackage{amsmath} """ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. #html_theme = 'sphinxdoc' #html_theme = 'nature' #html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { # "sidebarwidth": 300 # } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "BayesPy v%s Documentation" % (version) # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. 
#html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'BayesPydoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. 'preamble': r''' \usepackage{tikz} \usepackage{amssymb} \usepackage{amsmath} \usepackage{svg} \usetikzlibrary{shapes} \usetikzlibrary{fit} \usetikzlibrary{chains} \usetikzlibrary{arrows} ''', # Do not use [T1]{fontenc} because it does not work on libre systems 'fontenc': '' } #latex_additional_files = ['images/bayesnet.sty',] # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'BayesPy.tex', u'BayesPy Documentation', u'Jaakko Luttinen', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'bayespy', u'BayesPy Documentation', [u'Jaakko Luttinen'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'BayesPy', u'BayesPy Documentation', u'Jaakko Luttinen', 'BayesPy', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'BayesPy' epub_author = bp.__author__ epub_publisher = bp.__author__ epub_copyright = bp.__copyright__ # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files that should be inserted after the pages created by sphinx. 
# The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Read the docs fails to import _tkinter so use Agg backend import matplotlib matplotlib.use('agg')
mit
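The conf.py record above gates several settings on the READTHEDOCS environment variable: C-backed imports are replaced by unittest.mock.MagicMock objects and the TikZ extension falls back to GhostScript. Below is a hedged sketch of how that code path could be exercised in a local build; the 'doc/source' and 'doc/build/html' paths are placeholders, not taken from the BayesPy repository.

# Hypothetical local dry run of the Read-the-Docs code path in the conf.py above.
# The conf.py itself installs the MagicMock stand-ins once READTHEDOCS is set,
# so emulating the builder only requires exporting that environment variable.
import os
import subprocess

env = dict(os.environ, READTHEDOCS='True')   # what the RTD build worker exports

# Standard Sphinx invocation; 'doc/source' and 'doc/build/html' are placeholder
# paths for the documentation tree and the output directory.
subprocess.run(['sphinx-build', '-b', 'html', 'doc/source', 'doc/build/html'],
               env=env, check=False)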
h2educ/scikit-learn
sklearn/svm/classes.py
126
40114
import warnings import numpy as np from .base import _fit_liblinear, BaseSVC, BaseLibSVM from ..base import BaseEstimator, RegressorMixin from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \ LinearModel from ..feature_selection.from_model import _LearntSelectorMixin from ..utils import check_X_y from ..utils.validation import _num_samples class LinearSVC(BaseEstimator, LinearClassifierMixin, _LearntSelectorMixin, SparseCoefMixin): """Linear Support Vector Classification. Similar to SVC with parameter kernel='linear', but implemented in terms of liblinear rather than libsvm, so it has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples. This class supports both dense and sparse input and the multiclass support is handled according to a one-vs-the-rest scheme. Read more in the :ref:`User Guide <svm_classification>`. Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge') Specifies the loss function. 'hinge' is the standard SVM loss (used e.g. by the SVC class) while 'squared_hinge' is the square of the hinge loss. penalty : string, 'l1' or 'l2' (default='l2') Specifies the norm used in the penalization. The 'l2' penalty is the standard used in SVC. The 'l1' leads to `coef_` vectors that are sparse. dual : bool, (default=True) Select the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features. tol : float, optional (default=1e-4) Tolerance for stopping criteria. multi_class: string, 'ovr' or 'crammer_singer' (default='ovr') Determines the multi-class strategy if `y` contains more than two classes. `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer` optimizes a joint objective over all classes. While `crammer_singer` is interesting from a theoretical perspective as it is consistent, it is seldom used in practice as it rarely leads to better accuracy and is more expensive to compute. If `crammer_singer` is chosen, the options loss, penalty and dual will be ignored. fit_intercept : boolean, optional (default=True) Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (i.e. data is expected to be already centered). intercept_scaling : float, optional (default=1) When self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. class_weight : {dict, 'balanced'}, optional Set the parameter C of class i to class_weight[i]*C for SVC. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` verbose : int, (default=0) Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in liblinear that, if enabled, may not work properly in a multithreaded context. 
random_state : int seed, RandomState instance, or None (default=None) The seed of the pseudo random number generator to use when shuffling the data. max_iter : int, (default=1000) The maximum number of iterations to be run. Attributes ---------- coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is a readonly property derived from `raw_coef_` that follows the internal memory layout of liblinear. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. Notes ----- The underlying C implementation uses a random number generator to select features when fitting the model. It is thus not uncommon to have slightly different results for the same input data. If that happens, try with a smaller ``tol`` parameter. The underlying implementation (liblinear) uses a sparse internal representation for the data that will incur a memory copy. Predict output may not match that of standalone liblinear in certain cases. See :ref:`differences from liblinear <liblinear_differences>` in the narrative documentation. **References:** `LIBLINEAR: A Library for Large Linear Classification <http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__ See also -------- SVC Implementation of Support Vector Machine classifier using libsvm: the kernel can be non-linear but its SMO algorithm does not scale to large number of samples as LinearSVC does. Furthermore SVC multi-class mode is implemented using one vs one scheme while LinearSVC uses one vs the rest. It is possible to implement one vs the rest with SVC by using the :class:`sklearn.multiclass.OneVsRestClassifier` wrapper. Finally SVC can fit dense data without memory copy if the input is C-contiguous. Sparse data will still incur memory copy though. sklearn.linear_model.SGDClassifier SGDClassifier can optimize the same cost function as LinearSVC by adjusting the penalty and loss parameters. In addition it requires less memory, allows incremental (online) learning, and implements various loss functions and regularization regimes. """ def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4, C=1.0, multi_class='ovr', fit_intercept=True, intercept_scaling=1, class_weight=None, verbose=0, random_state=None, max_iter=1000): self.dual = dual self.tol = tol self.C = C self.multi_class = multi_class self.fit_intercept = fit_intercept self.intercept_scaling = intercept_scaling self.class_weight = class_weight self.verbose = verbose self.random_state = random_state self.max_iter = max_iter self.penalty = penalty self.loss = loss def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target vector relative to X Returns ------- self : object Returns self. """ # FIXME Remove l1/l2 support in 1.0 ----------------------------------- loss_l = self.loss.lower() msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. 
Backward compatibility" " for the loss='%s' will be removed in %s") # FIXME change loss_l --> self.loss after 0.18 if loss_l in ('l1', 'l2'): old_loss = self.loss self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l) warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'), DeprecationWarning) # --------------------------------------------------------------------- if self.C < 0: raise ValueError("Penalty term must be positive; got (C=%r)" % self.C) X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C") self.classes_ = np.unique(y) self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear( X, y, self.C, self.fit_intercept, self.intercept_scaling, self.class_weight, self.penalty, self.dual, self.verbose, self.max_iter, self.tol, self.random_state, self.multi_class, self.loss) if self.multi_class == "crammer_singer" and len(self.classes_) == 2: self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1) if self.fit_intercept: intercept = self.intercept_[1] - self.intercept_[0] self.intercept_ = np.array([intercept]) return self class LinearSVR(LinearModel, RegressorMixin): """Linear Support Vector Regression. Similar to SVR with parameter kernel='linear', but implemented in terms of liblinear rather than libsvm, so it has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples. This class supports both dense and sparse input. Read more in the :ref:`User Guide <svm_regression>`. Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. The penalty is a squared l2 penalty. The bigger this parameter, the less regularization is used. loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive') Specifies the loss function. 'l1' is the epsilon-insensitive loss (standard SVR) while 'l2' is the squared epsilon-insensitive loss. epsilon : float, optional (default=0.1) Epsilon parameter in the epsilon-insensitive loss function. Note that the value of this parameter depends on the scale of the target variable y. If unsure, set epsilon=0. dual : bool, (default=True) Select the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features. tol : float, optional (default=1e-4) Tolerance for stopping criteria. fit_intercept : boolean, optional (default=True) Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (i.e. data is expected to be already centered). intercept_scaling : float, optional (default=1) When self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. verbose : int, (default=0) Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in liblinear that, if enabled, may not work properly in a multithreaded context. random_state : int seed, RandomState instance, or None (default=None) The seed of the pseudo random number generator to use when shuffling the data. max_iter : int, (default=1000) The maximum number of iterations to be run. 
Attributes ---------- coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is a readonly property derived from `raw_coef_` that follows the internal memory layout of liblinear. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. See also -------- LinearSVC Implementation of Support Vector Machine classifier using the same library as this class (liblinear). SVR Implementation of Support Vector Machine regression using libsvm: the kernel can be non-linear but its SMO algorithm does not scale to large number of samples as LinearSVC does. sklearn.linear_model.SGDRegressor SGDRegressor can optimize the same cost function as LinearSVR by adjusting the penalty and loss parameters. In addition it requires less memory, allows incremental (online) learning, and implements various loss functions and regularization regimes. """ def __init__(self, epsilon=0.0, tol=1e-4, C=1.0, loss='epsilon_insensitive', fit_intercept=True, intercept_scaling=1., dual=True, verbose=0, random_state=None, max_iter=1000): self.tol = tol self.C = C self.epsilon = epsilon self.fit_intercept = fit_intercept self.intercept_scaling = intercept_scaling self.verbose = verbose self.random_state = random_state self.max_iter = max_iter self.dual = dual self.loss = loss def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target vector relative to X Returns ------- self : object Returns self. """ # FIXME Remove l1/l2 support in 1.0 ----------------------------------- loss_l = self.loss.lower() msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. Backward compatibility" " for the loss='%s' will be removed in %s") # FIXME change loss_l --> self.loss after 0.18 if loss_l in ('l1', 'l2'): old_loss = self.loss self.loss = {'l1': 'epsilon_insensitive', 'l2': 'squared_epsilon_insensitive' }.get(loss_l) warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'), DeprecationWarning) # --------------------------------------------------------------------- if self.C < 0: raise ValueError("Penalty term must be positive; got (C=%r)" % self.C) X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C") penalty = 'l2' # SVR only accepts l2 penalty self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear( X, y, self.C, self.fit_intercept, self.intercept_scaling, None, penalty, self.dual, self.verbose, self.max_iter, self.tol, self.random_state, loss=self.loss, epsilon=self.epsilon) self.coef_ = self.coef_.ravel() return self class SVC(BaseSVC): """C-Support Vector Classification. The implementation is based on libsvm. The fit time complexity is more than quadratic with the number of samples which makes it hard to scale to dataset with more than a couple of 10000 samples. The multiclass support is handled according to a one-vs-one scheme. For details on the precise mathematical formulation of the provided kernel functions and how `gamma`, `coef0` and `degree` affect each other, see the corresponding section in the narrative documentation: :ref:`svm_kernels`. Read more in the :ref:`User Guide <svm_classification>`. 
Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. If a callable is given it is used to pre-compute the kernel matrix from data matrices; that matrix should be an array of shape ``(n_samples, n_samples)``. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. probability : boolean, optional (default=False) Whether to enable probability estimates. This must be enabled prior to calling `fit`, and will slow down that method. shrinking : boolean, optional (default=True) Whether to use the shrinking heuristic. tol : float, optional (default=1e-3) Tolerance for stopping criterion. cache_size : float, optional Specify the size of the kernel cache (in MB). class_weight : {dict, 'balanced'}, optional Set the parameter C of class i to class_weight[i]*C for SVC. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. decision_function_shape : 'ovo', 'ovr' or None, default=None Whether to return a one-vs-rest ('ovr') decision function of shape (n_samples, n_classes) as all other classifiers, or the original one-vs-one ('ovo') decision function of libsvm which has shape (n_samples, n_classes * (n_classes - 1) / 2). The default of None will currently behave as 'ovo' for backward compatibility and raise a deprecation warning, but will change to 'ovr' in 0.18. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data for probability estimation. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [n_SV, n_features] Support vectors. n_support_ : array-like, dtype=int32, shape = [n_class] Number of support vectors for each class. dual_coef_ : array, shape = [n_class-1, n_SV] Coefficients of the support vector in the decision function. For multiclass, coefficient for all 1-vs-1 classifiers. The layout of the coefficients in the multiclass case is somewhat non-trivial. See the section about multi-class classification in the SVM section of the User Guide for details. coef_ : array, shape = [n_class-1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is a readonly property derived from `dual_coef_` and `support_vectors_`. intercept_ : array, shape = [n_class * (n_class-1) / 2] Constants in decision function. 
Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> y = np.array([1, 1, 2, 2]) >>> from sklearn.svm import SVC >>> clf = SVC() >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape=None, degree=3, gamma='auto', kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- SVR Support Vector Machine for Regression implemented using libsvm. LinearSVC Scalable Linear Support Vector Machine for classification implemented using liblinear. Check the See also section of LinearSVC for more comparison element. """ def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto', coef0=0.0, shrinking=True, probability=False, tol=1e-3, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape=None, random_state=None): super(SVC, self).__init__( impl='c_svc', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking, probability=probability, cache_size=cache_size, class_weight=class_weight, verbose=verbose, max_iter=max_iter, decision_function_shape=decision_function_shape, random_state=random_state) class NuSVC(BaseSVC): """Nu-Support Vector Classification. Similar to SVC but uses a parameter to control the number of support vectors. The implementation is based on libsvm. Read more in the :ref:`User Guide <svm_classification>`. Parameters ---------- nu : float, optional (default=0.5) An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. If a callable is given it is used to precompute the kernel matrix. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. probability : boolean, optional (default=False) Whether to enable probability estimates. This must be enabled prior to calling `fit`, and will slow down that method. shrinking : boolean, optional (default=True) Whether to use the shrinking heuristic. tol : float, optional (default=1e-3) Tolerance for stopping criterion. cache_size : float, optional Specify the size of the kernel cache (in MB). class_weight : {dict, 'auto'}, optional Set the parameter C of class i to class_weight[i]*C for SVC. If not given, all classes are supposed to have weight one. The 'auto' mode uses the values of y to automatically adjust weights inversely proportional to class frequencies. verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. 
decision_function_shape : 'ovo', 'ovr' or None, default=None Whether to return a one-vs-rest ('ovr') decision function of shape (n_samples, n_classes) as all other classifiers, or the original one-vs-one ('ovo') decision function of libsvm which has shape (n_samples, n_classes * (n_classes - 1) / 2). The default of None will currently behave as 'ovo' for backward compatibility and raise a deprecation warning, but will change to 'ovr' in 0.18. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data for probability estimation. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [n_SV, n_features] Support vectors. n_support_ : array-like, dtype=int32, shape = [n_class] Number of support vectors for each class. dual_coef_ : array, shape = [n_class-1, n_SV] Coefficients of the support vector in the decision function. For multiclass, coefficient for all 1-vs-1 classifiers. The layout of the coefficients in the multiclass case is somewhat non-trivial. See the section about multi-class classification in the SVM section of the User Guide for details. coef_ : array, shape = [n_class-1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is a readonly property derived from `dual_coef_` and `support_vectors_`. intercept_ : array, shape = [n_class * (n_class-1) / 2] Constants in decision function. Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> y = np.array([1, 1, 2, 2]) >>> from sklearn.svm import NuSVC >>> clf = NuSVC() >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE NuSVC(cache_size=200, class_weight=None, coef0=0.0, decision_function_shape=None, degree=3, gamma='auto', kernel='rbf', max_iter=-1, nu=0.5, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- SVC Support Vector Machine for classification using libsvm. LinearSVC Scalable linear Support Vector Machine for classification using liblinear. """ def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto', coef0=0.0, shrinking=True, probability=False, tol=1e-3, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape=None, random_state=None): super(NuSVC, self).__init__( impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking, probability=probability, cache_size=cache_size, class_weight=class_weight, verbose=verbose, max_iter=max_iter, decision_function_shape=decision_function_shape, random_state=random_state) class SVR(BaseLibSVM, RegressorMixin): """Epsilon-Support Vector Regression. The free parameters in the model are C and epsilon. The implementation is based on libsvm. Read more in the :ref:`User Guide <svm_regression>`. Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. epsilon : float, optional (default=0.1) Epsilon in the epsilon-SVR model. It specifies the epsilon-tube within which no penalty is associated in the training loss function with points predicted within a distance epsilon from the actual value. kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. 
If a callable is given it is used to precompute the kernel matrix. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. shrinking : boolean, optional (default=True) Whether to use the shrinking heuristic. tol : float, optional (default=1e-3) Tolerance for stopping criterion. cache_size : float, optional Specify the size of the kernel cache (in MB). verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [nSV, n_features] Support vectors. dual_coef_ : array, shape = [1, n_SV] Coefficients of the support vector in the decision function. coef_ : array, shape = [1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is readonly property derived from `dual_coef_` and `support_vectors_`. intercept_ : array, shape = [1] Constants in decision function. Examples -------- >>> from sklearn.svm import SVR >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = SVR(C=1.0, epsilon=0.2) >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto', kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False) See also -------- NuSVR Support Vector Machine for regression implemented using libsvm using a parameter to control the number of support vectors. LinearSVR Scalable Linear Support Vector Machine for regression implemented using liblinear. """ def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0, tol=1e-3, C=1.0, epsilon=0.1, shrinking=True, cache_size=200, verbose=False, max_iter=-1): super(SVR, self).__init__( 'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose, shrinking=shrinking, probability=False, cache_size=cache_size, class_weight=None, max_iter=max_iter, random_state=None) class NuSVR(BaseLibSVM, RegressorMixin): """Nu Support Vector Regression. Similar to NuSVC, for regression, uses a parameter nu to control the number of support vectors. However, unlike NuSVC, where nu replaces C, here nu replaces the parameter epsilon of epsilon-SVR. The implementation is based on libsvm. Read more in the :ref:`User Guide <svm_regression>`. Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. nu : float, optional An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. By default 0.5 will be taken. kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. 
If a callable is given it is used to precompute the kernel matrix. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. shrinking : boolean, optional (default=True) Whether to use the shrinking heuristic. tol : float, optional (default=1e-3) Tolerance for stopping criterion. cache_size : float, optional Specify the size of the kernel cache (in MB). verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [nSV, n_features] Support vectors. dual_coef_ : array, shape = [1, n_SV] Coefficients of the support vector in the decision function. coef_ : array, shape = [1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is readonly property derived from `dual_coef_` and `support_vectors_`. intercept_ : array, shape = [1] Constants in decision function. Examples -------- >>> from sklearn.svm import NuSVR >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = NuSVR(C=1.0, nu=0.1) >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto', kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001, verbose=False) See also -------- NuSVC Support Vector Machine for classification implemented with libsvm with a parameter to control the number of support vectors. SVR epsilon Support Vector Machine for regression implemented with libsvm. """ def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3, gamma='auto', coef0=0.0, shrinking=True, tol=1e-3, cache_size=200, verbose=False, max_iter=-1): super(NuSVR, self).__init__( 'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking, probability=False, cache_size=cache_size, class_weight=None, verbose=verbose, max_iter=max_iter, random_state=None) class OneClassSVM(BaseLibSVM): """Unsupervised Outlier Detection. Estimate the support of a high-dimensional distribution. The implementation is based on libsvm. Read more in the :ref:`User Guide <svm_outlier_detection>`. Parameters ---------- kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. If a callable is given it is used to precompute the kernel matrix. nu : float, optional An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. By default 0.5 will be taken. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. 
gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. tol : float, optional Tolerance for stopping criterion. shrinking : boolean, optional Whether to use the shrinking heuristic. cache_size : float, optional Specify the size of the kernel cache (in MB). verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data for probability estimation. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [nSV, n_features] Support vectors. dual_coef_ : array, shape = [n_classes-1, n_SV] Coefficients of the support vectors in the decision function. coef_ : array, shape = [n_classes-1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is readonly property derived from `dual_coef_` and `support_vectors_` intercept_ : array, shape = [n_classes-1] Constants in decision function. """ def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0, tol=1e-3, nu=0.5, shrinking=True, cache_size=200, verbose=False, max_iter=-1, random_state=None): super(OneClassSVM, self).__init__( 'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking, False, cache_size, None, verbose, max_iter, random_state) def fit(self, X, y=None, sample_weight=None, **params): """ Detects the soft boundary of the set of samples X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Set of samples, where n_samples is the number of samples and n_features is the number of features. sample_weight : array-like, shape (n_samples,) Per-sample weights. Rescale C per sample. Higher weights force the classifier to put more emphasis on these points. Returns ------- self : object Returns self. Notes ----- If X is not a C-ordered contiguous array it is copied. """ super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight, **params) return self def decision_function(self, X): """Distance of the samples X to the separating hyperplane. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- X : array-like, shape (n_samples,) Returns the decision function of the samples. """ dec = self._decision_function(X) return dec
bsd-3-clause
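Unlike SVC, NuSVC, SVR and NuSVR in the file above, the LinearSVC, LinearSVR and OneClassSVM docstrings ship without an Examples section. The sketch below illustrates the fit/predict cycle they describe; the toy arrays and parameter values are assumptions chosen for illustration, and the printed results are indicative rather than guaranteed.

# Illustrative use of the liblinear- and libsvm-backed estimators defined above.
import numpy as np
from sklearn.svm import LinearSVC, LinearSVR, OneClassSVM

X = np.array([[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
y = np.array([1, 1, 2, 2])

# Classification with the squared hinge loss and one-vs-rest multiclass handling.
clf = LinearSVC(C=1.0, loss='squared_hinge', dual=True, random_state=0)
clf.fit(X, y)
print(clf.predict([[-0.8, -1.0]]))      # a point near the first cluster

# Regression with the epsilon-insensitive loss on a continuous target.
t = np.array([-1.2, -1.9, 1.1, 2.0])
reg = LinearSVR(C=1.0, epsilon=0.0, loss='epsilon_insensitive', random_state=0)
reg.fit(X, t)
print(reg.coef_, reg.intercept_)

# Outlier detection: fit() ignores labels, decision_function() returns signed
# distances of new samples to the learned boundary.
oc = OneClassSVM(nu=0.1, kernel='rbf', gamma='auto')
oc.fit(X)
print(oc.decision_function([[0.0, 0.0]]))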
KarlClinckspoor/SAXS_treatment
WLM models/interactive_fit_2.py
1
5865
from SAXS_FF import WLM_whole_q import matplotlib.pyplot as plt import numpy as np from matplotlib.widgets import Slider, Button data_x = [] data_y = [] data_y_err = [] with open(r'D:\Dropbox\Python\SAXS\WLM_models\MG.txt', 'r') as fhand: counter = 0 for line in fhand: try: temp_x, temp_y = line.rstrip().split('\t') data_x.append(float(temp_x)/10) data_y.append(float(temp_y)) #data_y_err.append(float(temp_err)) except: pass #print('Line invalid') # plt.errorbar(data_x, data_y, data_y_err) # plt.show() # Initial parameters. scale = 1e+06 # 0.1 d_head = 0.1929E+02 # 20 rad_core = 0.8109E+01 # 8 rho_rel = 0.5999E-01 # 0.06 sigma = 0.1000E+01 # 1 back = 5000 # 0 L = 880 # 5000 kuhn = 195 # 1000 eps = 0.1000E+01 # 1 D_CQ = 36 # 105 nu_rpa = 66 # 38 SC_pow = 4218219 # 0.000 exponent = 4 figure_bottom = 0.50 fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111) fig.subplots_adjust(left=0.15, bottom=figure_bottom) ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel('q/Å') ax.set_ylabel('I(q)') qs = np.logspace(-1.8, -0.3) Ints = WLM_whole_q(qs, scale, d_head, rad_core, rho_rel, sigma, back, L, kuhn, eps, D_CQ, nu_rpa, SC_pow, exponent) [line] = ax.plot(qs, Ints, linewidth=2, color='red') line2 = ax.plot(data_x, data_y) ax.axis([0.01, 0.5, 500, 5E7]) axis_color = 'lightgoldenrodyellow' first_slider_bottom = figure_bottom - 0.1 slider_bottoms = [first_slider_bottom - (i - 1) * 0.03 for i in range(1, 14, 1)] slider_height = 0.015 slider_left = 0.25 slider_width = 0.65 # add_axes([left, bottom, width, height]) # Slider(ax, label, valmin, valmax, valinit) scale_slider_ax = fig.add_axes([slider_left, slider_bottoms[0], slider_width, slider_height], facecolor=axis_color) scale_slider = Slider(scale_slider_ax, 'Scale', 1000, 100000000, valinit=scale) d_head_slider_ax = fig.add_axes([slider_left, slider_bottoms[1], slider_width, slider_height], facecolor=axis_color) d_head_slider = Slider(d_head_slider_ax, 'D_head', d_head / 10, d_head * 10, valinit=d_head) rad_core_slider_ax = fig.add_axes([slider_left, slider_bottoms[2], slider_width, slider_height], facecolor=axis_color) rad_core_slider = Slider(rad_core_slider_ax, 'rad_core', rad_core / 10, rad_core * 10, valinit=rad_core) rho_rel_slider_ax = fig.add_axes([slider_left, slider_bottoms[3], slider_width, slider_height], facecolor=axis_color) rho_rel_slider = Slider(rho_rel_slider_ax, 'rho_rel', rho_rel / 10, rho_rel * 10, valinit=rho_rel) sigma_slider_ax = fig.add_axes([slider_left, slider_bottoms[4], slider_width, slider_height], facecolor=axis_color) sigma_slider = Slider(sigma_slider_ax, 'sigma', sigma / 10, sigma * 10, valinit=sigma) back_slider_ax = fig.add_axes([slider_left, slider_bottoms[5], slider_width, slider_height], facecolor=axis_color) back_slider = Slider(back_slider_ax, 'back', 5E3, 1E5, valinit=back) L_slider_ax = fig.add_axes([slider_left, slider_bottoms[6], slider_width, slider_height], facecolor=axis_color) L_slider = Slider(L_slider_ax, 'L', L / 10, L * 10, valinit=L) kuhn_slider_ax = fig.add_axes([slider_left, slider_bottoms[7], slider_width, slider_height], facecolor=axis_color) kuhn_slider = Slider(kuhn_slider_ax, 'kuhn', kuhn / 10, kuhn * 10, valinit=kuhn) eps_slider_ax = fig.add_axes([slider_left, slider_bottoms[8], slider_width, slider_height], facecolor=axis_color) eps_slider = Slider(eps_slider_ax, 'eps', eps / 10, eps * 10, valinit=eps) D_CQ_slider_ax = fig.add_axes([slider_left, slider_bottoms[9], slider_width, slider_height], facecolor=axis_color) D_CQ_slider = Slider(D_CQ_slider_ax, 'D_CQ', D_CQ / 10, D_CQ * 10, 
valinit=D_CQ) nu_rpa_slider_ax = fig.add_axes([slider_left, slider_bottoms[10], slider_width, slider_height], facecolor=axis_color) nu_rpa_slider = Slider(nu_rpa_slider_ax, 'nu_rpa', nu_rpa / 10, nu_rpa * 10, valinit=nu_rpa) SC_pow_slider_ax = fig.add_axes([slider_left, slider_bottoms[11], slider_width, slider_height], facecolor=axis_color) SC_pow_slider = Slider(SC_pow_slider_ax, 'SC_pow', SC_pow / 10, SC_pow * 10, valinit=SC_pow) exponent_slider_ax = fig.add_axes([slider_left, slider_bottoms[12], slider_width, slider_height], facecolor=axis_color) exponent_slider = Slider(exponent_slider_ax, 'exponent', 0, 4, valinit=exponent) def sliders_on_changed(val): line.set_ydata(WLM_whole_q(qs, scale_slider.val, d_head_slider.val, rad_core_slider.val, rho_rel_slider.val, sigma_slider.val, back_slider.val, L_slider.val, kuhn_slider.val, eps_slider.val, D_CQ_slider.val, nu_rpa_slider.val, SC_pow_slider.val, exponent_slider.val)) fig.canvas.draw_idle() scale_slider.on_changed(sliders_on_changed) d_head_slider.on_changed(sliders_on_changed) rad_core_slider.on_changed(sliders_on_changed) rho_rel_slider.on_changed(sliders_on_changed) sigma_slider.on_changed(sliders_on_changed) back_slider.on_changed(sliders_on_changed) L_slider.on_changed(sliders_on_changed) kuhn_slider.on_changed(sliders_on_changed) eps_slider.on_changed(sliders_on_changed) D_CQ_slider.on_changed(sliders_on_changed) nu_rpa_slider.on_changed(sliders_on_changed) SC_pow_slider.on_changed(sliders_on_changed) exponent_slider.on_changed(sliders_on_changed) reset_button_ax = fig.add_axes([0.05, 0.4, 0.1, 0.04]) reset_button = Button(reset_button_ax, 'Reset', color=axis_color, hovercolor='0.975') def reset_button_on_clicked(mouse_event): scale_slider.reset() d_head_slider.reset() rad_core_slider.reset() rho_rel_slider.reset() sigma_slider.reset() back_slider.reset() L_slider.reset() kuhn_slider.reset() eps_slider.reset() D_CQ_slider.reset() nu_rpa_slider.reset() SC_pow_slider.reset() exponent_slider.reset() reset_button.on_clicked(reset_button_on_clicked) plt.show()
gpl-3.0
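interactive_fit_2.py above wires thirteen matplotlib sliders to the repo-specific WLM_whole_q form factor and a hard-coded data path. Below is a self-contained sketch of the same Slider/Button callback pattern with a generic one-parameter curve standing in for the scattering model; the 'length' parameter and the Lorentzian-like curve are placeholders.

# Minimal, self-contained version of the interactive-slider pattern used above.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button

q = np.logspace(-2, 0, 200)

def model(length):
    # Stand-in for WLM_whole_q: a simple Lorentzian-like decay in q.
    return 1.0 / (1.0 + (q * length) ** 2)

fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.30)                 # leave room for the widgets
ax.set_xlabel('q/Å')
ax.set_ylabel('I(q)')
[line] = ax.loglog(q, model(50.0), lw=2, color='red')

# One slider controlling the single model parameter.
slider_ax = fig.add_axes([0.25, 0.15, 0.60, 0.03])
length_slider = Slider(slider_ax, 'length', 5.0, 500.0, valinit=50.0)

def on_changed(val):
    # Recompute the curve with the current slider value and redraw lazily.
    line.set_ydata(model(length_slider.val))
    fig.canvas.draw_idle()

length_slider.on_changed(on_changed)

# Reset button restoring the initial parameter value.
reset_ax = fig.add_axes([0.05, 0.15, 0.10, 0.04])
reset_button = Button(reset_ax, 'Reset')
reset_button.on_clicked(lambda event: length_slider.reset())

plt.show()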
athomasmr23/Ardyno
ardynoMain.py
1
5242
import Tkinter as tk #import matplotlib from numpy import arange, sin, pi from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.figure import Figure #import Tkinter as Tk import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation class Ardyno(tk.Frame): def __init__(self, parent): tk.Frame.__init__(self, parent) self.parent = parent self.initUI() def initUI(self): self.parent.title("Ardyno") menubar = tk.Menu(self.parent) self.parent.config(menu=menubar) filemenu=tk.Menu(menubar, tearoff=0) filemenu.add_command(label="New") filemenu.add_command(label="Open") filemenu.add_command(label="Save") filemenu.add_command(label="Save as...") filemenu.add_command(label="Close") filemenu.add_separator() filemenu.add_command(label="Exit",command=self.destroy) menubar.add_cascade(label="File",menu=filemenu) commenu=tk.Menu(menubar, tearoff=0) commenu.add_command(label="Connect") menubar.add_cascade(label="Communications",menu=commenu) helpmenu=tk.Menu(menubar,tearoff=0) helpmenu.add_command(label="Help") helpmenu.add_command(label="About") menubar.add_cascade(label="Help",menu=helpmenu) toolbar = tk.Frame(self.parent, bd=1, relief="raised") toolbar.pack(side="top", fill="x") testControlFrame = tk.Frame(self.parent, borderwidth=2,relief="groove") testControlFrame.pack(side="left", fill="both") #SAE Correction factor SAEcorrectionFrame = tk.LabelFrame(testControlFrame, borderwidth=2, relief="groove",text="SAE Correction") tk.Label(SAEcorrectionFrame,text="Ambient Temp [C]").grid(row=1,column=0,padx=5) tk.Label(SAEcorrectionFrame,relief="sunken",width=7).grid(row=2,column=0,padx=5,pady=5) tk.Label(SAEcorrectionFrame,text="Ambient Pressure [Kpa]").grid(row=3,column=0,padx=5) tk.Label(SAEcorrectionFrame,relief="sunken",width=7).grid(row=4,column=0,padx=5,pady=5) tk.Label(SAEcorrectionFrame,text="SAE Correction Factor").grid(row=5,column=0,padx=5) tk.Label(SAEcorrectionFrame,relief="sunken",width=7).grid(row=6,column=0,padx=5,pady=5) tk.Button(SAEcorrectionFrame,text="Correction Factor").grid(row=7,column=0,padx=5,pady=5,columnspan=2,rowspan=2) SAEcorrectionFrame.pack(side="top", fill="both") #Load cell setup and calibration LoadCellCal=tk.LabelFrame(testControlFrame,borderwidth=2,relief="groove",text="Calibration") tk.Label(LoadCellCal,text="No Load").grid(row=3,column=0,padx=5) tk.Label(LoadCellCal,relief="sunken",width=6).grid(row=3,column=1,padx=5,pady=5) tk.Label(LoadCellCal,text="Loaded").grid(row=4,column=0,padx=10) tk.Label(LoadCellCal,relief="sunken",width=6).grid(row=4,column=1,padx=5,pady=5) tk.Button(LoadCellCal,text="Unloaded Calibration").grid(row=3,column=2,padx=5,pady=5,columnspan=1,rowspan=1) tk.Button(LoadCellCal,text="Loaded Calibration").grid(row=4,column=2,padx=5,pady=5,columnspan=1,rowspan=1) tk.Label(LoadCellCal,text="Calibration distance [in]").grid(row=0,column=0,padx=5,pady=5,sticky="w") entryL=tk.Entry(LoadCellCal,width=6) entryL.grid(row=0,column=1,padx=5,pady=5,columnspan=2,sticky="w") entryL.delete(0,"end") entryL.insert(0,4.75) tk.Label(LoadCellCal,text="Calibration weight [lbf]").grid(row=1,column=0,padx=5,pady=5,sticky="w") entryW=tk.Entry(LoadCellCal,width=6) entryW.grid(row=1,column=1,padx=5,pady=5,columnspan=2,sticky="w") entryW.delete(0,"end") entryW.insert(0,10.08) tk.Label(LoadCellCal,relief="sunken",width=40).grid(row=5,column=0,padx=5,pady=5,columnspan=3,sticky="e") tk.Button(LoadCellCal,text="Generate Fit").grid(row=5,column=3,padx=5,pady=5,rowspan=1) LoadCellCal.pack(fill="both") testOperationFrame = 
tk.Frame(self.parent, borderwidth=2,relief="groove") testOperationFrame.pack(side="right", fill="both") RunStopFrame=tk.LabelFrame(testOperationFrame,borderwidth=2,relief="groove") tk.Button(RunStopFrame,text="Create Test").grid(row=0,column=0,padx=5,pady=5,columnspan=2) tk.Button(RunStopFrame,text="Run").grid(row=1,column=0,padx=5,pady=5) tk.Button(RunStopFrame,text="Stop").grid(row=1,column=1,padx=5,pady=5) tk.Button(RunStopFrame,text="Plot").grid(row=2,column=0,padx=5,pady=5) tk.Button(RunStopFrame,text="Export").grid(row=2,column=1,padx=5,pady=5,columnspan=2) RunStopFrame.pack(padx=10,pady=10) ## fig=plt.Figure() ## graphCanvas = FigureCanvasTkAgg(fig, self.parent) ## graphCanvas.get_tk_widget().grid(column=0,row=0) #graphCanvas.pack(side=TOP, fill=BOTH) self.parent.config(menu=menubar) self.pack() def onExit(self): self.quit() def main(): root = tk.Tk() root.geometry("917x540+300+300") app = Ardyno(root) root.mainloop() if __name__ == '__main__': main()
gpl-2.0
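ardynoMain.py above builds the Tkinter layout but leaves the matplotlib canvas commented out. The fragment below is a hedged sketch of how FigureCanvasTkAgg could be embedded in such a window; it keeps the script's Python 2 style `import Tkinter`, and the torque-curve numbers are purely illustrative.

# Hypothetical embedding of a matplotlib figure in a Tkinter window, i.e. the
# part ardynoMain.py keeps commented out. Assumes Python 2, as the script does.
import Tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = tk.Tk()
root.title("Ardyno plot")

fig = Figure(figsize=(5, 3), dpi=100)
ax = fig.add_subplot(111)
ax.set_xlabel("Engine speed [rpm]")
ax.set_ylabel("Torque [lbf-ft]")
ax.plot([1000, 2000, 3000, 4000], [18, 25, 31, 28])   # placeholder dyno curve

canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack(side="top", fill="both", expand=True)

root.mainloop()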
ashhher3/scikit-learn
sklearn/hmm.py
12
48722
# Hidden Markov Models # # Author: Ron Weiss <[email protected]> # and Shiqiao Du <[email protected]> # API changes: Jaques Grobler <[email protected]> """ The :mod:`sklearn.hmm` module implements hidden Markov models. **Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known numerical stability issues. This module will be removed in version 0.17. It has been moved to a separate repository: https://github.com/hmmlearn/hmmlearn """ import string import numpy as np from .utils import check_random_state, deprecated from .utils.extmath import logsumexp from .utils.validation import check_is_fitted from .base import BaseEstimator from .mixture import ( GMM, log_multivariate_normal_density, sample_gaussian, distribute_covar_matrix_to_match_covariance_type, _validate_covars) from . import cluster from . import _hmmc __all__ = ['GMMHMM', 'GaussianHMM', 'MultinomialHMM', 'decoder_algorithms', 'normalize'] ZEROLOGPROB = -1e200 EPS = np.finfo(float).eps NEGINF = -np.inf decoder_algorithms = ("viterbi", "map") @deprecated("WARNING: The HMM module and its functions will be removed in 0.17 " "as it no longer falls within the project's scope and API. " "It has been moved to a separate repository: " "https://github.com/hmmlearn/hmmlearn") def normalize(A, axis=None): """ Normalize the input array so that it sums to 1. WARNING: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Parameters ---------- A: array, shape (n_samples, n_features) Non-normalized input data axis: int dimension along which normalization is performed Returns ------- normalized_A: array, shape (n_samples, n_features) A with values normalized (summing to 1) along the prescribed axis WARNING: Modifies inplace the array """ A += EPS Asum = A.sum(axis) if axis and A.ndim > 1: # Make sure we don't divide by zero. Asum[Asum == 0] = 1 shape = list(A.shape) shape[axis] = 1 Asum.shape = shape return A / Asum @deprecated("WARNING: The HMM module and its function will be removed in 0.17" "as it no longer falls within the project's scope and API. " "It has been moved to a separate repository: " "https://github.com/hmmlearn/hmmlearn") class _BaseHMM(BaseEstimator): """Hidden Markov Model base class. Representation of a hidden Markov model probability distribution. This class allows for easy evaluation of, sampling from, and maximum-likelihood estimation of the parameters of a HMM. See the instance documentation for details specific to a particular object. .. warning:: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Attributes ---------- n_components : int Number of states in the model. transmat : array, shape (`n_components`, `n_components`) Matrix of transition probabilities between states. startprob : array, shape ('n_components`,) Initial state occupation distribution. transmat_prior : array, shape (`n_components`, `n_components`) Matrix of prior transition probabilities between states. startprob_prior : array, shape ('n_components`,) Initial state occupation prior distribution. algorithm : string, one of the decoder_algorithms decoder algorithm random_state: RandomState or an int seed (0 by default) A random number generator instance n_iter : int, optional Number of iterations to perform. thresh : float, optional Convergence threshold. params : string, optional Controls which parameters are updated in the training process. 
Can contain any combination of 's' for startprob, 't' for transmat, and other characters for subclass-specific emmission parameters. Defaults to all parameters. init_params : string, optional Controls which parameters are initialized prior to training. Can contain any combination of 's' for startprob, 't' for transmat, and other characters for subclass-specific emmission parameters. Defaults to all parameters. See Also -------- GMM : Gaussian mixture model """ # This class implements the public interface to all HMMs that # derive from it, including all of the machinery for the # forward-backward and Viterbi algorithms. Subclasses need only # implement _generate_sample_from_state(), _compute_log_likelihood(), # _init(), _initialize_sufficient_statistics(), # _accumulate_sufficient_statistics(), and _do_mstep(), all of # which depend on the specific emission distribution. # # Subclasses will probably also want to implement properties for # the emission distribution parameters to expose them publicly. def __init__(self, n_components=1, startprob=None, transmat=None, startprob_prior=None, transmat_prior=None, algorithm="viterbi", random_state=None, n_iter=10, thresh=1e-2, params=string.ascii_letters, init_params=string.ascii_letters): self.n_components = n_components self.n_iter = n_iter self.thresh = thresh self.params = params self.init_params = init_params self.startprob_ = startprob self.startprob_prior = startprob_prior self.transmat_ = transmat self.transmat_prior = transmat_prior self._algorithm = algorithm self.random_state = random_state def eval(self, X): return self.score_samples(X) def score_samples(self, obs): """Compute the log probability under the model and compute posteriors. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- logprob : float Log likelihood of the sequence ``obs``. posteriors : array_like, shape (n, n_components) Posterior probabilities of each state for each observation See Also -------- score : Compute the log probability under the model decode : Find most likely state sequence corresponding to a `obs` """ obs = np.asarray(obs) framelogprob = self._compute_log_likelihood(obs) logprob, fwdlattice = self._do_forward_pass(framelogprob) bwdlattice = self._do_backward_pass(framelogprob) gamma = fwdlattice + bwdlattice # gamma is guaranteed to be correctly normalized by logprob at # all frames, unless we do approximate inference using pruning. # So, we will normalize each frame explicitly in case we # pruned too aggressively. posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T posteriors += np.finfo(np.float32).eps posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1)) return logprob, posteriors def score(self, obs): """Compute the log probability under the model. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : float Log likelihood of the ``obs``. See Also -------- score_samples : Compute the log probability under the model and posteriors decode : Find most likely state sequence corresponding to a `obs` """ obs = np.asarray(obs) framelogprob = self._compute_log_likelihood(obs) logprob, _ = self._do_forward_pass(framelogprob) return logprob def _decode_viterbi(self, obs): """Find most likely state sequence corresponding to ``obs``. Uses the Viterbi algorithm. 
Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- viterbi_logprob : float Log probability of the maximum likelihood path through the HMM. state_sequence : array_like, shape (n,) Index of the most likely states for each observation. See Also -------- score_samples : Compute the log probability under the model and posteriors. score : Compute the log probability under the model """ obs = np.asarray(obs) framelogprob = self._compute_log_likelihood(obs) viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob) return viterbi_logprob, state_sequence def _decode_map(self, obs): """Find most likely state sequence corresponding to `obs`. Uses the maximum a posteriori estimation. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- map_logprob : float Log probability of the maximum likelihood path through the HMM state_sequence : array_like, shape (n,) Index of the most likely states for each observation See Also -------- score_samples : Compute the log probability under the model and posteriors. score : Compute the log probability under the model. """ _, posteriors = self.score_samples(obs) state_sequence = np.argmax(posteriors, axis=1) map_logprob = np.max(posteriors, axis=1).sum() return map_logprob, state_sequence def decode(self, obs, algorithm="viterbi"): """Find most likely state sequence corresponding to ``obs``. Uses the selected algorithm for decoding. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. algorithm : string, one of the `decoder_algorithms` decoder algorithm to be used Returns ------- logprob : float Log probability of the maximum likelihood path through the HMM state_sequence : array_like, shape (n,) Index of the most likely states for each observation See Also -------- score_samples : Compute the log probability under the model and posteriors. score : Compute the log probability under the model. """ if self._algorithm in decoder_algorithms: algorithm = self._algorithm elif algorithm in decoder_algorithms: algorithm = algorithm decoder = {"viterbi": self._decode_viterbi, "map": self._decode_map} logprob, state_sequence = decoder[algorithm](obs) return logprob, state_sequence def predict(self, obs, algorithm="viterbi"): """Find most likely state sequence corresponding to `obs`. Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- state_sequence : array_like, shape (n,) Index of the most likely states for each observation """ _, state_sequence = self.decode(obs, algorithm) return state_sequence def predict_proba(self, obs): """Compute the posterior probability for each state in the model Parameters ---------- obs : array_like, shape (n, n_features) Sequence of n_features-dimensional data points. Each row corresponds to a single point in the sequence. Returns ------- T : array-like, shape (n, n_components) Returns the probability of the sample for each state in the model. """ _, posteriors = self.score_samples(obs) return posteriors def sample(self, n=1, random_state=None): """Generate random samples from the model. 
Parameters ---------- n : int Number of samples to generate. random_state: RandomState or an int seed (0 by default) A random number generator instance. If None is given, the object's random_state is used Returns ------- (obs, hidden_states) obs : array_like, length `n` List of samples hidden_states : array_like, length `n` List of hidden states """ if random_state is None: random_state = self.random_state random_state = check_random_state(random_state) startprob_pdf = self.startprob_ startprob_cdf = np.cumsum(startprob_pdf) transmat_pdf = self.transmat_ transmat_cdf = np.cumsum(transmat_pdf, 1) # Initial state. rand = random_state.rand() currstate = (startprob_cdf > rand).argmax() hidden_states = [currstate] obs = [self._generate_sample_from_state( currstate, random_state=random_state)] for _ in range(n - 1): rand = random_state.rand() currstate = (transmat_cdf[currstate] > rand).argmax() hidden_states.append(currstate) obs.append(self._generate_sample_from_state( currstate, random_state=random_state)) return np.array(obs), np.array(hidden_states, dtype=int) def fit(self, obs): """Estimate model parameters. An initialization step is performed before entering the EM algorithm. If you want to avoid this step, pass proper ``init_params`` keyword argument to estimator's constructor. Parameters ---------- obs : list List of array-like observation sequences, each of which has shape (n_i, n_features), where n_i is the length of the i_th observation. Notes ----- In general, `logprob` should be non-decreasing unless aggressive pruning is used. Decreasing `logprob` is generally a sign of overfitting (e.g. a covariance parameter getting too small). You can fix this by getting more training data, or strengthening the appropriate subclass-specific regularization parameter. """ if self.algorithm not in decoder_algorithms: self._algorithm = "viterbi" self._init(obs, self.init_params) logprob = [] for i in range(self.n_iter): # Expectation step stats = self._initialize_sufficient_statistics() curr_logprob = 0 for seq in obs: framelogprob = self._compute_log_likelihood(seq) lpr, fwdlattice = self._do_forward_pass(framelogprob) bwdlattice = self._do_backward_pass(framelogprob) gamma = fwdlattice + bwdlattice posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T curr_logprob += lpr self._accumulate_sufficient_statistics( stats, seq, framelogprob, posteriors, fwdlattice, bwdlattice, self.params) logprob.append(curr_logprob) # Check for convergence. 
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh: break # Maximization step self._do_mstep(stats, self.params) return self def _get_algorithm(self): "decoder algorithm" return self._algorithm def _set_algorithm(self, algorithm): if algorithm not in decoder_algorithms: raise ValueError("algorithm must be one of the decoder_algorithms") self._algorithm = algorithm algorithm = property(_get_algorithm, _set_algorithm) def _get_startprob(self): """Mixing startprob for each state.""" return np.exp(self._log_startprob) def _set_startprob(self, startprob): if startprob is None: startprob = np.tile(1.0 / self.n_components, self.n_components) else: startprob = np.asarray(startprob, dtype=np.float) # check if there exists a component whose value is exactly zero # if so, add a small number and re-normalize if not np.alltrue(startprob): normalize(startprob) if len(startprob) != self.n_components: raise ValueError('startprob must have length n_components') if not np.allclose(np.sum(startprob), 1.0): raise ValueError('startprob must sum to 1.0') self._log_startprob = np.log(np.asarray(startprob).copy()) startprob_ = property(_get_startprob, _set_startprob) def _get_transmat(self): """Matrix of transition probabilities.""" return np.exp(self._log_transmat) def _set_transmat(self, transmat): if transmat is None: transmat = np.tile(1.0 / self.n_components, (self.n_components, self.n_components)) # check if there exists a component whose value is exactly zero # if so, add a small number and re-normalize if not np.alltrue(transmat): normalize(transmat, axis=1) if (np.asarray(transmat).shape != (self.n_components, self.n_components)): raise ValueError('transmat must have shape ' '(n_components, n_components)') if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)): raise ValueError('Rows of transmat must sum to 1.0') self._log_transmat = np.log(np.asarray(transmat).copy()) underflow_idx = np.isnan(self._log_transmat) self._log_transmat[underflow_idx] = NEGINF transmat_ = property(_get_transmat, _set_transmat) def _do_viterbi_pass(self, framelogprob): n_observations, n_components = framelogprob.shape state_sequence, logprob = _hmmc._viterbi( n_observations, n_components, self._log_startprob, self._log_transmat, framelogprob) return logprob, state_sequence def _do_forward_pass(self, framelogprob): n_observations, n_components = framelogprob.shape fwdlattice = np.zeros((n_observations, n_components)) _hmmc._forward(n_observations, n_components, self._log_startprob, self._log_transmat, framelogprob, fwdlattice) fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF return logsumexp(fwdlattice[-1]), fwdlattice def _do_backward_pass(self, framelogprob): n_observations, n_components = framelogprob.shape bwdlattice = np.zeros((n_observations, n_components)) _hmmc._backward(n_observations, n_components, self._log_startprob, self._log_transmat, framelogprob, bwdlattice) bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF return bwdlattice def _compute_log_likelihood(self, obs): pass def _generate_sample_from_state(self, state, random_state=None): pass def _init(self, obs, params): if 's' in params: self.startprob_.fill(1.0 / self.n_components) if 't' in params: self.transmat_.fill(1.0 / self.n_components) # Methods used by self.fit() def _initialize_sufficient_statistics(self): stats = {'nobs': 0, 'start': np.zeros(self.n_components), 'trans': np.zeros((self.n_components, self.n_components))} return stats def _accumulate_sufficient_statistics(self, stats, seq, framelogprob, posteriors, fwdlattice, bwdlattice, params): 
stats['nobs'] += 1 if 's' in params: stats['start'] += posteriors[0] if 't' in params: n_observations, n_components = framelogprob.shape # when the sample is of length 1, it contains no transitions # so there is no reason to update our trans. matrix estimate if n_observations > 1: lneta = np.zeros((n_observations - 1, n_components, n_components)) lnP = logsumexp(fwdlattice[-1]) _hmmc._compute_lneta(n_observations, n_components, fwdlattice, self._log_transmat, bwdlattice, framelogprob, lnP, lneta) stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700)) def _do_mstep(self, stats, params): # Based on Huang, Acero, Hon, "Spoken Language Processing", # p. 443 - 445 if self.startprob_prior is None: self.startprob_prior = 1.0 if self.transmat_prior is None: self.transmat_prior = 1.0 if 's' in params: self.startprob_ = normalize( np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20)) if 't' in params: transmat_ = normalize( np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20), axis=1) self.transmat_ = transmat_ class GaussianHMM(_BaseHMM): """Hidden Markov Model with Gaussian emissions Representation of a hidden Markov model probability distribution. This class allows for easy evaluation of, sampling from, and maximum-likelihood estimation of the parameters of a HMM. .. warning:: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Parameters ---------- n_components : int Number of states. ``_covariance_type`` : string String describing the type of covariance parameters to use. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. Attributes ---------- ``_covariance_type`` : string String describing the type of covariance parameters used by the model. Must be one of 'spherical', 'tied', 'diag', 'full'. n_features : int Dimensionality of the Gaussian emissions. n_components : int Number of states in the model. transmat : array, shape (`n_components`, `n_components`) Matrix of transition probabilities between states. startprob : array, shape ('n_components`,) Initial state occupation distribution. means : array, shape (`n_components`, `n_features`) Mean parameters for each state. covars : array Covariance parameters for each state. The shape depends on ``_covariance_type``:: (`n_components`,) if 'spherical', (`n_features`, `n_features`) if 'tied', (`n_components`, `n_features`) if 'diag', (`n_components`, `n_features`, `n_features`) if 'full' random_state: RandomState or an int seed (0 by default) A random number generator instance n_iter : int, optional Number of iterations to perform. thresh : float, optional Convergence threshold. params : string, optional Controls which parameters are updated in the training process. Can contain any combination of 's' for startprob, 't' for transmat, 'm' for means, and 'c' for covars. Defaults to all parameters. init_params : string, optional Controls which parameters are initialized prior to training. Can contain any combination of 's' for startprob, 't' for transmat, 'm' for means, and 'c' for covars. Defaults to all parameters. Examples -------- >>> from sklearn.hmm import GaussianHMM >>> GaussianHMM(n_components=2) ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE GaussianHMM(algorithm='viterbi',... 
See Also -------- GMM : Gaussian mixture model """ def __init__(self, n_components=1, covariance_type='diag', startprob=None, transmat=None, startprob_prior=None, transmat_prior=None, algorithm="viterbi", means_prior=None, means_weight=0, covars_prior=1e-2, covars_weight=1, random_state=None, n_iter=10, thresh=1e-2, params=string.ascii_letters, init_params=string.ascii_letters): _BaseHMM.__init__(self, n_components, startprob, transmat, startprob_prior=startprob_prior, transmat_prior=transmat_prior, algorithm=algorithm, random_state=random_state, n_iter=n_iter, thresh=thresh, params=params, init_params=init_params) self._covariance_type = covariance_type if not covariance_type in ['spherical', 'tied', 'diag', 'full']: raise ValueError('bad covariance_type') self.means_prior = means_prior self.means_weight = means_weight self.covars_prior = covars_prior self.covars_weight = covars_weight @property def covariance_type(self): """Covariance type of the model. Must be one of 'spherical', 'tied', 'diag', 'full'. """ return self._covariance_type def _get_means(self): """Mean parameters for each state.""" return self._means_ def _set_means(self, means): means = np.asarray(means) if (hasattr(self, 'n_features') and means.shape != (self.n_components, self.n_features)): raise ValueError('means must have shape ' '(n_components, n_features)') self._means_ = means.copy() self.n_features = self._means_.shape[1] means_ = property(_get_means, _set_means) def _get_covars(self): """Return covars as a full matrix.""" if self._covariance_type == 'full': return self._covars_ elif self._covariance_type == 'diag': return [np.diag(cov) for cov in self._covars_] elif self._covariance_type == 'tied': return [self._covars_] * self.n_components elif self._covariance_type == 'spherical': return [np.eye(self.n_features) * f for f in self._covars_] def _set_covars(self, covars): covars = np.asarray(covars) _validate_covars(covars, self._covariance_type, self.n_components) self._covars_ = covars.copy() covars_ = property(_get_covars, _set_covars) def _compute_log_likelihood(self, obs): check_is_fitted(self, '_means_') return log_multivariate_normal_density( obs, self._means_, self._covars_, self._covariance_type) def _generate_sample_from_state(self, state, random_state=None): if self._covariance_type == 'tied': cv = self._covars_ else: cv = self._covars_[state] return sample_gaussian(self._means_[state], cv, self._covariance_type, random_state=random_state) def _init(self, obs, params='stmc'): super(GaussianHMM, self)._init(obs, params=params) if (hasattr(self, 'n_features') and self.n_features != obs[0].shape[1]): raise ValueError('Unexpected number of dimensions, got %s but ' 'expected %s' % (obs[0].shape[1], self.n_features)) self.n_features = obs[0].shape[1] if 'm' in params: self._means_ = cluster.KMeans( n_clusters=self.n_components).fit(obs[0]).cluster_centers_ if 'c' in params: cv = np.cov(obs[0].T) if not cv.shape: cv.shape = (1, 1) self._covars_ = distribute_covar_matrix_to_match_covariance_type( cv, self._covariance_type, self.n_components) self._covars_[self._covars_ == 0] = 1e-5 def _initialize_sufficient_statistics(self): stats = super(GaussianHMM, self)._initialize_sufficient_statistics() stats['post'] = np.zeros(self.n_components) stats['obs'] = np.zeros((self.n_components, self.n_features)) stats['obs**2'] = np.zeros((self.n_components, self.n_features)) stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features, self.n_features)) return stats def _accumulate_sufficient_statistics(self, stats, 
obs, framelogprob, posteriors, fwdlattice, bwdlattice, params): super(GaussianHMM, self)._accumulate_sufficient_statistics( stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params) if 'm' in params or 'c' in params: stats['post'] += posteriors.sum(axis=0) stats['obs'] += np.dot(posteriors.T, obs) if 'c' in params: if self._covariance_type in ('spherical', 'diag'): stats['obs**2'] += np.dot(posteriors.T, obs ** 2) elif self._covariance_type in ('tied', 'full'): for t, o in enumerate(obs): obsobsT = np.outer(o, o) for c in range(self.n_components): stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT def _do_mstep(self, stats, params): super(GaussianHMM, self)._do_mstep(stats, params) # Based on Huang, Acero, Hon, "Spoken Language Processing", # p. 443 - 445 denom = stats['post'][:, np.newaxis] if 'm' in params: prior = self.means_prior weight = self.means_weight if prior is None: weight = 0 prior = 0 self._means_ = (weight * prior + stats['obs']) / (weight + denom) if 'c' in params: covars_prior = self.covars_prior covars_weight = self.covars_weight if covars_prior is None: covars_weight = 0 covars_prior = 0 means_prior = self.means_prior means_weight = self.means_weight if means_prior is None: means_weight = 0 means_prior = 0 meandiff = self._means_ - means_prior if self._covariance_type in ('spherical', 'diag'): cv_num = (means_weight * (meandiff) ** 2 + stats['obs**2'] - 2 * self._means_ * stats['obs'] + self._means_ ** 2 * denom) cv_den = max(covars_weight - 1, 0) + denom self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den, 1e-5) if self._covariance_type == 'spherical': self._covars_ = np.tile( self._covars_.mean(1)[:, np.newaxis], (1, self._covars_.shape[1])) elif self._covariance_type in ('tied', 'full'): cvnum = np.empty((self.n_components, self.n_features, self.n_features)) for c in range(self.n_components): obsmean = np.outer(stats['obs'][c], self._means_[c]) cvnum[c] = (means_weight * np.outer(meandiff[c], meandiff[c]) + stats['obs*obs.T'][c] - obsmean - obsmean.T + np.outer(self._means_[c], self._means_[c]) * stats['post'][c]) cvweight = max(covars_weight - self.n_features, 0) if self._covariance_type == 'tied': self._covars_ = ((covars_prior + cvnum.sum(axis=0)) / (cvweight + stats['post'].sum())) elif self._covariance_type == 'full': self._covars_ = ((covars_prior + cvnum) / (cvweight + stats['post'][:, None, None])) def fit(self, obs): """Estimate model parameters. An initialization step is performed before entering the EM algorithm. If you want to avoid this step, pass proper ``init_params`` keyword argument to estimator's constructor. Parameters ---------- obs : list List of array-like observation sequences, each of which has shape (n_i, n_features), where n_i is the length of the i_th observation. Notes ----- In general, `logprob` should be non-decreasing unless aggressive pruning is used. Decreasing `logprob` is generally a sign of overfitting (e.g. the covariance parameter on one or more components becomminging too small). You can fix this by getting more training data, or increasing covars_prior. """ return super(GaussianHMM, self).fit(obs) class MultinomialHMM(_BaseHMM): """Hidden Markov Model with multinomial (discrete) emissions .. warning:: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Attributes ---------- n_components : int Number of states in the model. n_symbols : int Number of possible symbols emitted by the model (in the observations). 
transmat : array, shape (`n_components`, `n_components`) Matrix of transition probabilities between states. startprob : array, shape ('n_components`,) Initial state occupation distribution. emissionprob : array, shape ('n_components`, 'n_symbols`) Probability of emitting a given symbol when in each state. random_state: RandomState or an int seed (0 by default) A random number generator instance n_iter : int, optional Number of iterations to perform. thresh : float, optional Convergence threshold. params : string, optional Controls which parameters are updated in the training process. Can contain any combination of 's' for startprob, 't' for transmat, 'e' for emmissionprob. Defaults to all parameters. init_params : string, optional Controls which parameters are initialized prior to training. Can contain any combination of 's' for startprob, 't' for transmat, 'e' for emmissionprob. Defaults to all parameters. Examples -------- >>> from sklearn.hmm import MultinomialHMM >>> MultinomialHMM(n_components=2) ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE MultinomialHMM(algorithm='viterbi',... See Also -------- GaussianHMM : HMM with Gaussian emissions """ def __init__(self, n_components=1, startprob=None, transmat=None, startprob_prior=None, transmat_prior=None, algorithm="viterbi", random_state=None, n_iter=10, thresh=1e-2, params=string.ascii_letters, init_params=string.ascii_letters): """Create a hidden Markov model with multinomial emissions. Parameters ---------- n_components : int Number of states. """ _BaseHMM.__init__(self, n_components, startprob, transmat, startprob_prior=startprob_prior, transmat_prior=transmat_prior, algorithm=algorithm, random_state=random_state, n_iter=n_iter, thresh=thresh, params=params, init_params=init_params) def _get_emissionprob(self): """Emission probability distribution for each state.""" return np.exp(self._log_emissionprob) def _set_emissionprob(self, emissionprob): emissionprob = np.asarray(emissionprob) if hasattr(self, 'n_symbols') and \ emissionprob.shape != (self.n_components, self.n_symbols): raise ValueError('emissionprob must have shape ' '(n_components, n_symbols)') # check if there exists a component whose value is exactly zero # if so, add a small number and re-normalize if not np.alltrue(emissionprob): normalize(emissionprob) self._log_emissionprob = np.log(emissionprob) underflow_idx = np.isnan(self._log_emissionprob) self._log_emissionprob[underflow_idx] = NEGINF self.n_symbols = self._log_emissionprob.shape[1] emissionprob_ = property(_get_emissionprob, _set_emissionprob) def _compute_log_likelihood(self, obs): check_is_fitted(self, 'emissionprob_') return self._log_emissionprob[:, obs].T def _generate_sample_from_state(self, state, random_state=None): cdf = np.cumsum(self.emissionprob_[state, :]) random_state = check_random_state(random_state) rand = random_state.rand() symbol = (cdf > rand).argmax() return symbol def _init(self, obs, params='ste'): super(MultinomialHMM, self)._init(obs, params=params) self.random_state = check_random_state(self.random_state) if 'e' in params: if not hasattr(self, 'n_symbols'): symbols = set() for o in obs: symbols = symbols.union(set(o)) self.n_symbols = len(symbols) emissionprob = normalize(self.random_state.rand(self.n_components, self.n_symbols), 1) self.emissionprob_ = emissionprob def _initialize_sufficient_statistics(self): stats = super(MultinomialHMM, self)._initialize_sufficient_statistics() stats['obs'] = np.zeros((self.n_components, self.n_symbols)) return stats def 
_accumulate_sufficient_statistics(self, stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params): super(MultinomialHMM, self)._accumulate_sufficient_statistics( stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params) if 'e' in params: for t, symbol in enumerate(obs): stats['obs'][:, symbol] += posteriors[t] def _do_mstep(self, stats, params): super(MultinomialHMM, self)._do_mstep(stats, params) if 'e' in params: self.emissionprob_ = (stats['obs'] / stats['obs'].sum(1)[:, np.newaxis]) def _check_input_symbols(self, obs): """check if input can be used for Multinomial.fit input must be both positive integer array and every element must be continuous. e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not """ symbols = np.asarray(obs).flatten() if symbols.dtype.kind != 'i': # input symbols must be integer return False if len(symbols) == 1: # input too short return False if np.any(symbols < 0): # input contains negative intiger return False symbols.sort() if np.any(np.diff(symbols) > 1): # input is discontinous return False return True def fit(self, obs, **kwargs): """Estimate model parameters. An initialization step is performed before entering the EM algorithm. If you want to avoid this step, pass proper ``init_params`` keyword argument to estimator's constructor. Parameters ---------- obs : list List of array-like observation sequences, each of which has shape (n_i, n_features), where n_i is the length of the i_th observation. """ err_msg = ("Input must be both positive integer array and " "every element must be continuous, but %s was given.") if not self._check_input_symbols(obs): raise ValueError(err_msg % obs) return _BaseHMM.fit(self, obs, **kwargs) class GMMHMM(_BaseHMM): """Hidden Markov Model with Gaussin mixture emissions .. warning:: The HMM module and its functions will be removed in 0.17 as it no longer falls within the project's scope and API. Attributes ---------- n_components : int Number of states in the model. transmat : array, shape (`n_components`, `n_components`) Matrix of transition probabilities between states. startprob : array, shape ('n_components`,) Initial state occupation distribution. gmms : array of GMM objects, length `n_components` GMM emission distributions for each state. random_state : RandomState or an int seed (0 by default) A random number generator instance n_iter : int, optional Number of iterations to perform. thresh : float, optional Convergence threshold. init_params : string, optional Controls which parameters are initialized prior to training. Can contain any combination of 's' for startprob, 't' for transmat, 'm' for means, 'c' for covars, and 'w' for GMM mixing weights. Defaults to all parameters. params : string, optional Controls which parameters are updated in the training process. Can contain any combination of 's' for startprob, 't' for transmat, 'm' for means, and 'c' for covars, and 'w' for GMM mixing weights. Defaults to all parameters. Examples -------- >>> from sklearn.hmm import GMMHMM >>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag') ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE GMMHMM(algorithm='viterbi', covariance_type='diag',... 
See Also -------- GaussianHMM : HMM with Gaussian emissions """ def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None, startprob_prior=None, transmat_prior=None, algorithm="viterbi", gmms=None, covariance_type='diag', covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2, params=string.ascii_letters, init_params=string.ascii_letters): """Create a hidden Markov model with GMM emissions. Parameters ---------- n_components : int Number of states. """ _BaseHMM.__init__(self, n_components, startprob, transmat, startprob_prior=startprob_prior, transmat_prior=transmat_prior, algorithm=algorithm, random_state=random_state, n_iter=n_iter, thresh=thresh, params=params, init_params=init_params) # XXX: Hotfit for n_mix that is incompatible with the scikit's # BaseEstimator API self.n_mix = n_mix self._covariance_type = covariance_type self.covars_prior = covars_prior self.gmms = gmms if gmms is None: gmms = [] for x in range(self.n_components): if covariance_type is None: g = GMM(n_mix) else: g = GMM(n_mix, covariance_type=covariance_type) gmms.append(g) self.gmms_ = gmms # Read-only properties. @property def covariance_type(self): """Covariance type of the model. Must be one of 'spherical', 'tied', 'diag', 'full'. """ return self._covariance_type def _compute_log_likelihood(self, obs): return np.array([g.score(obs) for g in self.gmms_]).T def _generate_sample_from_state(self, state, random_state=None): return self.gmms_[state].sample(1, random_state=random_state).flatten() def _init(self, obs, params='stwmc'): super(GMMHMM, self)._init(obs, params=params) allobs = np.concatenate(obs, 0) for g in self.gmms_: g.set_params(init_params=params, n_iter=0) g.fit(allobs) def _initialize_sufficient_statistics(self): stats = super(GMMHMM, self)._initialize_sufficient_statistics() stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_] stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_] stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_] return stats def _accumulate_sufficient_statistics(self, stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params): super(GMMHMM, self)._accumulate_sufficient_statistics( stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params) for state, g in enumerate(self.gmms_): _, lgmm_posteriors = g.score_samples(obs) lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis] + np.finfo(np.float).eps) gmm_posteriors = np.exp(lgmm_posteriors) tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type) n_features = g.means_.shape[1] tmp_gmm._set_covars( distribute_covar_matrix_to_match_covariance_type( np.eye(n_features), g.covariance_type, g.n_components)) norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params) if np.any(np.isnan(tmp_gmm.covars_)): raise ValueError stats['norm'][state] += norm if 'm' in params: stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis] if 'c' in params: if tmp_gmm.covariance_type == 'tied': stats['covars'][state] += tmp_gmm.covars_ * norm.sum() else: cvnorm = np.copy(norm) shape = np.ones(tmp_gmm.covars_.ndim) shape[0] = np.shape(tmp_gmm.covars_)[0] cvnorm.shape = shape stats['covars'][state] += tmp_gmm.covars_ * cvnorm def _do_mstep(self, stats, params): super(GMMHMM, self)._do_mstep(stats, params) # All that is left to do is to apply covars_prior to the # parameters updated in _accumulate_sufficient_statistics. 
for state, g in enumerate(self.gmms_): n_features = g.means_.shape[1] norm = stats['norm'][state] if 'w' in params: g.weights_ = normalize(norm) if 'm' in params: g.means_ = stats['means'][state] / norm[:, np.newaxis] if 'c' in params: if g.covariance_type == 'tied': g.covars_ = ((stats['covars'][state] + self.covars_prior * np.eye(n_features)) / norm.sum()) else: cvnorm = np.copy(norm) shape = np.ones(g.covars_.ndim) shape[0] = np.shape(g.covars_)[0] cvnorm.shape = shape if (g.covariance_type in ['spherical', 'diag']): g.covars_ = (stats['covars'][state] + self.covars_prior) / cvnorm elif g.covariance_type == 'full': eye = np.eye(n_features) g.covars_ = ((stats['covars'][state] + self.covars_prior * eye[np.newaxis]) / cvnorm)
bsd-3-clause
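The HMM record above defines the full fit / decode / score_samples / sample API of the deprecated sklearn.hmm module (removed in scikit-learn 0.17; the hmmlearn project is its successor). Below is a minimal, hedged usage sketch written against exactly that API; the toy data, the number of states, and all parameter values are illustrative assumptions, not part of the original file, and it only runs on scikit-learn versions that still ship sklearn.hmm.

# Hedged usage sketch for the deprecated sklearn.hmm.GaussianHMM API above.
import numpy as np
from sklearn.hmm import GaussianHMM  # removed in scikit-learn 0.17

rng = np.random.RandomState(0)
# Two well-separated clusters of 2-D observations, concatenated in time.
X = np.concatenate([rng.randn(50, 2), 5.0 + rng.randn(50, 2)])

# fit() expects a *list* of observation sequences of shape (n_i, n_features).
model = GaussianHMM(n_components=2, covariance_type='diag', n_iter=20)
model.fit([X])

# Viterbi decoding: joint log probability and the most likely state path.
logprob, states = model.decode(X, algorithm='viterbi')

# Per-frame state posteriors, and a freshly sampled sequence of length 10.
_, posteriors = model.score_samples(X)
obs, hidden = model.sample(n=10)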
xyguo/scikit-learn
sklearn/utils/testing.py
13
27312
"""Testing utilities.""" # Copyright (c) 2011, 2012 # Authors: Pietro Berkes, # Andreas Muller # Mathieu Blondel # Olivier Grisel # Arnaud Joly # Denis Engemann # Giorgio Patrini # Thierry Guillemot # License: BSD 3 clause import os import inspect import pkgutil import warnings import sys import re import platform import struct import scipy as sp import scipy.io from functools import wraps from operator import itemgetter try: # Python 2 from urllib2 import urlopen from urllib2 import HTTPError except ImportError: # Python 3+ from urllib.request import urlopen from urllib.error import HTTPError import tempfile import shutil import os.path as op import atexit # WindowsError only exist on Windows try: WindowsError except NameError: WindowsError = None import sklearn from sklearn.base import BaseEstimator from sklearn.externals import joblib # Conveniently import all assertions in one place. from nose.tools import assert_equal from nose.tools import assert_not_equal from nose.tools import assert_true from nose.tools import assert_false from nose.tools import assert_raises from nose.tools import raises from nose import SkipTest from nose import with_setup from numpy.testing import assert_almost_equal from numpy.testing import assert_array_equal from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_less from numpy.testing import assert_approx_equal import numpy as np from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin) from sklearn.cluster import DBSCAN __all__ = ["assert_equal", "assert_not_equal", "assert_raises", "assert_raises_regexp", "raises", "with_setup", "assert_true", "assert_false", "assert_almost_equal", "assert_array_equal", "assert_array_almost_equal", "assert_array_less", "assert_less", "assert_less_equal", "assert_greater", "assert_greater_equal", "assert_approx_equal"] try: from nose.tools import assert_in, assert_not_in except ImportError: # Nose < 1.0.0 def assert_in(x, container): assert_true(x in container, msg="%r in %r" % (x, container)) def assert_not_in(x, container): assert_false(x in container, msg="%r in %r" % (x, container)) try: from nose.tools import assert_raises_regex except ImportError: # for Python 2 def assert_raises_regex(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs): """Helper function to check for message patterns in exceptions.""" not_raised = False try: callable_obj(*args, **kwargs) not_raised = True except expected_exception as e: error_message = str(e) if not re.compile(expected_regexp).search(error_message): raise AssertionError("Error message should match pattern " "%r. %r does not." 
% (expected_regexp, error_message)) if not_raised: raise AssertionError("%s not raised by %s" % (expected_exception.__name__, callable_obj.__name__)) # assert_raises_regexp is deprecated in Python 3.4 in favor of # assert_raises_regex but lets keep the backward compat in scikit-learn with # the old name for now assert_raises_regexp = assert_raises_regex def _assert_less(a, b, msg=None): message = "%r is not lower than %r" % (a, b) if msg is not None: message += ": " + msg assert a < b, message def _assert_greater(a, b, msg=None): message = "%r is not greater than %r" % (a, b) if msg is not None: message += ": " + msg assert a > b, message def assert_less_equal(a, b, msg=None): message = "%r is not lower than or equal to %r" % (a, b) if msg is not None: message += ": " + msg assert a <= b, message def assert_greater_equal(a, b, msg=None): message = "%r is not greater than or equal to %r" % (a, b) if msg is not None: message += ": " + msg assert a >= b, message def assert_warns(warning_class, func, *args, **kw): """Test that a certain warning occurs. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. func : callable Calable object to trigger warnings. *args : the positional arguments to `func`. **kw : the keyword arguments to `func` Returns ------- result : the return value of `func` """ # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") # Trigger a warning. result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = any(warning.category is warning_class for warning in w) if not found: raise AssertionError("%s did not give warning: %s( is %s)" % (func.__name__, warning_class, w)) return result def assert_warns_message(warning_class, message, func, *args, **kw): # very important to avoid uncontrolled state propagation """Test that a certain warning occurs and with a certain message. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. message : str | callable The entire message or a substring to test for. If callable, it takes a string as argument and will trigger an assertion error if it returns `False`. func : callable Calable object to trigger warnings. *args : the positional arguments to `func`. **kw : the keyword arguments to `func`. Returns ------- result : the return value of `func` """ clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") if hasattr(np, 'VisibleDeprecationWarning'): # Let's not catch the numpy internal DeprecationWarnings warnings.simplefilter('ignore', np.VisibleDeprecationWarning) # Trigger a warning. 
result = func(*args, **kw) # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = [issubclass(warning.category, warning_class) for warning in w] if not any(found): raise AssertionError("No warning raised for %s with class " "%s" % (func.__name__, warning_class)) message_found = False # Checks the message of all warnings belong to warning_class for index in [i for i, x in enumerate(found) if x]: # substring will match, the entire message with typo won't msg = w[index].message # For Python 3 compatibility msg = str(msg.args[0] if hasattr(msg, 'args') else msg) if callable(message): # add support for certain tests check_in_message = message else: check_in_message = lambda msg: message in msg if check_in_message(msg): message_found = True break if not message_found: raise AssertionError("Did not receive the message you expected " "('%s') for <%s>, got: '%s'" % (message, func.__name__, msg)) return result # To remove when we support numpy 1.7 def assert_no_warnings(func, *args, **kw): # XXX: once we may depend on python >= 2.6, this can be replaced by the # warnings module context manager. # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] if len(w) > 0: raise AssertionError("Got warnings when calling %s: %s" % (func.__name__, w)) return result def ignore_warnings(obj=None, category=Warning): """Context manager and decorator to ignore warnings. Note. Using this (in both variants) will clear all warnings from all python modules loaded. In case you need to test cross-module-warning-logging this is not your tool of choice. Parameters ---------- category : warning class, defaults to Warning. The category to filter. If Warning, all categories will be muted. Examples -------- >>> with ignore_warnings(): ... warnings.warn('buhuhuhu') >>> def nasty_warn(): ... warnings.warn('buhuhuhu') ... print(42) >>> ignore_warnings(nasty_warn)() 42 """ if callable(obj): return _IgnoreWarnings(category=category)(obj) else: return _IgnoreWarnings(category=category) class _IgnoreWarnings(object): """Improved and simplified Python warnings context manager and decorator. This class allows to ignore the warnings raise by a function. Copied from Python 2.7.5 and modified as required. Parameters ---------- category : tuple of warning class, defaut to Warning The category to filter. By default, all the categories will be muted. 
""" def __init__(self, category): self._record = True self._module = sys.modules['warnings'] self._entered = False self.log = [] self.category = category def __call__(self, fn): """Decorator to catch and hide warnings without visual nesting.""" @wraps(fn) def wrapper(*args, **kwargs): # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(): warnings.simplefilter("ignore", self.category) return fn(*args, **kwargs) return wrapper def __repr__(self): args = [] if self._record: args.append("record=True") if self._module is not sys.modules['warnings']: args.append("module=%r" % self._module) name = type(self).__name__ return "%s(%s)" % (name, ", ".join(args)) def __enter__(self): clean_warning_registry() # be safe and not propagate state + chaos warnings.simplefilter("ignore", self.category) if self._entered: raise RuntimeError("Cannot enter %r twice" % self) self._entered = True self._filters = self._module.filters self._module.filters = self._filters[:] self._showwarning = self._module.showwarning def __exit__(self, *exc_info): if not self._entered: raise RuntimeError("Cannot exit %r without entering first" % self) self._module.filters = self._filters self._module.showwarning = self._showwarning self.log[:] = [] clean_warning_registry() # be safe and not propagate state + chaos try: from nose.tools import assert_less except ImportError: assert_less = _assert_less try: from nose.tools import assert_greater except ImportError: assert_greater = _assert_greater def _assert_allclose(actual, desired, rtol=1e-7, atol=0, err_msg='', verbose=True): actual, desired = np.asanyarray(actual), np.asanyarray(desired) if np.allclose(actual, desired, rtol=rtol, atol=atol): return msg = ('Array not equal to tolerance rtol=%g, atol=%g: ' 'actual %s, desired %s') % (rtol, atol, actual, desired) raise AssertionError(msg) if hasattr(np.testing, 'assert_allclose'): assert_allclose = np.testing.assert_allclose else: assert_allclose = _assert_allclose def assert_raise_message(exceptions, message, function, *args, **kwargs): """Helper function to test error messages in exceptions. Parameters ---------- exceptions : exception or tuple of exception Name of the estimator func : callable Calable object to raise error *args : the positional arguments to `func`. **kw : the keyword arguments to `func` """ try: function(*args, **kwargs) except exceptions as e: error_message = str(e) if message not in error_message: raise AssertionError("Error message does not include the expected" " string: %r. Observed error message: %r" % (message, error_message)) else: # concatenate exception names if isinstance(exceptions, tuple): names = " or ".join(e.__name__ for e in exceptions) else: names = exceptions.__name__ raise AssertionError("%s not raised by %s" % (names, function.__name__)) def fake_mldata(columns_dict, dataname, matfile, ordering=None): """Create a fake mldata data set. Parameters ---------- columns_dict : dict, keys=str, values=ndarray Contains data as columns_dict[column_name] = array of data. dataname : string Name of data set. matfile : string or file object The file name string or the file-like object of the output file. ordering : list, default None List of column_names, determines the ordering in the data set. Notes ----- This function transposes all arrays, while fetch_mldata only transposes 'data', keep that into account in the tests. 
""" datasets = dict(columns_dict) # transpose all variables for name in datasets: datasets[name] = datasets[name].T if ordering is None: ordering = sorted(list(datasets.keys())) # NOTE: setting up this array is tricky, because of the way Matlab # re-packages 1D arrays datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)), dtype='object') for i, name in enumerate(ordering): datasets['mldata_descr_ordering'][0, i] = name scipy.io.savemat(matfile, datasets, oned_as='column') class mock_mldata_urlopen(object): def __init__(self, mock_datasets): """Object that mocks the urlopen function to fake requests to mldata. `mock_datasets` is a dictionary of {dataset_name: data_dict}, or {dataset_name: (data_dict, ordering). `data_dict` itself is a dictionary of {column_name: data_array}, and `ordering` is a list of column_names to determine the ordering in the data set (see `fake_mldata` for details). When requesting a dataset with a name that is in mock_datasets, this object creates a fake dataset in a StringIO object and returns it. Otherwise, it raises an HTTPError. """ self.mock_datasets = mock_datasets def __call__(self, urlname): dataset_name = urlname.split('/')[-1] if dataset_name in self.mock_datasets: resource_name = '_' + dataset_name from io import BytesIO matfile = BytesIO() dataset = self.mock_datasets[dataset_name] ordering = None if isinstance(dataset, tuple): dataset, ordering = dataset fake_mldata(dataset, resource_name, matfile, ordering) matfile.seek(0) return matfile else: raise HTTPError(urlname, 404, dataset_name + " is not available", [], None) def install_mldata_mock(mock_datasets): # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets) def uninstall_mldata_mock(): # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = urlopen # Meta estimators need another estimator to be instantiated. META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputEstimator", "MultiOutputRegressor", "MultiOutputClassifier", "OutputCodeClassifier", "OneVsRestClassifier", "RFE", "RFECV", "BaseEnsemble"] # estimators that there is no way to default-construct sensibly OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV", "SelectFromModel"] # some trange ones DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'TfidfTransformer', 'TfidfVectorizer', 'IsotonicRegression', 'OneHotEncoder', 'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier', 'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures', 'GaussianRandomProjectionHash', 'HashingVectorizer', 'CheckingClassifier', 'PatchExtractor', 'CountVectorizer', # GradientBoosting base estimators, maybe should # exclude them in another way 'ZeroEstimator', 'ScaledLogOddsEstimator', 'QuantileEstimator', 'MeanEstimator', 'LogOddsEstimator', 'PriorProbabilityEstimator', '_SigmoidCalibration', 'VotingClassifier'] def all_estimators(include_meta_estimators=False, include_other=False, type_filter=None, include_dont_test=False): """Get a list of all estimators from sklearn. This function crawls the module and gets all classes that inherit from BaseEstimator. Classes that are defined in test-modules are not included. By default meta_estimators such as GridSearchCV are also not included. 
Parameters ---------- include_meta_estimators : boolean, default=False Whether to include meta-estimators that can be constructed using an estimator as their first argument. These are currently BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier, OneVsRestClassifier, RFE, RFECV. include_other : boolean, default=False Wether to include meta-estimators that are somehow special and can not be default-constructed sensibly. These are currently Pipeline, FeatureUnion and GridSearchCV include_dont_test : boolean, default=False Whether to include "special" label estimator or test processors. type_filter : string, list of string, or None, default=None Which kind of estimators should be returned. If None, no filter is applied and all estimators are returned. Possible values are 'classifier', 'regressor', 'cluster' and 'transformer' to get estimators only of these specific types, or a list of these to get the estimators that fit at least one of the types. Returns ------- estimators : list of tuples List of (name, class), where ``name`` is the class name as string and ``class`` is the actuall type of the class. """ def is_abstract(c): if not(hasattr(c, '__abstractmethods__')): return False if not len(c.__abstractmethods__): return False return True all_classes = [] # get parent folder path = sklearn.__path__ for importer, modname, ispkg in pkgutil.walk_packages( path=path, prefix='sklearn.', onerror=lambda x: None): if (".tests." in modname): continue module = __import__(modname, fromlist="dummy") classes = inspect.getmembers(module, inspect.isclass) all_classes.extend(classes) all_classes = set(all_classes) estimators = [c for c in all_classes if (issubclass(c[1], BaseEstimator) and c[0] != 'BaseEstimator')] # get rid of abstract base classes estimators = [c for c in estimators if not is_abstract(c[1])] if not include_dont_test: estimators = [c for c in estimators if not c[0] in DONT_TEST] if not include_other: estimators = [c for c in estimators if not c[0] in OTHER] # possibly get rid of meta estimators if not include_meta_estimators: estimators = [c for c in estimators if not c[0] in META_ESTIMATORS] if type_filter is not None: if not isinstance(type_filter, list): type_filter = [type_filter] else: type_filter = list(type_filter) # copy filtered_estimators = [] filters = {'classifier': ClassifierMixin, 'regressor': RegressorMixin, 'transformer': TransformerMixin, 'cluster': ClusterMixin} for name, mixin in filters.items(): if name in type_filter: type_filter.remove(name) filtered_estimators.extend([est for est in estimators if issubclass(est[1], mixin)]) estimators = filtered_estimators if type_filter: raise ValueError("Parameter type_filter must be 'classifier', " "'regressor', 'transformer', 'cluster' or " "None, got" " %s." % repr(type_filter)) # drop duplicates, sort for reproducibility # itemgetter is used to ensure the sort does not extend to the 2nd item of # the tuple return sorted(set(estimators), key=itemgetter(0)) def set_random_state(estimator, random_state=0): """Set random state of an estimator if it has the `random_state` param. Classes for whom random_state is deprecated are ignored. Currently DBSCAN is one such class. 
""" if isinstance(estimator, DBSCAN): return if "random_state" in estimator.get_params(): estimator.set_params(random_state=random_state) def if_matplotlib(func): """Test decorator that skips test if matplotlib not installed.""" @wraps(func) def run_test(*args, **kwargs): try: import matplotlib matplotlib.use('Agg', warn=False) # this fails if no $DISPLAY specified import matplotlib.pyplot as plt plt.figure() except ImportError: raise SkipTest('Matplotlib not available.') else: return func(*args, **kwargs) return run_test def skip_if_32bit(func): """Test decorator that skips tests on 32bit platforms.""" @wraps(func) def run_test(*args, **kwargs): bits = 8 * struct.calcsize("P") if bits == 32: raise SkipTest('Test skipped on 32bit platforms.') else: return func(*args, **kwargs) return run_test def if_not_mac_os(versions=('10.7', '10.8', '10.9'), message='Multi-process bug in Mac OS X >= 10.7 ' '(see issue #636)'): """Test decorator that skips test if OS is Mac OS X and its major version is one of ``versions``. """ warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed" " in 0.19: use the safer and more generic" " if_safe_multiprocessing_with_blas instead", DeprecationWarning) mac_version, _, _ = platform.mac_ver() skip = '.'.join(mac_version.split('.')[:2]) in versions def decorator(func): if skip: @wraps(func) def func(*args, **kwargs): raise SkipTest(message) return func return decorator def if_safe_multiprocessing_with_blas(func): """Decorator for tests involving both BLAS calls and multiprocessing. Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with some implementation of BLAS (or other libraries that manage an internal posix thread pool) can cause a crash or a freeze of the Python process. In practice all known packaged distributions (from Linux distros or Anaconda) of BLAS under Linux seems to be safe. So we this problem seems to only impact OSX users. This wrapper makes it possible to skip tests that can possibly cause this crash under OS X with. Under Python 3.4+ it is possible to use the `forkserver` start method for multiprocessing to avoid this issue. However it can cause pickling errors on interactively defined functions. It therefore not enabled by default. """ @wraps(func) def run_test(*args, **kwargs): if sys.platform == 'darwin': raise SkipTest( "Possible multi-process bug with some BLAS") return func(*args, **kwargs) return run_test def clean_warning_registry(): """Safe way to reset warnings.""" warnings.resetwarnings() reg = "__warningregistry__" for mod_name, mod in list(sys.modules.items()): if 'six.moves' in mod_name: continue if hasattr(mod, reg): getattr(mod, reg).clear() def check_skip_network(): if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)): raise SkipTest("Text tutorial requires large dataset download") def check_skip_travis(): """Skip test if being run on Travis.""" if os.environ.get('TRAVIS') == "true": raise SkipTest("This test needs to be skipped on Travis") def _delete_folder(folder_path, warn=False): """Utility function to cleanup a temporary folder if still existing. Copy from joblib.pool (for independence). 
""" try: if os.path.exists(folder_path): # This can fail under windows, # but will succeed when called by atexit shutil.rmtree(folder_path) except WindowsError: if warn: warnings.warn("Could not delete temporary folder %s" % folder_path) class TempMemmap(object): def __init__(self, data, mmap_mode='r'): self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_') self.mmap_mode = mmap_mode self.data = data def __enter__(self): fpath = op.join(self.temp_folder, 'data.pkl') joblib.dump(self.data, fpath) data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode) atexit.register(lambda: _delete_folder(self.temp_folder, warn=True)) return data_read_only def __exit__(self, exc_type, exc_val, exc_tb): _delete_folder(self.temp_folder) with_network = with_setup(check_skip_network) with_travis = with_setup(check_skip_travis)
bsd-3-clause
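The testing helpers in the record above are thin wrappers around warnings.catch_warnings and nose assertions. The sketch below shows the typical call pattern for assert_warns_message, assert_raise_message and ignore_warnings as they are defined in that file; the noisy() helper is a hypothetical function invented purely to trigger a warning and an exception.

import warnings
from sklearn.utils.testing import (assert_warns_message,
                                   assert_raise_message, ignore_warnings)

def noisy(x):
    # hypothetical function used only for illustration
    warnings.warn("x should be positive", UserWarning)
    if x < 0:
        raise ValueError("negative input: %r" % x)
    return 2 * x

# Assert both the warning class and a substring of its message.
assert_warns_message(UserWarning, "should be positive", noisy, 1)

# Assert the exception type and that its message contains the given string.
assert_raise_message(ValueError, "negative input", noisy, -1)

# Mute warnings, either as a context manager (shown) or as a decorator.
with ignore_warnings(category=UserWarning):
    noisy(1)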
hlin117/statsmodels
statsmodels/tsa/filters/_utils.py
29
4391
from functools import wraps

from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.base import datetools
from statsmodels.tsa.tsatools import freq_to_period


def _get_pandas_wrapper(X, trim_head=None, trim_tail=None, names=None):
    index = X.index
    #TODO: allow use index labels
    if trim_head is None and trim_tail is None:
        index = index
    elif trim_tail is None:
        index = index[trim_head:]
    elif trim_head is None:
        index = index[:-trim_tail]
    else:
        index = index[trim_head:-trim_tail]
    if hasattr(X, "columns"):
        if names is None:
            names = X.columns
        return lambda x : X.__class__(x, index=index, columns=names)
    else:
        if names is None:
            names = X.name
        return lambda x : X.__class__(x, index=index, name=names)


def _maybe_get_pandas_wrapper(X, trim_head=None, trim_tail=None):
    """
    If using pandas returns a function to wrap the results, e.g., wrapper(X)
    trim is an integer for the symmetric truncation of the series in some
    filters.
    otherwise returns None
    """
    if _is_using_pandas(X, None):
        return _get_pandas_wrapper(X, trim_head, trim_tail)
    else:
        return


def _maybe_get_pandas_wrapper_freq(X, trim=None):
    if _is_using_pandas(X, None):
        index = X.index
        func = _get_pandas_wrapper(X, trim)
        freq = index.inferred_freq
        return func, freq
    else:
        return lambda x : x, None


def pandas_wrapper(func, trim_head=None, trim_tail=None, names=None,
                   *args, **kwargs):
    @wraps(func)
    def new_func(X, *args, **kwargs):
        # quick pass-through for do nothing case
        if not _is_using_pandas(X, None):
            return func(X, *args, **kwargs)

        wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail, names)
        ret = func(X, *args, **kwargs)
        ret = wrapper_func(ret)
        return ret

    return new_func


def pandas_wrapper_bunch(func, trim_head=None, trim_tail=None,
                         names=None, *args, **kwargs):
    @wraps(func)
    def new_func(X, *args, **kwargs):
        # quick pass-through for do nothing case
        if not _is_using_pandas(X, None):
            return func(X, *args, **kwargs)

        wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail, names)
        ret = func(X, *args, **kwargs)
        ret = wrapper_func(ret)
        return ret

    return new_func


def pandas_wrapper_predict(func, trim_head=None, trim_tail=None,
                           columns=None, *args, **kwargs):
    pass


def pandas_wrapper_freq(func, trim_head=None, trim_tail=None,
                        freq_kw='freq', columns=None, *args, **kwargs):
    """
    Return a new function that catches the incoming X, checks if it's pandas,
    calls the functions as is. Then wraps the results in the incoming index.

    Deals with frequencies. Expects that the function returns a tuple,
    a Bunch object, or a pandas-object.
    """
    @wraps(func)
    def new_func(X, *args, **kwargs):
        # quick pass-through for do nothing case
        if not _is_using_pandas(X, None):
            return func(X, *args, **kwargs)

        wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail, columns)
        index = X.index
        freq = index.inferred_freq
        kwargs.update({freq_kw : freq_to_period(freq)})
        ret = func(X, *args, **kwargs)
        ret = wrapper_func(ret)
        return ret

    return new_func


def dummy_func(X):
    return X

def dummy_func_array(X):
    return X.values

def dummy_func_pandas_columns(X):
    return X.values

def dummy_func_pandas_series(X):
    return X['A']

import pandas as pd
import numpy as np


def test_pandas_freq_decorator():
    X = pd.util.testing.makeDataFrame()
    # in X, get a function back that returns an X with the same columns
    func = pandas_wrapper(dummy_func)

    np.testing.assert_equal(func(X.values), X)

    func = pandas_wrapper(dummy_func_array)
    pd.util.testing.assert_frame_equal(func(X), X)

    expected = X.rename(columns=dict(zip('ABCD', 'EFGH')))
    func = pandas_wrapper(dummy_func_array, names=list('EFGH'))
    pd.util.testing.assert_frame_equal(func(X), expected)
bsd-3-clause
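A short sketch of how the pandas_wrapper decorator above behaves at the call site: ndarray input passes straight through, while pandas input is re-wrapped with its original index and name after the computation. The demean() function and the sample series are illustrative assumptions, and _utils is a private statsmodels module, so the import path may change between releases.

import numpy as np
import pandas as pd
from statsmodels.tsa.filters._utils import pandas_wrapper  # private module

def demean(x):
    # index-agnostic NumPy-style computation
    return x - x.mean()

wrapped = pandas_wrapper(demean)

s = pd.Series(np.arange(6.0),
              index=pd.date_range('2000-01-01', periods=6), name='y')

wrapped(s.values)  # ndarray in -> ndarray out (pass-through branch)
wrapped(s)         # Series in -> Series out, index and name restored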
mdeger/nest-simulator
topology/examples/test_3d.py
13
2824
# -*- coding: utf-8 -*-
#
# test_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.

'''
NEST Topology Module

EXPERIMENTAL example of 3d layer.

3d layers are currently not supported, use at your own risk!

Hans Ekkehard Plesser, UMB

This example uses the function GetChildren, which is deprecated. A
deprecation warning is therefore issued. For details about deprecated
functions, see documentation.
'''

import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

pylab.ion()

nest.ResetKernel()

# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5, 0.5), random.uniform(-0.5, 0.5),
        random.uniform(-0.5, 0.5)]
       for j in range(1000)]

l1 = topo.CreateLayer(
    {'extent': [1.5, 1.5, 1.5],  # must specify 3d extent AND center
     'center': [0., 0., 0.],
     'positions': pos,
     'elements': 'iaf_psc_alpha'})

# visualize
# xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
# xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']

# l1_children is a work-around until NEST 3.0 is released
l1_children = nest.GetChildren(l1)[0]

# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(l1_children))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')

# full connections in volume [-0.2,0.2]**3
topo.ConnectLayers(l1, l1,
                   {'connection_type': 'divergent', 'allow_autapses': False,
                    'mask': {'volume': {'lower_left': [-0.2, -0.2, -0.2],
                                        'upper_right': [0.2, 0.2, 0.2]}}})

# show connections from center element
# sender shown in red, targets in green
ctr = topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')

tgts = topo.GetTargetNodes(ctr, l1)[0]
d = topo.Distance(ctr, tgts)

plt.figure()
plt.hist(d, 25)
# plt.show()
gpl-2.0
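For contrast with the experimental 3d layer in the record above, here is a hedged 2d sketch using the same NEST 2.x topology calls (grid placement plus a rectangular mask instead of free 3d positions and a volume mask). The layer size and mask bounds are arbitrary choices for illustration, and the exact mask names assume the NEST 2.x topology API used by the example.

import nest
import nest.topology as topo

nest.ResetKernel()

# Regular 5 x 5 grid layer -- the supported 2d counterpart of the free
# 3d positions used in test_3d.py above.
layer = topo.CreateLayer({'rows': 5, 'columns': 5,
                          'extent': [1.0, 1.0],
                          'elements': 'iaf_psc_alpha'})

# Divergent connections restricted to a rectangular mask, mirroring the
# 'volume' mask of the 3d example.
topo.ConnectLayers(layer, layer,
                   {'connection_type': 'divergent',
                    'allow_autapses': False,
                    'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
                                             'upper_right': [0.2, 0.2]}}})

ctr = topo.FindCenterElement(layer)
print(topo.GetTargetNodes(ctr, layer)[0])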
tulip-control/tulip-control
tulip/abstract/discretization.py
1
70335
# Copyright (c) 2011-2016 by California Institute of Technology # Copyright (c) 2016 by The Regents of the University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder(s) nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDERS OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # """ Algorithms related to discretization of continuous dynamics. See Also ======== L{find_controller} """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging logger = logging.getLogger(__name__) import os import warnings import pprint from copy import deepcopy import multiprocessing as mp import numpy as np from scipy import sparse as sp import polytope as pc from polytope.plot import plot_partition, plot_transition_arrow from tulip import transys as trs from tulip.hybrid import LtiSysDyn, PwaSysDyn from .prop2partition import (PropPreservingPartition, pwa_partition, part2convex) from .feasible import is_feasible, solve_feasible from .plot import plot_ts_on_partition # inline imports: # # inline: import matplotlib.pyplot as plt debug = False class AbstractSwitched(object): """Abstraction of SwitchedSysDyn, with mode-specific and common info. Attributes: - ppp: merged partition, if any Preserves both propositions and dynamics - ts: common TS, if any - ppp2ts: map from C{ppp.regions} to C{ts.states} - modes: dict of {mode: AbstractPwa} - ppp2modes: map from C{ppp.regions} to C{modes[mode].ppp.regions} of the form: {mode: list} where C{list} has same indices as C{ppp.regions} and elements in each C{list} are indices of regions in each C{modes[mode].ppp.regions}. type: dict Each partition corresponds to some mode. (for switched systems) In each mode a L{PwaSysDyn} is active. 
""" def __init__( self, ppp=None, ts=None, ppp2ts=None, modes=None, ppp2modes=None ): if modes is None: modes = dict() self.ppp = ppp self.ts = ts self.ppp2ts = ppp2ts self.modes = modes self.ppp2modes = ppp2modes def __str__(self): s = 'Abstraction of switched system\n' s += str('common PPP:\n') + str(self.ppp) s += str('common ts:\n') + str(self.ts) for mode, ab in self.modes.items(): s += 'mode: ' + str(mode) s += ', with abstraction:\n' + str(ab) return s def ppp2pwa(self, mode, i): """Return original C{Region} containing C{Region} C{i} in C{mode}. @param mode: key of C{modes} @param i: Region index in common partition C{ppp.regions}. @return: tuple C{(j, region)} of: - index C{j} of C{Region} and - C{Region} object in C{modes[mode].ppp.regions} """ region_idx = self.ppp2modes[mode][i] ab = self.modes[mode] return ab.ppp2pwa(region_idx) def ppp2sys(self, mode, i): """Return index of active PWA subsystem in C{mode}, @param mode: key of C{modes} @param i: Region index in common partition C{ppp.regions}. @return: tuple C{(j, subsystem)} of: - index C{j} of PWA C{subsystem} - L{LtiSysDyn} object C{subsystem} """ region_idx = self.ppp2modes[mode][i] ab = self.modes[mode] return ab.ppp2sys(region_idx) def plot(self, show_ts=False, only_adjacent=False): """Plot mode partitions and merged partition, if one exists. For details see L{AbstractPwa.plot}. """ axs = [] color_seed = 0 # merged partition exists ? if self.ppp is not None: for mode in self.modes: env_mode, sys_mode = mode edge_label = {'env_actions':env_mode, 'sys_actions':sys_mode} ax = _plot_abstraction( self, show_ts=False, only_adjacent=False, color_seed=color_seed ) plot_ts_on_partition( self.ppp, self.ts, self.ppp2ts, edge_label, only_adjacent, ax ) axs += [ax] # plot mode partitions for mode, ab in self.modes.items(): ax = ab.plot(show_ts, only_adjacent, color_seed) ax.set_title('Abstraction for mode: ' + str(mode)) axs += [ax] #if isinstance(self.ts, dict): # for ts in self.ts: # ax = ts.plot() # axs += [ax] return axs class AbstractPwa(object): """Discrete abstraction of PWA dynamics, with attributes: - ppp: Partition into Regions. Each Region corresponds to a discrete state of the abstraction type: L{PropPreservingPartition} - ts: Finite transition system abstracting the continuous system. Each state corresponds to a Region in C{ppp.regions}. It can be fed into discrete synthesis algorithms. type: L{FTS} - ppp2ts: bijection between C{ppp.regions} and C{ts.states}. Has common indices with C{ppp.regions}. Elements are states in C{ts.states}. (usually each state is a str) type: list of states - pwa: system dynamics type: L{PwaSysDyn} - pwa_ppp: partition preserving both: - propositions and - domains of PWA subsystems Used for non-conservative planning. If just L{LtiSysDyn}, then the only difference of C{pwa_ppp} from C{orig_ppp} is convexification. type: L{PropPreservingPartition} - orig_ppp: partition preserving only propositions i.e., agnostic of dynamics type: L{PropPreservingPartition} - disc_params: parameters used in discretization that should be passed to the controller refinement to ensure consistency type: dict If any of the above is not given, then it is initialized to None. Notes ===== 1. There could be some redundancy in ppp and ofts, in that they are both decorated with propositions. This might be useful to keep each of them as functional units on their own (possible to change later). 2. The 'Pwa' in L{AbstractPwa} includes L{LtiSysDyn} as a special case. 
""" def __init__( self, ppp=None, ts=None, ppp2ts=None, pwa=None, pwa_ppp=None, ppp2pwa=None, ppp2sys=None, orig_ppp=None, ppp2orig=None, disc_params=None ): if disc_params is None: disc_params = dict() self.ppp = ppp self.ts = ts self.ppp2ts = ppp2ts self.pwa = pwa self.pwa_ppp = pwa_ppp self._ppp2pwa = ppp2pwa self._ppp2sys = ppp2sys self.orig_ppp = orig_ppp self._ppp2orig = ppp2orig # original_regions -> pwa_ppp # ppp2orig -> ppp2pwa_ppp # ppp2pwa -> ppp2pwa_sys self.disc_params = disc_params def __str__(self): s = str(self.ppp) s += str(self.ts) s += 30 * '-' + '\n' s += 'Map PPP Regions ---> TS states:\n' s += self._ppp2other_str(self.ppp2ts) + '\n' s += 'Map PPP Regions ---> PWA PPP Regions:\n' s += self._ppp2other_str(self._ppp2pwa) + '\n' s += 'Map PPP Regions ---> PWA Subsystems:\n' s += self._ppp2other_str(self._ppp2sys) + '\n' s += 'Map PPP Regions ---> Original PPP Regions:\n' s += self._ppp2other_str(self._ppp2orig) + '\n' s += 'Discretization Options:\n\t' s += pprint.pformat(self.disc_params) +'\n' return s def ts2ppp(self, state): region_index = self.ppp2ts.index(state) region = self.ppp[region_index] return (region_index, region) def ppp2trans(self, region_index): """Return the transition set constraint and active subsystem, for non-conservative planning. """ reg_idx, pwa_region = self.ppp2pwa(region_index) sys_idx, sys = self.ppp2sys(region_index) return pwa_region, sys def ppp2pwa(self, region_index): """Return dynamics and predicate-preserving region and its index for PWA subsystem active in given region. The returned region is the C{trans_set} used for non-conservative planning. @param region_index: index in C{ppp.regions}. @rtype: C{(i, pwa.pwa_ppp[i])} """ j = self._ppp2pwa[region_index] pwa_region = self.pwa_ppp[j] return (j, pwa_region) def ppp2sys(self, region_index): """Return index and PWA subsystem active in indexed region. Semantics: j-th sub-system is active in i-th Region, where C{j = ppp2pwa[i]} @param region_index: index in C{ppp.regions}. @rtype: C{(i, pwa.list_subsys[i])} """ # LtiSysDyn ? if self._ppp2sys is None: return (0, self.pwa) subsystem_idx = self._ppp2sys[region_index] subsystem = self.pwa.list_subsys[subsystem_idx] return (subsystem_idx, subsystem) def ppp2orig(self, region_index): """Return index and region of original partition. The original partition is w/o any dynamics, not even the PWA domains, only the polytopic predicates. @param region_index: index in C{ppp.regions}. @rtype: C{(i, orig_ppp.regions[i])} """ j = self._ppp2orig[region_index] orig_region = self.orig_ppp[j] return (j, orig_region) def _ppp2other_str(self, ppp2other): if ppp2other is None: return '' s = '' for i, other in enumerate(ppp2other): s += '\t\t' + str(i) + ' -> ' + str(other) + '\n' return s def _debug_str_(self): s = str(self.ppp) s += str(self.ts) s += '(PWA + Prop)-Preserving Partition' s += str(self.pwa_ppp) s += 'Original Prop-Preserving Partition' s += str(self.orig_ppp) return s def plot(self, show_ts=False, only_adjacent=False, color_seed=None): """Plot partition and optionally feasible transitions. @param show_ts: plot feasible transitions on partition @type show_ts: bool @param only_adjacent: plot feasible transitions only between adjacent regions. This reduces clutter, but if horizon > 1 and not all horizon used, then some transitions could be hidden. 
@param only_adjacent: bool """ ax = _plot_abstraction(self, show_ts, only_adjacent, color_seed) return ax def verify_transitions(self): logger.info('verifying transitions...') for from_state, to_state in self.ts.transitions(): i, from_region = self.ts2ppp(from_state) j, to_region = self.ts2ppp(to_state) trans_set, sys = self.ppp2trans(i) params = {'N', 'close_loop', 'use_all_horizon'} disc_params = {k:v for k,v in self.disc_params.items() if k in params} s0 = solve_feasible(from_region, to_region, sys, trans_set=trans_set, **disc_params) msg = str(i) + ' ---> ' + str(j) if not from_region <= s0: logger.error('incorrect transition: ' + msg) isect = from_region.intersect(s0) ratio = isect.volume /from_region.volume logger.error('intersection volume: ' + str(ratio) + ' %') else: logger.info('correct transition: ' + msg) def _plot_abstraction(ab, show_ts, only_adjacent, color_seed): if ab.ppp is None or ab.ts is None: warnings.warn('Either ppp or ts is None.') return if show_ts: ts = ab.ts ppp2ts = ab.ppp2ts else: ts = None ppp2ts = None ax = ab.ppp.plot( ts, ppp2ts, only_adjacent=only_adjacent, color_seed=color_seed ) #ax = self.ts.plot() return ax def discretize( part, ssys, N=10, min_cell_volume=0.1, closed_loop=True, conservative=False, max_num_poly=5, use_all_horizon=False, trans_length=1, remove_trans=False, abs_tol=1e-7, plotit=False, save_img=False, cont_props=None, plot_every=1, simu_type='bi' ): """Refine the partition via bisimulation or dual-simulation algorithms, and establish transitions based on reachability analysis. Reference ========= U{[NOTM12] <https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>} See Also ======== L{prop2partition.pwa_partition}, L{prop2partition.part2convex} @param part: L{PropPreservingPartition} object @param ssys: L{LtiSysDyn} or L{PwaSysDyn} object @param N: horizon length @param min_cell_volume: the minimum volume of cells in the resulting partition. @param closed_loop: boolean indicating whether the `closed loop` algorithm should be used. default True. @param conservative: if true, force sequence in reachability analysis to stay inside starting cell. If false, safety is ensured by keeping the sequence inside a convexified version of the original proposition preserving cell. @param max_num_poly: maximum number of polytopes in a region to use in reachability analysis. @param use_all_horizon: in closed loop algorithm: if we should look for reachability also in less than N steps. @param trans_length: the number of polytopes allowed to cross in a transition. a value of 1 checks transitions only between neighbors, a value of 2 checks neighbors of neighbors and so on. @param remove_trans: if True, remove found transitions between non-neighbors. 
@param abs_tol: maximum volume for an "empty" polytope @param plotit: plot partitioning as it evolves @type plotit: boolean, default = False @param save_img: save snapshots of partitioning to PDF files, requires plotit=True @type save_img: boolean, default = False @param cont_props: continuous propositions to plot @type cont_props: list of C{Polytope} @param simu_type: if 'bi', use bisimulation partition; if 'dual', use dual-simulation partition @type simu_type: string, default = 'bi' @rtype: L{AbstractPwa} """ if simu_type == 'bi': AbstractPwa = _discretize_bi( part, ssys, N, min_cell_volume, closed_loop, conservative, max_num_poly, use_all_horizon, trans_length, remove_trans, abs_tol, plotit, save_img, cont_props, plot_every) elif simu_type == 'dual': AbstractPwa = _discretize_dual( part, ssys, N, min_cell_volume, closed_loop, conservative, max_num_poly, use_all_horizon, trans_length, remove_trans, abs_tol, plotit, save_img, cont_props, plot_every) else: raise ValueError( 'Unknown simulation type: "{st}"'.format( st=simu_type)) return AbstractPwa def _discretize_bi( part, ssys, N=10, min_cell_volume=0.1, closed_loop=True, conservative=False, max_num_poly=5, use_all_horizon=False, trans_length=1, remove_trans=False, abs_tol=1e-7, plotit=False, save_img=False, cont_props=None, plot_every=1 ): """Refine the partition and establish transitions based on reachability analysis. Use bi-simulation algorithm. Reference ========= 1. U{[NOTM12] <https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>} 2. Wagenmaker, A. J.; Ozay, N. "A Bisimulation-like Algorithm for Abstracting Control Systems." 54th Annual Allerton Conference on CCC 2016 See Also ======== L{prop2partition.pwa_partition}, L{prop2partition.part2convex} @param part: L{PropPreservingPartition} object @param ssys: L{LtiSysDyn} or L{PwaSysDyn} object @param N: horizon length @param min_cell_volume: the minimum volume of cells in the resulting partition. @param closed_loop: boolean indicating whether the `closed loop` algorithm should be used. default True. @param conservative: if true, force sequence in reachability analysis to stay inside starting cell. If false, safety is ensured by keeping the sequence inside a convexified version of the original proposition preserving cell. @param max_num_poly: maximum number of polytopes in a region to use in reachability analysis. @param use_all_horizon: in closed loop algorithm: if we should look for reachability also in less than N steps. @param trans_length: the number of polytopes allowed to cross in a transition. a value of 1 checks transitions only between neighbors, a value of 2 checks neighbors of neighbors and so on. @param remove_trans: if True, remove found transitions between non-neighbors. 
@param abs_tol: maximum volume for an "empty" polytope @param plotit: plot partitioning as it evolves @type plotit: boolean, default = False @param save_img: save snapshots of partitioning to PDF files, requires plotit=True @type save_img: boolean, default = False @param cont_props: continuous propositions to plot @type cont_props: list of C{Polytope} @rtype: L{AbstractPwa} """ start_time = os.times()[0] orig_ppp = part min_cell_volume = (min_cell_volume /np.finfo(np.double).eps *np.finfo(np.double).eps) ispwa = isinstance(ssys, PwaSysDyn) islti = isinstance(ssys, LtiSysDyn) if ispwa: (part, ppp2pwa, part2orig) = pwa_partition(ssys, part) else: part2orig = range(len(part)) # Save original polytopes, require them to be convex if conservative: orig_list = None orig = [0] else: (part, new2old) = part2convex(part) # convexify part2orig = [part2orig[i] for i in new2old] # map new regions to pwa subsystems if ispwa: ppp2pwa = [ppp2pwa[i] for i in new2old] remove_trans = False # already allowed in nonconservative orig_list = [] for poly in part: if len(poly) == 0: orig_list.append(poly.copy()) elif len(poly) == 1: orig_list.append(poly[0].copy()) else: raise Exception("discretize: " "problem in convexification") orig = list(range(len(orig_list))) # Cheby radius of disturbance set # (defined within the loop for pwa systems) if islti: if len(ssys.E) > 0: rd = ssys.Wset.chebR else: rd = 0. # Initialize matrix for pairs to check IJ = part.adj.copy() IJ = IJ.todense() IJ = np.array(IJ) logger.debug("\n Starting IJ: \n" + str(IJ) ) # next line omitted in discretize_overlap IJ = reachable_within(trans_length, IJ, np.array(part.adj.todense()) ) # Initialize output num_regions = len(part) transitions = np.zeros( [num_regions, num_regions], dtype = int ) sol = deepcopy(part.regions) adj = part.adj.copy() adj = adj.todense() adj = np.array(adj) # next 2 lines omitted in discretize_overlap if ispwa: subsys_list = list(ppp2pwa) else: subsys_list = None ss = ssys # init graphics if plotit: try: import matplotlib.pyplot as plt plt.ion() fig, (ax1, ax2) = plt.subplots(1, 2) ax1.axis('scaled') ax2.axis('scaled') file_extension = 'pdf' except: logger.error('failed to import matplotlib') plt = None else: plt = None iter_count = 0 # List of how many "new" regions # have been created for each region # and a list of original number of neighbors #num_new_reg = np.zeros(len(orig_list)) #num_orig_neigh = np.sum(adj, axis=1).flatten() - 1 progress = list() # Do the abstraction while np.sum(IJ) > 0: ind = np.nonzero(IJ) # i,j swapped in discretize_overlap i = ind[1][0] j = ind[0][0] IJ[j, i] = 0 si = sol[i] sj = sol[j] si_tmp = deepcopy(si) sj_tmp = deepcopy(sj) if ispwa: ss = ssys.list_subsys[subsys_list[i]] if len(ss.E) > 0: rd, xd = pc.cheby_ball(ss.Wset) else: rd = 0. 
if conservative: # Don't use trans_set trans_set = None else: # Use original cell as trans_set trans_set = orig_list[orig[i]] S0 = solve_feasible( si, sj, ss, N, closed_loop, use_all_horizon, trans_set, max_num_poly ) msg = '\n Working with partition cells: {i}, {j}'.format(i=i, j=j) logger.info(msg) msg = '\t{i} (#polytopes = {num}), and:\n'.format(i=i, num=len(si)) msg += '\t{j} (#polytopes = {num})\n'.format(j=j, num=len(sj)) if ispwa: msg += '\t with active subsystem: ' msg += '{sys}\n'.format(sys=subsys_list[i]) msg += '\t Computed reachable set S0 with volume: ' msg += '{vol}\n'.format(vol=S0.volume) logger.debug(msg) #logger.debug(r'si \cap s0') isect = si.intersect(S0) vol1 = isect.volume risect, xi = pc.cheby_ball(isect) #logger.debug(r'si \ s0') diff = si.diff(S0) vol2 = diff.volume rdiff, xd = pc.cheby_ball(diff) # if pc.is_fulldim(pc.Region([isect]).intersect(diff)): # logging.getLogger('tulip.polytope').setLevel(logging.DEBUG) # diff = pc.mldivide(si, S0, save=True) # # ax = S0.plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/s0.pdf') # # ax = si.plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/si.pdf') # # ax = isect.plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/isect.pdf') # # ax = diff.plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/diff.pdf') # # ax = isect.intersect(diff).plot() # ax.axis([0.0, 1.0, 0.0, 2.0]) # ax.figure.savefig('./img/diff_cap_isect.pdf') # # logger.error(r'Intersection \cap Difference != \emptyset') # # assert(False) if vol1 <= min_cell_volume: logger.warning('\t too small: si \\cap Pre(sj), ' 'so discard intersection') if vol1 <= min_cell_volume and isect: logger.warning('\t discarded non-empty intersection: ' 'consider reducing min_cell_volume') if vol2 <= min_cell_volume: logger.warning('\t too small: si \\ Pre(sj), so not reached it') # We don't want our partitions to be smaller than the disturbance set # Could be a problem since cheby radius is calculated for smallest # convex polytope, so if we have a region we might throw away a good # cell. 
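        # Descriptive note (added): cell i is split below only when BOTH
        # resulting pieces -- the intersection si \cap Pre(sj) and the
        # remainder si \ Pre(sj) -- are large enough: each must exceed
        # min_cell_volume, and each piece's Chebyshev radius (risect, rdiff)
        # must exceed the Chebyshev radius rd of the disturbance set, so no
        # new cell ends up thinner than the disturbance.  Otherwise the pair
        # (i, j) is either recorded as a transition (vol2 < abs_tol) or
        # marked unreachable.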
if ( vol1 > min_cell_volume and risect > rd and vol2 > min_cell_volume and rdiff > rd): # Make sure new areas are Regions and add proposition lists if len(isect) == 0: isect = pc.Region([isect], si.props) else: isect.props = si.props.copy() if len(diff) == 0: diff = pc.Region([diff], si.props) else: diff.props = si.props.copy() # replace si by intersection (single state) isect_list = pc.separate(isect) sol[i] = isect_list[0] # cut difference into connected pieces difflist = pc.separate(diff) difflist += isect_list[1:] # n_isect = len(isect_list) -1 num_new = len(difflist) # add each piece, as a new state for region in difflist: sol.append(region) # keep track of PWA subsystems map to new states if ispwa: subsys_list.append(subsys_list[i]) n_cells = len(sol) new_idx = range(n_cells-1, n_cells-num_new-1, -1) """Update transition matrix""" transitions = np.pad(transitions, (0,num_new), 'constant') transitions[i, :] = np.zeros(n_cells) for r in new_idx: #transitions[:, r] = transitions[:, i] # All sets reachable from start are reachable from both part's # except possibly the new part transitions[i, r] = 0 transitions[j, r] = 0 # sol[j] is reachable from intersection of sol[i] and S0 if i != j: transitions[j, i] = 1 # sol[j] is reachable from each piece os S0 \cap sol[i] #for k in range(n_cells-n_isect-2, n_cells): # transitions[j, k] = 1 """Update adjacency matrix""" old_adj = np.nonzero(adj[i, :])[0] # reset new adjacencies adj[i, :] = np.zeros([n_cells -num_new]) adj[:, i] = np.zeros([n_cells -num_new]) adj[i, i] = 1 adj = np.pad(adj, (0, num_new), 'constant') for r in new_idx: adj[i, r] = 1 adj[r, i] = 1 adj[r, r] = 1 if not conservative: orig = np.hstack([orig, orig[i]]) # adjacencies between pieces of isect and diff for r in new_idx: for k in new_idx: if r is k: continue if pc.is_adjacent(sol[r], sol[k]): adj[r, k] = 1 adj[k, r] = 1 msg = '' if logger.getEffectiveLevel() <= logging.DEBUG: msg += '\t\n Adding states {i} and '.format(i=i) for r in new_idx: msg += '{r} and '.format(r=r) msg += '\n' logger.debug(msg) for k in np.setdiff1d(old_adj, [i,n_cells-1]): # Every "old" neighbor must be the neighbor # of at least one of the new if pc.is_adjacent(sol[i], sol[k]): adj[i, k] = 1 adj[k, i] = 1 elif remove_trans and (trans_length == 1): # Actively remove transitions between non-neighbors transitions[i, k] = 0 transitions[k, i] = 0 for r in new_idx: if pc.is_adjacent(sol[r], sol[k]): adj[r, k] = 1 adj[k, r] = 1 elif remove_trans and (trans_length == 1): # Actively remove transitions between non-neighbors transitions[r, k] = 0 transitions[k, r] = 0 """Update IJ matrix""" IJ = np.pad(IJ, (0,num_new), 'constant') adj_k = reachable_within(trans_length, adj, adj) sym_adj_change(IJ, adj_k, transitions, i) for r in new_idx: sym_adj_change(IJ, adj_k, transitions, r) if logger.getEffectiveLevel() <= logging.DEBUG: msg = '\n\n Updated adj: \n{adj}'.format(adj=adj) msg += '\n\n Updated trans: \n{trans}'.format(trans= transitions) msg += '\n\n Updated IJ: \n{IJ}'.format(IJ=IJ) logger.debug(msg) logger.info('Divided region: {i}\n'.format(i=i)) elif vol2 < abs_tol: logger.info('Found: {i} ---> {j}\n'.format(i=i, j=j)) transitions[j,i] = 1 else: if logger.level <= logging.DEBUG: msg = '\t Unreachable: {i} --X--> {j}\n'.format(i=i, j=j) msg += '\t\t diff vol: {vol2}\n'.format(vol2=vol2) msg += '\t\t intersect vol: {vol1}\n'.format(vol1=vol1) logger.debug(msg) else: logger.info('\t unreachable\n') transitions[j,i] = 0 # check to avoid overlapping Regions if debug: tmp_part = PropPreservingPartition( 
domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) assert(tmp_part.is_partition() ) n_cells = len(sol) progress_ratio = 1 - float(np.sum(IJ) ) /n_cells**2 progress += [progress_ratio] msg = '\t total # polytopes: {n_cells}\n'.format(n_cells=n_cells) msg += '\t progress ratio: {pr}\n'.format(pr=progress_ratio) logger.info(msg) iter_count += 1 # no plotting ? if not plotit: continue if plt is None or plot_partition is None: continue if iter_count % plot_every != 0: continue tmp_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) # plot pair under reachability check ax2.clear() si_tmp.plot(ax=ax2, color='green') sj_tmp.plot(ax2, color='red', hatch='o', alpha=0.5) plot_transition_arrow(si_tmp, sj_tmp, ax2) S0.plot(ax2, color='none', hatch='/', alpha=0.3) fig.canvas.draw() # plot partition ax1.clear() plot_partition(tmp_part, transitions.T, ax=ax1, color_seed=23) # plot dynamics ssys.plot(ax1, show_domain=False) # plot hatched continuous propositions part.plot_props(ax1) fig.canvas.draw() # scale view based on domain, # not only the current polytopes si, sj l,u = part.domain.bounding_box ax2.set_xlim(l[0,0], u[0,0]) ax2.set_ylim(l[1,0], u[1,0]) if save_img: fname = 'movie' +str(iter_count).zfill(3) fname += '.' + file_extension fig.savefig(fname, dpi=250) plt.pause(1) new_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) # check completeness of adjacency matrix if debug: tmp_part = deepcopy(new_part) tmp_part.compute_adj() # Generate transition system and add transitions ofts = trs.FTS() adj = sp.lil_matrix(transitions.T) n = adj.shape[0] ofts_states = range(n) ofts.states.add_from(ofts_states) ofts.transitions.add_adj(adj, ofts_states) # Decorate TS with state labels atomic_propositions = set(part.prop_regions) ofts.atomic_propositions.add_from(atomic_propositions) for state, region in zip(ofts_states, sol): state_prop = region.props.copy() ofts.states.add(state, ap=state_prop) param = { 'N':N, 'trans_length':trans_length, 'closed_loop':closed_loop, 'conservative':conservative, 'use_all_horizon':use_all_horizon, 'min_cell_volume':min_cell_volume, 'max_num_poly':max_num_poly } ppp2orig = [part2orig[x] for x in orig] end_time = os.times()[0] msg = 'Total abstraction time: {time}[sec]'.format(time= end_time - start_time) print(msg) logger.info(msg) if save_img and plt is not None: fig, ax = plt.subplots(1, 1) plt.plot(progress) ax.set_xlabel('iteration') ax.set_ylabel('progress ratio') ax.figure.savefig('progress.pdf') return AbstractPwa( ppp=new_part, ts=ofts, ppp2ts=ofts_states, pwa=ssys, pwa_ppp=part, ppp2pwa=orig, ppp2sys=subsys_list, orig_ppp=orig_ppp, ppp2orig=ppp2orig, disc_params=param ) def _discretize_dual( part, ssys, N=10, min_cell_volume=0.1, closed_loop=True, conservative=False, max_num_poly=5, use_all_horizon=False, trans_length=1, remove_trans=False, abs_tol=1e-7, plotit=False, save_img=False, cont_props=None, plot_every=1 ): """Refine the partition and establish transitions based on reachability analysis. Use dual-simulation algorithm. Reference ========= 1. U{[NOTM12] <https://tulip-control.sourceforge.io/doc/bibliography.html#notm12>} 2. Wagenmaker, A. J.; Ozay, N. "A Bisimulation-like Algorithm for Abstracting Control Systems." 
54th Annual Allerton Conference on CCC 2016 See Also ======== L{prop2partition.pwa_partition}, L{prop2partition.part2convex} @param part: L{PropPreservingPartition} object @param ssys: L{LtiSysDyn} or L{PwaSysDyn} object @param N: horizon length @param min_cell_volume: the minimum volume of cells in the resulting partition. @param closed_loop: boolean indicating whether the `closed loop` algorithm should be used. default True. @param conservative: if true, force sequence in reachability analysis to stay inside starting cell. If false, safety is ensured by keeping the sequence inside a convexified version of the original proposition preserving cell. @param max_num_poly: maximum number of polytopes in a region to use in reachability analysis. @param use_all_horizon: in closed loop algorithm: if we should look for reachability also in less than N steps. @param trans_length: the number of polytopes allowed to cross in a transition. a value of 1 checks transitions only between neighbors, a value of 2 checks neighbors of neighbors and so on. @param remove_trans: if True, remove found transitions between non-neighbors. @param abs_tol: maximum volume for an "empty" polytope @param plotit: plot partitioning as it evolves @type plotit: boolean, default = False @param save_img: save snapshots of partitioning to PDF files, requires plotit=True @type save_img: boolean, default = False @param cont_props: continuous propositions to plot @type cont_props: list of C{Polytope} @param simu_type: flag used to choose abstraction algorithm (bisimulation or dual-simulation). @type simu_type: string, 'bi' or 'dual' default = 'bi' @rtype: L{AbstractPwa} """ start_time = os.times()[0] orig_ppp = part min_cell_volume = (min_cell_volume /np.finfo(np.double).eps *np.finfo(np.double).eps) ispwa = isinstance(ssys, PwaSysDyn) islti = isinstance(ssys, LtiSysDyn) if ispwa: (part, ppp2pwa, part2orig) = pwa_partition(ssys, part) else: part2orig = range(len(part)) # Save original polytopes, require them to be convex if conservative: orig_list = None orig = [0] else: (part, new2old) = part2convex(part) # convexify part2orig = [part2orig[i] for i in new2old] # map new regions to pwa subsystems if ispwa: ppp2pwa = [ppp2pwa[i] for i in new2old] remove_trans = False # already allowed in nonconservative orig_list = [] for poly in part: if len(poly) == 0: orig_list.append(poly.copy()) elif len(poly) == 1: orig_list.append(poly[0].copy()) else: raise Exception("discretize: " "problem in convexification") orig = list(range(len(orig_list))) # Cheby radius of disturbance set # (defined within the loop for pwa systems) if islti: if len(ssys.E) > 0: rd = ssys.Wset.chebR else: rd = 0. 
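    # Descriptive note (added): as in the bisimulation variant above, IJ and
    # transitions are indexed as [target, source]: IJ[j, i] == 1 means the
    # pair (source region i, target region j) still has to be checked, and
    # transitions[j, i] == 1 records that region i can reach region j.  The
    # matrix is transposed (transitions.T) before it is loaded into the FTS.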
# Initialize matrix for pairs to check IJ = part.adj.copy() IJ = IJ.todense() IJ = np.array(IJ) logger.debug("\n Starting IJ: \n" + str(IJ) ) # next line omitted in discretize_overlap IJ = reachable_within(trans_length, IJ, np.array(part.adj.todense())) # Initialize output num_regions = len(part) transitions = np.zeros( [num_regions, num_regions], dtype = int ) sol = deepcopy(part.regions) adj = part.adj.copy() adj = adj.todense() adj = np.array(adj) # next 2 lines omitted in discretize_overlap if ispwa: subsys_list = list(ppp2pwa) else: subsys_list = None ss = ssys # init graphics if plotit: try: import matplotlib.pyplot as plt plt.ion() fig, (ax1, ax2) = plt.subplots(1, 2) ax1.axis('scaled') ax2.axis('scaled') file_extension = 'pdf' except: logger.error('failed to import matplotlib') plt = None else: plt = None iter_count = 0 # List of how many "new" regions # have been created for each region # and a list of original number of neighbors #num_new_reg = np.zeros(len(orig_list)) #num_orig_neigh = np.sum(adj, axis=1).flatten() - 1 progress = list() # Do the abstraction while np.sum(IJ) > 0: ind = np.nonzero(IJ) # i,j swapped in discretize_overlap i = ind[1][0] j = ind[0][0] IJ[j, i] = 0 si = sol[i] sj = sol[j] si_tmp = deepcopy(si) sj_tmp = deepcopy(sj) #num_new_reg[i] += 1 #print(num_new_reg) if ispwa: ss = ssys.list_subsys[subsys_list[i]] if len(ss.E) > 0: rd, xd = pc.cheby_ball(ss.Wset) else: rd = 0. if conservative: # Don't use trans_set trans_set = None else: # Use original cell as trans_set trans_set = orig_list[orig[i]] S0 = solve_feasible( si, sj, ss, N, closed_loop, use_all_horizon, trans_set, max_num_poly ) msg = '\n Working with partition cells: {i}, {j}'.format(i=i, j=j) logger.info(msg) msg = '\t{i} (#polytopes = {num}), and:\n'.format(i=i, num=len(si)) msg += '\t{j} (#polytopes = {num})\n'.format(j=j, num=len(sj)) if ispwa: msg += '\t with active subsystem: ' msg += '{sys}\n'.format(sys=subsys_list[i]) msg += '\t Computed reachable set S0 with volume: ' msg += '{vol}\n'.format(vol=S0.volume) logger.debug(msg) #logger.debug(r'si \cap s0') isect = si.intersect(S0) vol1 = isect.volume risect, xi = pc.cheby_ball(isect) #logger.debug(r'si \ s0') rsi, xd = pc.cheby_ball(si) vol2 = si.volume-vol1 # not accurate. need to check polytope class if vol1 <= min_cell_volume: logger.warning('\t too small: si \\cap Pre(sj), ' 'so discard intersection') if vol1 <= min_cell_volume and isect: logger.warning('\t discarded non-empty intersection: ' 'consider reducing min_cell_volume') if vol2 <= min_cell_volume: logger.warning('\t too small: si \\ Pre(sj), so not reached it') # indicate if S0 has exists in sol check_isect = False # We don't want our partitions to be smaller than the disturbance set # Could be a problem since cheby radius is calculated for smallest # convex polytope, so if we have a region we might throw away a good # cell. 
if ( vol1 > min_cell_volume and risect > rd and vol2 > min_cell_volume and rsi > rd): # check if the intersection has existed in current partitions for idx in range(len(sol)): if(sol[idx] == isect): logger.info('Found: {idx} ---> {j} '.format(idx=idx, j=j)) logger.info('intersection exists.\n') transitions[j, idx] = 1 check_isect = True if not check_isect: # Make sure new areas are Regions and add proposition lists if len(isect) == 0: isect = pc.Region([isect], si.props) else: isect.props = si.props.copy() # add intersection in sol isect_list = pc.separate(isect) sol.append(isect_list[0]) n_cells = len(sol) new_idx = n_cells-1 """Update adjacency matrix""" old_adj = np.nonzero(adj[i, :])[0] adj = np.pad(adj, (0, 1), 'constant') # cell i and new_idx are adjacent adj[i, new_idx] = 1 adj[new_idx, i] = 1 adj[new_idx, new_idx] = 1 if not conservative: orig = np.hstack([orig, orig[i]]) msg = '' if logger.getEffectiveLevel() <= logging.DEBUG: msg += '\t\n Adding states {new_idx}\n'.format(new_idx= new_idx) logger.debug(msg) for k in np.setdiff1d(old_adj, [i,n_cells-1]): # Every "old" neighbor must be the neighbor # of at least one of the new if pc.is_adjacent(sol[new_idx], sol[k]): adj[new_idx, k] = 1 adj[k, new_idx] = 1 elif remove_trans and (trans_length == 1): # Actively remove transitions between non-neighbors transitions[new_idx, k] = 0 transitions[k, new_idx] = 0 """Update transition matrix""" transitions = np.pad(transitions, (0,1), 'constant') adj_k = reachable_within(trans_length, adj, adj) # transitions i ---> k for k is neighbor of new_idx should be # kept by new_idx transitions[:, new_idx] = np.multiply(transitions[:, i], adj_k[:, i]) # if j and new_idx are neighbor, then add new_idx ---> j if adj_k[j, new_idx] != 0: transitions[j, new_idx] = 1 """Update IJ matrix""" IJ = np.pad(IJ, (0, 1), 'constant') sym_adj_change(IJ, adj_k, transitions, i) sym_adj_change(IJ, adj_k, transitions, new_idx) if logger.getEffectiveLevel() <= logging.DEBUG: msg = '\n\n Updated adj: \n{adj}'.format(adj=adj) msg += '\n\n Updated trans: \n{trans}'.format(trans= transitions) msg += '\n\n Updated IJ: \n{IJ}'.format(IJ=IJ) logger.debug(msg) logger.info('Divided region: {i}\n'.format(i=i)) elif vol2 < abs_tol: logger.info('Found: {i} ---> {j}\n'.format(i=i, j=j)) transitions[j, i] = 1 else: if logger.level <= logging.DEBUG: msg = '\t Unreachable: {i} --X--> {j}\n'.format(i=i, j=j) msg += '\t\t diff vol: {vol2}\n'.format(vol2=vol2) msg += '\t\t intersect vol: {vol1}\n'.format(vol1=vol1) logger.debug(msg) else: logger.info('\t unreachable\n') transitions[j, i] = 0 # check to avoid overlapping Regions if debug: tmp_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) assert(tmp_part.is_partition() ) n_cells = len(sol) progress_ratio = 1 - float(np.sum(IJ) ) /n_cells**2 progress += [progress_ratio] msg = '\t total # polytopes: {n_cells}\n'.format(n_cells=n_cells) msg += '\t progress ratio: {pr}\n'.format(pr=progress_ratio) logger.info(msg) iter_count += 1 # needs to be removed later # if(iter_count>=700): # break # no plotting ? 
if not plotit: continue if plt is None or plot_partition is None: continue if iter_count % plot_every != 0: continue tmp_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) # plot pair under reachability check ax2.clear() si_tmp.plot(ax=ax2, color='green') sj_tmp.plot(ax2, color='red', hatch='o', alpha=0.5) plot_transition_arrow(si_tmp, sj_tmp, ax2) S0.plot(ax2, color='none', hatch='/', alpha=0.3) fig.canvas.draw() # plot partition ax1.clear() plot_partition(tmp_part, transitions.T, ax=ax1, color_seed=23) # plot dynamics ssys.plot(ax1, show_domain=False) # plot hatched continuous propositions part.plot_props(ax1) fig.canvas.draw() # scale view based on domain, # not only the current polytopes si, sj l,u = part.domain.bounding_box ax2.set_xlim(l[0,0], u[0,0]) ax2.set_ylim(l[1,0], u[1,0]) if save_img: fname = 'movie' +str(iter_count).zfill(3) fname += '.' + file_extension fig.savefig(fname, dpi=250) plt.pause(1) new_part = PropPreservingPartition( domain=part.domain, regions=sol, adj=sp.lil_matrix(adj), prop_regions=part.prop_regions ) # check completeness of adjacency matrix if debug: tmp_part = deepcopy(new_part) tmp_part.compute_adj() # Generate transition system and add transitions ofts = trs.FTS() adj = sp.lil_matrix(transitions.T) n = adj.shape[0] ofts_states = range(n) ofts.states.add_from(ofts_states) ofts.transitions.add_adj(adj, ofts_states) # Decorate TS with state labels atomic_propositions = set(part.prop_regions) ofts.atomic_propositions.add_from(atomic_propositions) for state, region in zip(ofts_states, sol): state_prop = region.props.copy() ofts.states.add(state, ap=state_prop) param = { 'N':N, 'trans_length':trans_length, 'closed_loop':closed_loop, 'conservative':conservative, 'use_all_horizon':use_all_horizon, 'min_cell_volume':min_cell_volume, 'max_num_poly':max_num_poly } ppp2orig = [part2orig[x] for x in orig] end_time = os.times()[0] msg = 'Total abstraction time: {t} [sec]'.format( t=end_time - start_time) print(msg) logger.info(msg) if save_img and plt is not None: fig, ax = plt.subplots(1, 1) plt.plot(progress) ax.set_xlabel('iteration') ax.set_ylabel('progress ratio') ax.figure.savefig('progress.pdf') return AbstractPwa( ppp=new_part, ts=ofts, ppp2ts=ofts_states, pwa=ssys, pwa_ppp=part, ppp2pwa=orig, ppp2sys=subsys_list, orig_ppp=orig_ppp, ppp2orig=ppp2orig, disc_params=param ) def reachable_within(trans_length, adj_k, adj): """Find cells reachable within trans_length hops. """ if trans_length <= 1: return adj_k k = 1 while k < trans_length: adj_k = (np.dot(adj_k, adj)!=0).astype(int) k += 1 adj_k = (adj_k > 0).astype(int) return adj_k def sym_adj_change(IJ, adj_k, transitions, i): horizontal = adj_k[i, :] -transitions[i, :] > 0 vertical = adj_k[:, i] -transitions[:, i] > 0 IJ[i, :] = horizontal.astype(int) IJ[:, i] = vertical.astype(int) # DEFUNCT until further notice def discretize_overlap(closed_loop=False, conservative=False): """default False. UNDER DEVELOPMENT; function signature may change without notice. Calling will result in NotImplementedError. 
""" raise NotImplementedError # # if rdiff < abs_tol: # logger.info("Transition found") # transitions[i,j] = 1 # # elif ((vol1 > min_cell_volume) & (risect > rd) & # (num_new_reg[i] <= num_orig_neigh[i]+1)): # # # Make sure new cell is Region and add proposition lists # if len(isect) == 0: # isect = pc.Region([isect], si.props) # else: # isect.props = si.props.copy() # # # Add new state # sol.append(isect) # size = len(sol) # # # Add transitions # transitions = np.hstack([transitions, np.zeros([size - 1, 1], # dtype=int) ]) # transitions = np.vstack([transitions, np.zeros([1, size], # dtype=int) ]) # # # All sets reachable from orig cell are reachable from both cells # transitions[size-1,:] = transitions[i,:] # transitions[size-1,j] = 1 # j is reachable from new cell # # # Take care of adjacency # old_adj = np.nonzero(adj[i,:])[0] # # adj = np.hstack([adj, np.zeros([size - 1, 1], dtype=int) ]) # adj = np.vstack([adj, np.zeros([1, size], dtype=int) ]) # adj[i,size-1] = 1 # adj[size-1,i] = 1 # adj[size-1,size-1] = 1 # # for k in np.setdiff1d(old_adj,[i,size-1]): # if pc.is_adjacent(sol[size-1],sol[k],overlap=True): # adj[size-1,k] = 1 # adj[k, size-1] = 1 # else: # # Actively remove (valid) transitions between non-neighbors # transitions[size-1,k] = 0 # transitions[k,size-1] = 0 # # # Assign original proposition cell to new state and update counts # if not conservative: # orig = np.hstack([orig, orig[i]]) # print(num_new_reg) # num_new_reg = np.hstack([num_new_reg, 0]) # num_orig_neigh = np.hstack([num_orig_neigh, np.sum(adj[size-1,:])-1]) # # logger.info("\n Adding state " + str(size-1) + "\n") # # # Just add adjacent cells for checking, # # unless transition already found # IJ = np.hstack([IJ, np.zeros([size - 1, 1], dtype=int) ]) # IJ = np.vstack([IJ, np.zeros([1, size], dtype=int) ]) # horiz2 = adj[size-1,:] - transitions[size-1,:] > 0 # verti2 = adj[:,size-1] - transitions[:,size-1] > 0 # IJ[size-1,:] = horiz2.astype(int) # IJ[:,size-1] = verti2.astype(int) # else: # logger.info("No transition found, intersect vol: " + str(vol1) ) # transitions[i,j] = 0 # # new_part = PropPreservingPartition( # domain=part.domain, # regions=sol, adj=np.array([]), # trans=transitions, prop_regions=part.prop_regions, # original_regions=orig_list, orig=orig) # return new_part def multiproc_discretize(q, mode, ppp, cont_dyn, disc_params): global logger logger = mp.log_to_stderr() name = mp.current_process().name print('Abstracting mode: ' + str(mode) + ', on: ' + str(name)) absys = discretize(ppp, cont_dyn, **disc_params) q.put((mode, absys)) print('Worker: ' + str(name) + 'finished.') def multiproc_get_transitions( q, absys, mode, ssys, params ): global logger logger = mp.log_to_stderr() name = mp.current_process().name print('Merged transitions for mode: ' + str(mode) + ', on: ' + str(name)) trans = get_transitions(absys, mode, ssys, **params) q.put((mode, trans)) print('Worker: ' + str(name) + 'finished.') def multiproc_discretize_switched( ppp, hybrid_sys, disc_params=None, plot=False, show_ts=False, only_adjacent=True ): """Parallel implementation of discretize_switched. Uses the multiprocessing package. 
""" logger.info('parallel discretize_switched started') modes = list(hybrid_sys.modes) mode_nums = hybrid_sys.disc_domain_size q = mp.Queue() mode_args = dict() for mode in modes: cont_dyn = hybrid_sys.dynamics[mode] mode_args[mode] = (q, mode, ppp, cont_dyn, disc_params[mode]) jobs = [mp.Process(target=multiproc_discretize, args=args) for args in mode_args.values()] for job in jobs: job.start() # flush before join: # http://stackoverflow.com/questions/19071529/ abstractions = dict() for job in jobs: mode, absys = q.get() abstractions[mode] = absys for job in jobs: job.join() # merge their domains (merged_abstr, ap_labeling) = merge_partitions(abstractions) n = len(merged_abstr.ppp) logger.info('Merged partition has: ' + str(n) + ', states') # find feasible transitions over merged partition for mode in modes: cont_dyn = hybrid_sys.dynamics[mode] params = disc_params[mode] mode_args[mode] = (q, merged_abstr, mode, cont_dyn, params) jobs = [mp.Process(target=multiproc_get_transitions, args=args) for args in mode_args.values()] for job in jobs: job.start() trans = dict() for job in jobs: mode, t = q.get() trans[mode] = t # merge the abstractions, creating a common TS merge_abstractions(merged_abstr, trans, abstractions, modes, mode_nums) if plot: plot_mode_partitions(merged_abstr, show_ts, only_adjacent) return merged_abstr def discretize_switched( ppp, hybrid_sys, disc_params=None, plot=False, show_ts=False, only_adjacent=True ): """Abstract switched dynamics over given partition. @type ppp: L{PropPreservingPartition} @param hybrid_sys: dynamics of switching modes @type hybrid_sys: L{SwitchedSysDyn} @param disc_params: discretization parameters passed to L{discretize} for each mode. See L{discretize} for details. @type disc_params: dict (keyed by mode) of dicts. @param plot: save partition images @type plot: bool @param show_ts, only_adjacent: options for L{AbstractPwa.plot}. @return: abstracted dynamics, some attributes are dict keyed by mode @rtype: L{AbstractSwitched} """ if disc_params is None: disc_params = {'N':1, 'trans_length':1} logger.info('discretizing hybrid system') modes = list(hybrid_sys.modes) mode_nums = hybrid_sys.disc_domain_size # discretize each abstraction separately abstractions = dict() for mode in modes: logger.debug(30*'-'+'\n') logger.info('Abstracting mode: ' + str(mode)) cont_dyn = hybrid_sys.dynamics[mode] absys = discretize( ppp, cont_dyn, **disc_params[mode] ) logger.debug('Mode Abstraction:\n' + str(absys) +'\n') abstractions[mode] = absys # merge their domains (merged_abstr, ap_labeling) = merge_partitions(abstractions) n = len(merged_abstr.ppp) logger.info('Merged partition has: ' + str(n) + ', states') # find feasible transitions over merged partition trans = dict() for mode in modes: cont_dyn = hybrid_sys.dynamics[mode] params = disc_params[mode] trans[mode] = get_transitions( merged_abstr, mode, cont_dyn, N=params['N'], trans_length=params['trans_length'] ) # merge the abstractions, creating a common TS merge_abstractions(merged_abstr, trans, abstractions, modes, mode_nums) if plot: plot_mode_partitions(merged_abstr, show_ts, only_adjacent) return merged_abstr def plot_mode_partitions(swab, show_ts, only_adjacent): """Save each mode's partition and final merged partition. 
""" axs = swab.plot(show_ts, only_adjacent) if not axs: logger.error('failed to plot the partitions.') return n = len(swab.modes) assert(len(axs) == 2*n) # annotate for ax in axs: plot_annot(ax) # save mode partitions for ax, mode in zip(axs[:n], swab.modes): fname = 'merged_' + str(mode) + '.pdf' ax.figure.savefig(fname) # save merged partition for ax, mode in zip(axs[n:], swab.modes): fname = 'part_' + str(mode) + '.pdf' ax.figure.savefig(fname) def plot_annot(ax): fontsize = 5 for tick in ax.xaxis.get_major_ticks(): tick.label1.set_fontsize(fontsize) for tick in ax.yaxis.get_major_ticks(): tick.label1.set_fontsize(fontsize) ax.set_xlabel('$v_1$', fontsize=fontsize+6) ax.set_ylabel('$v_2$', fontsize=fontsize+6) def merge_abstractions(merged_abstr, trans, abstr, modes, mode_nums): """Construct merged transitions. @type merged_abstr: L{AbstractSwitched} @type abstr: dict of L{AbstractPwa} """ # TODO: check equality of atomic proposition sets aps = abstr[modes[0]].ts.atomic_propositions logger.info('APs: ' + str(aps)) sys_ts = trs.FTS() # create stats n = len(merged_abstr.ppp) states = range(n) sys_ts.states.add_from(states) sys_ts.atomic_propositions.add_from(aps) # copy AP labels from regions to discrete states ppp2ts = states for (i, state) in enumerate(ppp2ts): props = merged_abstr.ppp[i].props sys_ts.states[state]['ap'] = props # create mode actions sys_actions = [str(s) for e,s in modes] env_actions = [str(e) for e,s in modes] # no env actions ? if mode_nums[0] == 0: actions_per_mode = { (e,s):{'sys_actions':str(s)} for e,s in modes } sys_ts.sys_actions.add_from(sys_actions) elif mode_nums[1] == 0: # no sys actions actions_per_mode = { (e,s):{'env_actions':str(e)} for e,s in modes } sys_ts.env_actions.add_from(env_actions) else: actions_per_mode = { (e,s):{'env_actions':str(e), 'sys_actions':str(s)} for e,s in modes } sys_ts.env_actions.add_from([str(e) for e,s in modes]) sys_ts.sys_actions.add_from([str(s) for e,s in modes]) for mode in modes: env_sys_actions = actions_per_mode[mode] adj = trans[mode] sys_ts.transitions.add_adj( adj = adj, adj2states = states, **env_sys_actions ) merged_abstr.ts = sys_ts merged_abstr.ppp2ts = ppp2ts def get_transitions( abstract_sys, mode, ssys, N=10, closed_loop=True, trans_length=1 ): """Find which transitions are feasible in given mode. Used for the candidate transitions of the merged partition. @rtype: scipy.sparse.lil_matrix """ logger.info('checking which transitions remain feasible after merging') part = abstract_sys.ppp # Initialize matrix for pairs to check IJ = part.adj.copy() if trans_length > 1: k = 1 while k < trans_length: IJ = np.dot(IJ, part.adj) k += 1 IJ = (IJ > 0).astype(int) # Initialize output n = len(part) transitions = sp.lil_matrix((n, n), dtype=int) # Do the abstraction n_checked = 0 n_found = 0 while np.sum(IJ) > 0: n_checked += 1 ind = np.nonzero(IJ) i = ind[1][0] j = ind[0][0] IJ[j,i] = 0 logger.debug('checking transition: ' + str(i) + ' -> ' + str(j)) si = part[i] sj = part[j] # Use original cell as trans_set trans_set = abstract_sys.ppp2pwa(mode, i)[1] active_subsystem = abstract_sys.ppp2sys(mode, i)[1] trans_feasible = is_feasible( si, sj, active_subsystem, N, closed_loop = closed_loop, trans_set = trans_set ) if trans_feasible: transitions[i, j] = 1 msg = '\t Feasible transition.' n_found += 1 else: transitions[i, j] = 0 msg = '\t Not feasible transition.' 
logger.debug(msg) logger.info('Checked: ' + str(n_checked)) logger.info('Found: ' + str(n_found)) assert n_checked != 0, 'would divide ' logger.info('Survived merging: ' + str(float(n_found) / n_checked) + ' % ') return transitions def multiproc_merge_partitions(abstractions): """LOGTIME in #processors parallel merging. Assuming sufficient number of processors. UNDER DEVELOPMENT; function signature may change without notice. Calling will result in NotImplementedError. """ raise NotImplementedError def merge_partitions(abstractions): """Merge multiple abstractions. @param abstractions: keyed by mode @type abstractions: dict of L{AbstractPwa} @return: (merged_abstraction, ap_labeling) where: - merged_abstraction: L{AbstractSwitched} - ap_labeling: dict """ if len(abstractions) == 0: warnings.warn('Abstractions empty, nothing to merge.') return # consistency check for ab1 in abstractions.values(): for ab2 in abstractions.values(): p1 = ab1.ppp p2 = ab2.ppp if p1.prop_regions != p2.prop_regions: msg = 'merge: partitions have different sets ' msg += 'of continuous propositions' raise Exception(msg) if ( not (p1.domain.A == p2.domain.A).all() or not (p1.domain.b == p2.domain.b).all()): raise Exception('merge: partitions have different domains') # check equality of original PPP partitions if ab1.orig_ppp == ab2.orig_ppp: logger.info('original partitions happen to be equal') init_mode = list(abstractions.keys())[0] all_modes = set(abstractions) remaining_modes = all_modes.difference(set([init_mode])) print('init mode: ' + str(init_mode)) print('all modes: ' + str(all_modes)) print('remaining modes: ' + str(remaining_modes)) # initialize iteration data prev_modes = [init_mode] # Create a list of merged-together regions ab0 = abstractions[init_mode] regions = list(ab0.ppp) parents = {init_mode:list(range(len(regions) ))} ap_labeling = {i:reg.props for i,reg in enumerate(regions)} for cur_mode in remaining_modes: ab2 = abstractions[cur_mode] r = merge_partition_pair( regions, ab2, cur_mode, prev_modes, parents, ap_labeling ) regions, parents, ap_labeling = r prev_modes += [cur_mode] new_list = regions # build adjacency based on spatial adjacencies of # component abstractions. # which justifies the assumed symmetry of part1.adj, part2.adj # Basically, if two regions are either 1) part of the same region in one of # the abstractions or 2) adjacent in one of the abstractions, then the two # regions are adjacent in the switched dynamics. n_reg = len(new_list) adj = np.zeros([n_reg, n_reg], dtype=int) for i, reg_i in enumerate(new_list): for j, reg_j in enumerate(new_list[0:i]): touching = False for mode in abstractions: pi = parents[mode][i] pj = parents[mode][j] part = abstractions[mode].ppp if (part.adj[pi, pj] == 1) or (pi == pj): touching = True break if not touching: continue if pc.is_adjacent(reg_i, reg_j): adj[i,j] = 1 adj[j,i] = 1 adj[i,i] = 1 ppp = PropPreservingPartition( domain=ab0.ppp.domain, regions=new_list, prop_regions=ab0.ppp.prop_regions, adj=adj ) abstraction = AbstractSwitched( ppp=ppp, modes=abstractions, ppp2modes=parents, ) return (abstraction, ap_labeling) def merge_partition_pair( old_regions, ab2, cur_mode, prev_modes, old_parents, old_ap_labeling ): """Merge an Abstraction with the current partition iterate. @param old_regions: A list of C{Region} that is from either: 1. The ppp of the first (initial) L{AbstractPwa} to be merged. 2. 
A list of already-merged regions @type old_regions: list of C{Region} @param ab2: Abstracted piecewise affine dynamics to be merged into the @type ab2: L{AbstractPwa} @param cur_mode: mode to be merged @type cur_mode: tuple @param prev_modes: list of modes that have already been merged together @type prev_modes: list of tuple @param old_parents: dict of modes that have already been merged to dict of indices of new regions to indices of regions @type old_parents: dict of modes to list of region indices in list C{old_regions} or dict of region indices to regions in original ppp for that mode @param old_ap_labeling: dict of states of already-merged modes to sets of propositions for each state @type old_ap_labeling: dict of tuples to sets @return: the following: - C{new_list}, list of new regions - C{parents}, same as input param C{old_parents}, except that it includes the mode that was just merged and for list of regions in return value C{new_list} - C{ap_labeling}, same as input param C{old_ap_labeling}, except that it includes the mode that was just merged. """ logger.info('merging partitions') part2 = ab2.ppp modes = prev_modes + [cur_mode] new_list = [] parents = {mode:dict() for mode in modes} ap_labeling = dict() for i in range(len(old_regions)): for j in range(len(part2)): isect = pc.intersect(old_regions[i], part2[j]) rc, xc = pc.cheby_ball(isect) # no intersection ? if rc < 1e-5: continue logger.info('merging region: A' + str(i) + ', with: B' + str(j)) # if Polytope, make it Region if len(isect) == 0: isect = pc.Region([isect]) # label the Region with propositions isect.props = old_regions[i].props.copy() new_list.append(isect) idx = new_list.index(isect) # keep track of parents for mode in prev_modes: parents[mode][idx] = old_parents[mode][i] parents[cur_mode][idx] = j # union of AP labels from parent states ap_label_1 = old_ap_labeling[i] ap_label_2 = ab2.ts.states[j]['ap'] logger.debug('AP label 1: ' + str(ap_label_1)) logger.debug('AP label 2: ' + str(ap_label_2)) # original partitions may be different if pwa_partition used # but must originate from same initial partition, # i.e., have same continuous propositions, checked above # # so no two intersecting regions can have different AP labels, # checked here if ap_label_1 != ap_label_2: msg = 'Inconsistent AP labels between intersecting regions\n' msg += 'of partitions of switched system.' raise Exception(msg) ap_labeling[idx] = ap_label_1 return new_list, parents, ap_labeling
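# ---------------------------------------------------------------------------
# Hedged usage sketch (added, not part of the module): a toy demonstration of
# the reachable_within() helper defined above.  It relies only on the
# module-level names np and reachable_within, so it can run when this file is
# executed directly; the 4-cell path adjacency below is an illustrative
# assumption, not data from any partition.
if __name__ == '__main__':
    # adjacency of a 4-cell path 0 - 1 - 2 - 3, with self-loops as used
    # throughout this module (adj[i, i] = 1)
    _A = np.array([[1, 1, 0, 0],
                   [1, 1, 1, 0],
                   [0, 1, 1, 1],
                   [0, 0, 1, 1]])
    # trans_length == 1: only direct neighbors are candidate pairs
    print(reachable_within(1, _A, _A))
    # trans_length == 2: neighbors of neighbors also become candidates,
    # e.g. the (0, 2) and (1, 3) entries turn on
    print(reachable_within(2, _A, _A))
# ---------------------------------------------------------------------------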
bsd-3-clause
michaelaye/scikit-image
doc/examples/plot_gabor.py
11
4450
""" ============================================= Gabor filter banks for texture classification ============================================= In this example, we will see how to classify textures based on Gabor filter banks. Frequency and orientation representations of the Gabor filter are similar to those of the human visual system. The images are filtered using the real parts of various different Gabor filter kernels. The mean and variance of the filtered images are then used as features for classification, which is based on the least squared error for simplicity. """ from __future__ import print_function import matplotlib.pyplot as plt import numpy as np from scipy import ndimage as ndi from skimage import data from skimage.util import img_as_float from skimage.filters import gabor_kernel def compute_feats(image, kernels): feats = np.zeros((len(kernels), 2), dtype=np.double) for k, kernel in enumerate(kernels): filtered = ndi.convolve(image, kernel, mode='wrap') feats[k, 0] = filtered.mean() feats[k, 1] = filtered.var() return feats def match(feats, ref_feats): min_error = np.inf min_i = None for i in range(ref_feats.shape[0]): error = np.sum((feats - ref_feats[i, :])**2) if error < min_error: min_error = error min_i = i return min_i # prepare filter bank kernels kernels = [] for theta in range(4): theta = theta / 4. * np.pi for sigma in (1, 3): for frequency in (0.05, 0.25): kernel = np.real(gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma)) kernels.append(kernel) shrink = (slice(0, None, 3), slice(0, None, 3)) brick = img_as_float(data.load('brick.png'))[shrink] grass = img_as_float(data.load('grass.png'))[shrink] wall = img_as_float(data.load('rough-wall.png'))[shrink] image_names = ('brick', 'grass', 'wall') images = (brick, grass, wall) # prepare reference features ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double) ref_feats[0, :, :] = compute_feats(brick, kernels) ref_feats[1, :, :] = compute_feats(grass, kernels) ref_feats[2, :, :] = compute_feats(wall, kernels) print('Rotated images matched against references using Gabor filter banks:') print('original: brick, rotated: 30deg, match result: ', end='') feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels) print(image_names[match(feats, ref_feats)]) print('original: brick, rotated: 70deg, match result: ', end='') feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels) print(image_names[match(feats, ref_feats)]) print('original: grass, rotated: 145deg, match result: ', end='') feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels) print(image_names[match(feats, ref_feats)]) def power(image, kernel): # Normalize images for better comparison. image = (image - image.mean()) / image.std() return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 + ndi.convolve(image, np.imag(kernel), mode='wrap')**2) # Plot a selection of the filter bank kernels and their responses. results = [] kernel_params = [] for theta in (0, 1): theta = theta / 4. 
* np.pi for frequency in (0.1, 0.4): kernel = gabor_kernel(frequency, theta=theta) params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency) kernel_params.append(params) # Save kernel and the power image for each image results.append((kernel, [power(img, kernel) for img in images])) fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6)) plt.gray() fig.suptitle('Image responses for Gabor filter kernels', fontsize=12) axes[0][0].axis('off') # Plot original images for label, img, ax in zip(image_names, images, axes[0][1:]): ax.imshow(img) ax.set_title(label, fontsize=9) ax.axis('off') for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]): # Plot Gabor kernel ax = ax_row[0] ax.imshow(np.real(kernel), interpolation='nearest') ax.set_ylabel(label, fontsize=7) ax.set_xticks([]) ax.set_yticks([]) # Plot Gabor responses with the contrast normalized for each filter vmin = np.min(powers) vmax = np.max(powers) for patch, ax in zip(powers, ax_row[1:]): ax.imshow(patch, vmin=vmin, vmax=vmax) ax.axis('off') plt.show()
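# ---------------------------------------------------------------------------
# Hedged extension (not part of the original example): the same filter bank
# can classify any rotated patch.  This sketch reuses compute_feats(),
# match(), the reference features and images built above; the 30 degree
# rotation of the 'wall' patch is an arbitrary test case.
feats = compute_feats(ndi.rotate(wall, angle=30, reshape=False), kernels)
print('original: wall, rotated: 30deg, match result: ', end='')
print(image_names[match(feats, ref_feats)])
# ---------------------------------------------------------------------------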
bsd-3-clause
johnbachman/rasmodel
gxp_exchange.py
6
2587
from rasmodel.scenarios.default import model import numpy as np from matplotlib import pyplot as plt from pysb.integrate import Solver from pysb import * from tbidbaxlipo.util import fitting # Zero out all initial conditions for ic in model.initial_conditions: ic[1].value = 0 KRAS = model.monomers['KRAS'] GDP = model.monomers['GDP'] GTP = model.monomers['GTP'] Expression('KRAS_mGXP_', model.observables['KRAS_mGTP_closed_'] + model.observables['KRAS_mGDP_closed_']) # Add an initial condition for HRAS with GDP or GTP pre-bound # (Concentration units in nM) Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None, mutant='WT') % GDP(p=1, label='n'), Parameter('KRAS_WT_GDP_0', 0.)) Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None, mutant='G13D') % GDP(p=1, label='n'), Parameter('KRAS_G13D_GDP_0', 0.)) Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None, mutant='WT') % GTP(p=1, label='n'), Parameter('KRAS_WT_GTP_0', 0.)) Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None, mutant='G13D') % GTP(p=1, label='n'), Parameter('KRAS_G13D_GTP_0', 0.)) plt.ion() # First simulate the data from Figure 1A (GDP exchange) # WT, GDP: model.parameters['mGDP_0'].value = 1500. model.parameters['KRAS_WT_GDP_0'].value = 750. t = np.linspace(0, 1000, 1000) # 1000 seconds sol = Solver(model, t) sol.run() plt.figure() plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='WT') # G13D, GDP: model.parameters['KRAS_WT_GDP_0'].value = 0 model.parameters['KRAS_G13D_GDP_0'].value = 750. sol.run() plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='G13D') plt.legend(loc='lower right') plt.title('GDP exchange') plt.xlabel('Time (s)') plt.ylabel('[Bound mGDP] (nM)') plt.show() # Now simulate the data from Figure 1B (GTP exchange) # WT, GTP model.parameters['mGDP_0'].value = 0. model.parameters['mGTP_0'].value = 1500. model.parameters['KRAS_WT_GDP_0'].value = 0. model.parameters['KRAS_G13D_GDP_0'].value = 0. model.parameters['KRAS_WT_GTP_0'].value = 750. model.parameters['KRAS_G13D_GTP_0'].value = 0. sol.run() plt.figure() plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='WT') # G13D, GTP model.parameters['KRAS_WT_GTP_0'].value = 0. model.parameters['KRAS_G13D_GTP_0'].value = 750. sol.run() plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='G13D') plt.legend(loc='lower right') plt.title('GTP exchange') plt.xlabel('Time (s)') plt.ylabel('[Bound mGTP] (nM)') plt.show()
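# ---------------------------------------------------------------------------
# Hedged post-processing sketch (not part of the original script): extract an
# apparent exchange rate from the last simulated curve (G13D with GTP, still
# stored in `sol` at this point) by fitting a single exponential with scipy.
# The single-exponential form is an assumption for summarising the curve,
# not part of the RAS model itself.
from scipy.optimize import curve_fit

def single_exp(time, span, k_obs):
    return span * (1 - np.exp(-k_obs * time))

popt, _ = curve_fit(single_exp, t, sol.yexpr['KRAS_mGXP_'], p0=[700., 1e-2])
print('apparent G13D mGTP exchange rate k_obs = %.2e /s' % popt[1])
# ---------------------------------------------------------------------------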
mit
haphaeu/yoshimi
PandasDataFrame/gumbel_dataframe.py
1
4150
# -*- coding: utf-8 -*-
"""
Trying to improve the Gumbel module.

The idea is to apply functions straight from results.txt to get to results.

Created on Mon Jul 17 11:44:58 2017

@author: rarossi
"""
# %%
import pandas as pd
import numpy as np
from scipy import stats as ss

gamma = 0.5772  # Euler constant
need_more_seeds = 'need larger sample for this fractile'
use_sample = 'use sample'
pns = [0.75, 0.8, 0.9, 0.99]

# %%
# df = pd.read_table('results_mini.txt')
df = pd.read_table('results.txt')
keys = ['WaveHs', 'WaveTp', 'WaveDirection']
df = df.set_index(keys=keys)

# Split this in two dfs, one for max, one for min
df_max = df[df.columns[['max' in i.lower() for i in df.columns.values]]]
df_min = df[df.columns[['min' in i.lower() for i in df.columns.values]]]

# %%
# functions to be calculated for diagnosis


def betaME(x):
    return np.std(x)*(np.sqrt(6))/np.pi


def muME_min(x):
    return np.mean(x) + gamma*betaME(x)


def muME_max(x):
    return np.mean(x) - gamma*betaME(x)


def muMLE_min(x):
    return ss.gumbel_l.fit(x)[0]


def betaMLE_min(x):
    return ss.gumbel_l.fit(x)[1]


def muMLE_max(x):
    return ss.gumbel_r.fit(x)[0]


def betaMLE_max(x):
    return ss.gumbel_r.fit(x)[1]


def sample_min(x, pn=0.9):
    num_seeds = len(x)
    x = x.sort_values()
    i = int(num_seeds-pn*num_seeds)
    enoughSeeds = num_seeds >= round(1/(1-pn), 4)
    if not enoughSeeds:
        return need_more_seeds
    return x[i-1]


def gME_min(x, pn=0.9):
    if 0 in x.values:
        return use_sample
    return ss.gumbel_l.ppf(1-pn, *ss.gumbel_l._fitstart(x))


def gMLE_min(x, pn=0.9):
    if 0 in x.values:
        return use_sample
    return ss.gumbel_l.ppf(1-pn, *ss.gumbel_l.fit(x))


def sample_max(x, pn=0.9):
    num_seeds = len(x)
    x = x.sort_values()
    i = int(pn*num_seeds)
    enoughSeeds = num_seeds >= round(1/(1-pn), 4)
    if not enoughSeeds:
        return need_more_seeds
    return x[i-1]


def gME_max(x, pn=0.9):
    return ss.gumbel_r.ppf(pn, *ss.gumbel_r._fitstart(x))


def gMLE_max(x, pn=0.9):
    return ss.gumbel_r.ppf(pn, *ss.gumbel_r.fit(x))


agg_list_min = [np.std, np.mean, np.max, np.min, betaME, muME_min, betaMLE_min, muMLE_min]
agg_list_max = [np.std, np.mean, np.max, np.min, betaME, muME_max, betaMLE_max, muMLE_max]

for pn in pns:
    # All this thing with function name changes is because more than 1 lambda function
    # is not accepted by pandas in an aggregate list...
    label = '_{}'.format(pn).replace('.', '_')
    agg_list_min.extend([lambda x: gME_min(x, pn=pn),
                         lambda x: gMLE_min(x, pn=pn),
                         lambda x: sample_min(x, pn=pn)])
    agg_list_min[-3].__name__ = 'gME_min'+label
    agg_list_min[-2].__name__ = 'gMLE_min'+label
    agg_list_min[-1].__name__ = 'sample_min'+label

    agg_list_max.extend([lambda x: gME_max(x, pn=pn),
                         lambda x: gMLE_max(x, pn=pn),
                         lambda x: sample_max(x, pn=pn)])
    agg_list_max[-3].__name__ = 'gME_max'+label
    agg_list_max[-2].__name__ = 'gMLE_max'+label
    agg_list_max[-1].__name__ = 'sample_max'+label

# %%
# Do the magic - aggregate data frame from results.txt into the required output format
df_res = pd.concat([df_min.groupby(by=keys).agg(agg_list_min),
                    df_max.groupby(by=keys).agg(agg_list_max)], axis=1)

# Finally write results to excel.
# Note that sheet iterates through df.columns to keep original order of columns
xl_writer = pd.ExcelWriter('lixo.xlsx')
for sheet in df.columns:
    df_res[sheet].reset_index().to_excel(xl_writer, index=False, sheet_name=sheet)
xl_writer.save()

# %%
# .rename(columns={'amax': 'max', 'amin': 'min'})
# Old stuff
#
# #df_min.groupby(by=keys).describe(percentiles=[0.01, 0.1, 0.5, 0.9, 0.99]).head()
#
# Create a multiindex object for the results dataframe
## mindex = pd.MultiIndex.from_product(iterables=[['stdev', 'mean', 'max', 'min'], df.columns])
# Create a template for the results DataFrame. Keep same index, use the multi-index as columns
## df_res = pd.DataFrame(index=df[~df.index.duplicated(keep='first')].index, columns=mindex)
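
One caveat worth flagging in the renaming loop above: the lambdas close over the loop variable pn, and Python resolves that name only when pandas calls them during the aggregation, so every renamed lambda ends up using the last value in pns (0.99) regardless of its name. Binding the current value through a default argument captures it at definition time; a minimal illustration:

# Late binding: all three lambdas see pn == 0.99 after the loop finishes.
late = [lambda x: x + pn for pn in (0.75, 0.8, 0.99)]
print([f(0) for f in late])       # [0.99, 0.99, 0.99]

# Early binding via a default argument captures each value as it is defined.
early = [lambda x, pn=pn: x + pn for pn in (0.75, 0.8, 0.99)]
print([f(0) for f in early])      # [0.75, 0.8, 0.99]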
lgpl-3.0
IPGP/webobs
CODE/python/AAA4webobs/automatic_processing/recording.py
1
19759
# -*-coding:Utf-8 -* # Last update: 03/2018 - Marielle MALFANTE # Contact: [email protected] (@gmail.com) # Copyright: Marielle MALFANTE - GIPSA-Lab # Univ. Grenoble Alpes, CNRS, Grenoble INP, GIPSA-lab, 38000 Grenoble, France from pathlib import Path import sys path = str(Path(Path(__file__).parent.absolute()).parent.absolute()) sys.path.insert(0,path) import datetime import numpy as np from DataReadingFunctions import * from os.path import isfile, isdir import matplotlib import pickle from features import FeatureVector import scipy.signal as sg import math from tools import butter_bandpass_filter, display_observation, getClasses from featuresFunctions import energy, energy_u from math import sqrt from os import mkdir from sklearn.metrics import confusion_matrix, accuracy_score import matplotlib.pyplot as plt class Recording: """ Object describing a recording to be analyzed. All .wav, .dat or other recording format will be converted into Recording object - path to recording file - data - fs - datetime de t_start - datetime de t_end - n_length """ def __init__(self,path_to_recording,config,verbatim=0): """ Initialization method """ # If no file if not isfile(path=path_to_recording): print("No file at %s, recording object could not be created"%path_to_recording) # Get reading function which is different for every application reading_function = config.data_to_analyze['reading_function'] self._verbatim = verbatim self.path = path_to_recording # Read the file [self.data, self.fs, self.t_start, self.t_end, self.length_n] = reading_function(path_to_recording,config,verbatim=verbatim) # Other stuff self.predictedProbas = None # Keeps all the output probabilities self.decidedClasses = None # Keeps only argmax(proba or -1 if unknown). Depends on getClasses function. self.associatedProbas = None # Proba associated to the decided class if verbatim > 1: print("\tRecording has been read and is of shape", np.shape(self.data)) def __repr__(self): """ Representation method (transform the object to str for display) """ s = 'Recording object of <path> %s, '%self.path + \ '<fs> %d, '%self.fs + \ '<t_start> ' + str(self.t_start) + ', <t_end> ' + str(self.t_end) + \ ', <length_n> %d, '%self.length_n + \ '<data> and <labels> are of shape ' + str(np.shape(self.data)) + str(np.shape((self.labels))) return s def analyze(self,analyzer,config,save=True): """ Continuous analyzis of recording object with - analyzer: analyzer object with all needed object to conduct the analysis - config: Configuration object containing the needed configuration details for the analysis (window length, etc) Results will be stored in self.labels. 
""" # Local variables n_classes = len(analyzer.labelEncoder.classes_) window_length_t = config.analysis['window_length'] delta = config.analysis['delta'] n_window = config.analysis['n_window'] n_bands = config.analysis['nBands'] saving_path = config.general['project_root'] + config.application['name'].upper() + '/' \ 'res/' + config.configuration_number + '/' + self.path.split('/')[-1] + '__RES.np' window_length_n = int(window_length_t * self.fs) # Self.predictedProbas is of shape (nBands, nData, nClasses) self.predictedProbas = np.array([[[None]*n_classes]*np.shape(self.data)[0]]*n_bands) self.decidedClasses = np.array([[None]*np.shape(self.data)[0]]*n_bands) self.associatedProbas = np.array([[None]*np.shape(self.data)[0]]*n_bands) # Multi-scale analysis not implemented yet if n_window != 1: print('\tMulti-scale analysis not implemented yet') return 1 # Analyze with sliding window # NB: FeatureVector is a pattern of features to be computed again for every new observation to analyze features = FeatureVector(config, verbatim=self._verbatim) for i_analyzed in range(0,self.length_n,delta): # Find signal piece to analyze i_start = i_analyzed - int(window_length_n/2) i_end = i_analyzed + int(window_length_n/2) # If not enough signal to analyze, unknown prediction if i_start < 0 or i_end > (self.length_n - 1): self.predictedProbas[:,i_analyzed] = [None]*n_classes continue # pass the rest of the loop for this iteration # Otherwise, get signal, and for each bandwidth: get features and make prediction and store predictions # Get signal signal = self.data[i_start:i_end] # Loop for the various bandwidths for i in range(n_bands): if self._verbatim > 2: print('\t\tData index: ', i_analyzed, '\tbandwidth: ', i) # Filtering: f_min = config.analysis['bandwidth']['f_min'][i] f_max = config.analysis['bandwidth']['f_max'][i] butter_order = config.analysis['butter_order'] signature = butter_bandpass_filter(signal, f_min, f_max, self.fs, order=butter_order) # Preprocessing if config.preprocessing['energy_norm']: E = energy(signature, arg_dict={'E_u':energy_u(signature)}) signature = signature / sqrt(E) # Get features features.compute(signature,self.fs) # Scale features features.featuresValues = analyzer.scaler.transform(features.featuresValues.reshape(1,-1)) # Make prediction (labels store all probas, predictedClass only the decided one) self.predictedProbas[i,i_analyzed] = analyzer.model.predict_proba(features.featuresValues) return def makeDecision(self, config): """ This method analyses the output probabilities stored in self.predictedProbas to decide on the final class (class or unknown). The probability associated to the prediction is also stores the associated probability. 
NB : This methods can be called if the object has previously been analyzed (or analyzed, saved and then loaded) """ (i_, j_, k_) = np.where(self.predictedProbas) for [i,j] in np.unique([[i,j] for (i,j) in zip(i_,j_)], axis=0): # DO NOT BREAK THAT LINE ( where only gets rid of the namy None values, but also of the many times when predicted proba = 0 ) if self._verbatim > 2: print('\t\tData index: ', j, '\tbandwidth: ',i) a,b = getClasses(self.predictedProbas[i][j].reshape(1,-1), threshold=config.features['thresholds'], thresholding=config.features['thresholding']) self.decidedClasses[i][j] = a[0] # Array returned but only one data considered here, so [0] self.associatedProbas[i][j] = b[0] # Array returned but only one data considered here, so [0] def save(self, config): """ Method used to save the object for later use (depending on the application, training can take a while and you might want to save the analyzer) """ path = config.general['project_root'] + config.application['name'].upper() + '/res/' + config.configuration_number + '/' + config.general['path_to_res'] savingPath = path+self.path.split('/')[-1]+'__res.rec' pickle.dump(self.__dict__,open(savingPath,'wb'),2) if self._verbatim > 1: print('\tRecording has been saved at: ', path) return def load(self, config): """ Method used to load the object. """ verbatim = self._verbatim path = config.general['project_root'] + config.application['name'].upper() + '/res/' + config.configuration_number + '/' + config.general['path_to_res'] savingPath = path+self.path.split('/')[-1]+'__res.rec' tmp_dict = pickle.load(open(savingPath,'rb')) self.__dict__.update(tmp_dict) self._verbatim = verbatim if self._verbatim > 1: print('\tRecording has been loaded from: ', path) def display(self, config, onlineDisplay=False, saveDisplay=True, forChecking=False, labelEncoder=None): """ Displays prediction results """ if forChecking: self._displayForChecking(config, labelEncoder=labelEncoder) else: self._displayForContinuousAnalysis(config, onlineDisplay=onlineDisplay, saveDisplay=saveDisplay) def _displayForChecking(self, config, labelEncoder): """ Save each observation separatly in class by class folders. Allows the expert revision of prediction results and accuracy measurement of the analysis. 
""" # Local variables n_classes = len(labelEncoder.classes_) window_length_t = config.analysis['window_length'] delta = config.analysis['delta'] n_window = config.analysis['n_window'] n_bands = config.analysis['nBands'] saving_path = config.general['project_root'] + config.application['name'].upper() + '/' \ 'res/' + config.configuration_number + '/' + self.path.split('/')[-1] + '__RES.np' window_length_n = int(window_length_t * self.fs) # Make saving folders if they do not exists path = config.general['project_root'] + config.application['name'].upper() + '/' + config.general['path_to_res'] + config.configuration_number + '/' + config.general['path_to_res_to_review'] path1 = path + 'to_review/' path2 = path + 'reviewed/' if not isdir(path): mkdir(path) if not isdir(path1): mkdir(path1) if not isdir(path2): mkdir(path2) for class_name in labelEncoder.classes_ : if not isdir(path1 + class_name + '/'): mkdir(path1 + class_name + '/') if config.features['thresholding']: if not isdir(path1 + 'unknown/'): mkdir(path1 + 'unknown/') # Displaying of each predicted observation for i_analyzed in range(0,self.length_n,delta): # Find signal piece to analyze i_start = i_analyzed - int(window_length_n/2) i_end = i_analyzed + int(window_length_n/2) # If not enough signal to analyze, unknown prediction if i_start < 0 or i_end > (self.length_n - 1): # self.predictedProbas[:,i_analyzed] = [None]*n_classes continue # pass the rest of the loop for this iteration # Otherwise, get signal, and for each bandwidth: get features and make prediction and store predictions # Get signal signal = self.data[i_start:i_end] # Loop for the various bandwidths for i in range(n_bands): if self._verbatim > 2: print('\t\tData index: ', i_analyzed, '\tbandwidth: ', i) # Filtering, actually no filtering for displaying f_min = config.analysis['bandwidth']['f_min'][i] f_max = config.analysis['bandwidth']['f_max'][i] # butter_order = config.analysis['butter_order'] # signature = butter_bandpass_filter(signal, f_min, f_max, self.fs, order=butter_order) signature = signal # Preprocessing if config.preprocessing['energy_norm']: E = energy(signature, arg_dict={'E_u':energy_u(signature)}) signature = signature / sqrt(E) # Get figure title and path # Get class name. Technically, should use the encoder and everything, but we don't want to have the analyzer here. 
# Let's keep it that way now, and we'll see if we change it class_name = [labelEncoder.inverse_transform(s) if s in range(len(labelEncoder.classes_)) else 'unknown' for s in [self.decidedClasses[i,i_analyzed]]][0] # class_name = str(self.decidedClasses[i,i_analyzed]) s = '_'.join(self.path.split('/')[-1].split('.')[0].split('__')[0].split('_')[1:2]) t_analyzed = i_analyzed/self.fs t_analyzed = np.round(t_analyzed,1) figure_title = '%s__%f'%(s,t_analyzed) + ' p(%d)=%f'%(self.decidedClasses[i,i_analyzed],self.associatedProbas[i,i_analyzed]) figure_title_extended = '%d__%d__%s__%f'%(f_min,f_max, \ self.path.split('/')[-1].split('.')[0], t_analyzed) figure_path = path1 + class_name + '/' + figure_title_extended # Get spectro signal needed for displaying i_start_signal_large = max(0,i_analyzed - int(window_length_n/2) - window_length_n) i_end_signal_large = min(i_analyzed + int(window_length_n/2) + window_length_n, self.length_n) signal_large = self.data[i_start_signal_large:i_end_signal_large] window_size_t = window_length_n / self.fs # Display and save the observation display_observation(signal_large, f_min, f_max, self.fs, window_size_t, \ config, figure_title, figure_path) def _displayForContinuousAnalysis(self, config, onlineDisplay=False, saveDisplay=True): """ Display prediction results with one file per recording object TODO: improve & finish """ # Local variables n_bands = config.analysis['nBands'] # Settings w_size = config.analysis['spectro_window_size'] fMax = config.analysis['f_max'] ratioDecimate = int(self.fs/(2*fMax)) # Make figure fig = plt.figure(figsize=(15,10)) gs = matplotlib.gridspec.GridSpec(3+n_bands*3,1,height_ratios=[3,2,0.7]+[1,3,0.5]*n_bands) # gs = matplotlib.gridspec.GridSpec(3+n_bands*3,1,height_ratios=[3,2,0.7]+[1,3,0.5,1,3]) # gs = matplotlib.gridspec.GridSpec(3,1,height_ratios=[3,2,0.7]) # gs = matplotlib.gridspec.GridSpec(2+n_bands*2,1,height_ratios=[3,2]+[1,3]*n_bands) # Time vector x = np.array([self.t_start + datetime.timedelta(seconds=i/self.fs) for i in range(self.length_n)]) # SPECTROGRAM ax = plt.subplot(gs[0]) w = sg.kaiser(w_size, 18) w = w * w_size / sum([pow(j, 2) for j in w]) f, time, spec = sg.spectrogram(#sg.decimate(tr.data,4,zero_phase=False), sg.decimate(self.data,ratioDecimate,zero_phase=False), fs=self.fs/ratioDecimate, nperseg=w_size, noverlap=0.9*w_size, nfft=1.5*w_size, window=w, scaling='density') # PSD in unit(x)**2/Hz spec_db = 10 * np.log10(spec) # ax.pcolormesh(time, f, spec_db, shading='flat', vmin=30, vmax=85) ax.pcolormesh(time, f, spec_db, shading='gouraud') ax.set_ylim((0,fMax)) # ax.set_xlim((0,self.length_n/self.fs-0.5)) ax.set_xlim((time[0],time[-1])) plt.xticks([]) plt.yticks(np.linspace(0,fMax,4),size=14) plt.ylabel('Freq. 
(Hz)', size=14) plt.title('Signal and spectrogram', size=14) # Signal ax = plt.subplot(gs[1]) ax.plot(x,self.data) ax.set_ylim((1.1*np.round(np.min(self.data),2),np.round(1.1*np.max(self.data),2))) plt.yticks([np.round(np.min(self.data),2),0,np.round(np.max(self.data),2)],size=14) plt.ylabel('Amplitude', size=14) # ax.xaxis.tick_top() # Results on each frequency band for i in range(n_bands): # Decided classes ax = plt.subplot(gs[3+i*n_bands+1*i%2]) s1 = self.decidedClasses[n_bands-1-i,:].astype(np.double) mask1 = np.isfinite(s1) (a,)=np.shape(self.decidedClasses[n_bands-1-i,mask1]) toPlot = np.array(self.decidedClasses[n_bands-1-i,mask1].reshape(1,a),dtype=int) # # lineObjects = ax.pcolor(toPlot, vmin=-1, vmax=5, cmap=Pastel1) ax.pcolor(toPlot, cmap='Set1') #vmin=-1, vmax=5, # ax.legend(handles=[line]) # ax.set_xlim((0,a)) # # ax.set_xlabel('') # # ax.set_xticks([]) # ax.set_ylabel('') # ax.set_yticks([]) # # title = 'Probability\n' + '(%d-%d Hz)'%(config.analysis['bandwidth']['f_min'][n_bands-1-i],config.analysis['bandwidth']['f_max'][n_bands-1-i]) # # plt.ylabel(title, size=14) # if i==0: # # plt.legend(lineObjects, analyzer.labelEncoder.classes_) # plt.title('Prediction results in the %d different frequency bands'%n_bands, size=14) # Predicted probabilities ax = plt.subplot(gs[3+i*n_bands+1+1*i%2]) s1 = self.predictedProbas[n_bands-1-i,:,:].astype(np.double) mask1 = np.isfinite(s1) mask1 = mask1[:,0] # /!\ safe in this case: index 0 is None => all are None # # lineObjects = ax.plot(x[mask1],self.predictedProbas[n_bands-1-i,mask1],'.') ax.plot(x[mask1],self.predictedProbas[n_bands-1-i,mask1],'.') # Est-ce-qu'on peut mettre un cmap ici ou pas ? colormap = plt.cm.Set1 #nipy_spectral, Set1,Paired colors = [colormap(i) for i in np.linspace(0, 1,len(ax.lines))] for i,j in enumerate(ax.lines): j.set_color(colors[i]) # ax.legend(handles=[line]) # # plt.gcf().autofmt_xdate() # fig.autofmt_xdate() # dates_format = matplotlib.dates.DateFormatter('%d/%m/%y %H:%M:%S') # # plt.xlim((self.t_start,self.t_end)) # ax.xaxis.set_major_formatter(dates_format) # plt.yticks([0,0.2,0.4,0.6,0.8],size=14) # plt.xticks(fontsize=14) # title = 'Probability and \npredicted class\n' + '(%d-%d Hz)'%(config.analysis['bandwidth']['f_min'][n_bands-1-i],config.analysis['bandwidth']['f_max'][n_bands-1-i]) # plt.ylabel(title, size=14) # plt.xlabel('Date', size=14) # plt.yticks(fontsize=14) # General things on the figure fig.subplots_adjust(hspace=0) title = self.path.split('/')[-1].split('.')[0] plt.suptitle(title, size=20) # fig.autofmt_xdate() # <-- does not work alone, and kills everythink if uncommented dates_format = matplotlib.dates.DateFormatter('%d/%m/%y %H:%M:%S') plt.xlim((self.t_start,self.t_end)) ax.xaxis.set_major_formatter(dates_format) plt.xticks(fontsize=14, rotation=30) # # If online displaying needed if onlineDisplay: plt.show() # If saving needed if saveDisplay: path = config.general['project_root'] + config.application['name'].upper() + '/res/' + config.configuration_number + '/' + config.general['path_to_visuals'] plt.savefig(path+title+'.png',format='png') if self._verbatim > 1: print("\tRecording display has been saved") plt.clf() plt.close(fig) return
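
The butter_bandpass_filter helper imported from tools is not included in this record; a conventional SciPy implementation of such a helper might look like the sketch below (an illustrative assumption, not the actual function from the AAA code).

import numpy as np
from scipy.signal import butter, lfilter

def butter_bandpass_filter(data, f_min, f_max, fs, order=4):
    # Design a Butterworth bandpass filter in normalized frequency and
    # apply it to the 1-D signal `data`.
    nyq = 0.5 * fs
    b, a = butter(order, [f_min / nyq, f_max / nyq], btype='band')
    return lfilter(b, a, data)

# Quick check on white noise sampled at 100 Hz, keeping the 5-20 Hz band.
rng = np.random.RandomState(0)
filtered = butter_bandpass_filter(rng.randn(1000), 5.0, 20.0, fs=100.0)
print(filtered.shape)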
gpl-3.0
mvernacc/proptools
docs/source/examples/solid/plots/equilibrium_pressure.py
1
1734
"""Illustrate the chamber pressure equilibrium of a solid rocket motor.""" from matplotlib import pyplot as plt import numpy as np p_c = np.linspace(1e6, 10e6) # Chamber pressure [units: pascal]. # Propellant properties gamma = 1.26 # Exhaust gas ratio of specific heats [units: dimensionless]. rho_solid = 1510. # Solid propellant density [units: kilogram meter**-3]. n = 0.5 # Propellant burn rate exponent [units: dimensionless]. a = 2.54e-3 * (6.9e6)**(-n) # Burn rate coefficient, such that the propellant # burns at 2.54 mm s**-1 at 6.9 MPa [units: meter second**-1 pascal**-n]. c_star = 1209. # Characteristic velocity [units: meter second**-1]. # Motor geometry A_t = 839e-6 # Throat area [units: meter**2]. A_b = 1.25 # Burn area [units: meter**2]. # Compute the nozzle mass flow rate at each chamber pressure. # [units: kilogram second**-1]. m_dot_nozzle = p_c * A_t / c_star # Compute the combustion mass addition rate at each chamber pressure. # [units: kilogram second**-1]. m_dot_combustion = A_b * rho_solid * a * p_c**n # Plot the mass rates plt.plot(p_c * 1e-6, m_dot_nozzle, label='Nozzle') plt.plot(p_c * 1e-6, m_dot_combustion, label='Combustion') plt.xlabel('Chamber pressure [MPa]') plt.ylabel('Mass rate [kg / s]') # Find where the mass rates are equal (e.g. the equilibrium). i_equil = np.argmin(abs(m_dot_combustion - m_dot_nozzle)) m_dot_equil = m_dot_nozzle[i_equil] p_c_equil = p_c[i_equil] # Plot the equilibrium point. plt.scatter(p_c_equil * 1e-6, m_dot_equil, marker='o', color='black', label='Equilibrium') plt.axvline(x=p_c_equil * 1e-6, color='grey', linestyle='--') plt.title('Chamber pressure: stable equilibrium, $n =$ {:.1f}'.format(n)) plt.legend() plt.show()
mit
adamgreenhall/scikit-learn
examples/bicluster/plot_spectral_coclustering.py
276
1736
""" ============================================== A demo of the Spectral Co-Clustering algorithm ============================================== This example demonstrates how to generate a dataset and bicluster it using the the Spectral Co-Clustering algorithm. The dataset is generated using the ``make_biclusters`` function, which creates a matrix of small values and implants bicluster with large values. The rows and columns are then shuffled and passed to the Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to make biclusters contiguous shows how accurately the algorithm found the biclusters. """ print(__doc__) # Author: Kemal Eren <[email protected]> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_biclusters from sklearn.datasets import samples_generator as sg from sklearn.cluster.bicluster import SpectralCoclustering from sklearn.metrics import consensus_score data, rows, columns = make_biclusters( shape=(300, 300), n_clusters=5, noise=5, shuffle=False, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") data, row_idx, col_idx = sg._shuffle(data, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralCoclustering(n_clusters=5, random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print("consensus score: {:.3f}".format(score)) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.show()
bsd-3-clause
chenyyx/scikit-learn-doc-zh
examples/en/classification/plot_classification_probability.py
138
2871
""" =============================== Plot classification probability =============================== Plot the classification probability for different classifiers. We use a 3 class dataset, and we classify it with a Support Vector classifier, L1 and L2 penalized logistic regression with either a One-Vs-Rest or multinomial setting, and Gaussian process classification. The logistic regression is not a multiclass classifier out of the box. As a result it can identify only the first class. """ print(__doc__) # Author: Alexandre Gramfort <[email protected]> # License: BSD 3 clause import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn import datasets iris = datasets.load_iris() X = iris.data[:, 0:2] # we only take the first two features for visualization y = iris.target n_features = X.shape[1] C = 1.0 kernel = 1.0 * RBF([1.0, 1.0]) # for GPC # Create different classifiers. The logistic regression cannot do # multiclass out of the box. classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'), 'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'), 'Linear SVC': SVC(kernel='linear', C=C, probability=True, random_state=0), 'L2 logistic (Multinomial)': LogisticRegression( C=C, solver='lbfgs', multi_class='multinomial'), 'GPC': GaussianProcessClassifier(kernel) } n_classifiers = len(classifiers) plt.figure(figsize=(3 * 2, n_classifiers * 2)) plt.subplots_adjust(bottom=.2, top=.95) xx = np.linspace(3, 9, 100) yy = np.linspace(1, 5, 100).T xx, yy = np.meshgrid(xx, yy) Xfull = np.c_[xx.ravel(), yy.ravel()] for index, (name, classifier) in enumerate(classifiers.items()): classifier.fit(X, y) y_pred = classifier.predict(X) classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100 print("classif_rate for %s : %f " % (name, classif_rate)) # View probabilities= probas = classifier.predict_proba(Xfull) n_classes = np.unique(y_pred).size for k in range(n_classes): plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1) plt.title("Class %d" % k) if k == 0: plt.ylabel(name) imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)), extent=(3, 9, 1, 5), origin='lower') plt.xticks(()) plt.yticks(()) idx = (y_pred == k) if idx.any(): plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k') ax = plt.axes([0.15, 0.04, 0.7, 0.05]) plt.title("Probability") plt.colorbar(imshow_handle, cax=ax, orientation='horizontal') plt.show()
gpl-3.0
bhilburn/gnuradio
gr-filter/examples/fir_filter_fff.py
47
4014
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys

try:
    import scipy
except ImportError:
    print "Error: could not import scipy (http://www.scipy.org/)"
    sys.exit(1)

try:
    import pylab
except ImportError:
    print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
    sys.exit(1)

class example_fir_filter_fff(gr.top_block):
    def __init__(self, N, fs, bw, tw, atten, D):
        gr.top_block.__init__(self)

        self._nsamps = N
        self._fs = fs
        self._bw = bw
        self._tw = tw
        self._at = atten
        self._decim = D
        taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
        print "Num. Taps: ", len(taps)

        self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
        self.head = blocks.head(gr.sizeof_float, self._nsamps)

        self.filt0 = filter.fir_filter_fff(self._decim, taps)

        self.vsnk_src = blocks.vector_sink_f()
        self.vsnk_out = blocks.vector_sink_f()

        self.connect(self.src, self.head, self.vsnk_src)
        self.connect(self.head, self.filt0, self.vsnk_out)

def main():
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=10000,
                      help="Number of samples to process [default=%default]")
    parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
                      help="System sample rate [default=%default]")
    parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
                      help="Filter bandwidth [default=%default]")
    parser.add_option("-T", "--transition", type="eng_float", default=100,
                      help="Transition band [default=%default]")
    parser.add_option("-A", "--attenuation", type="eng_float", default=80,
                      help="Stopband attenuation [default=%default]")
    parser.add_option("-D", "--decimation", type="int", default=1,
                      help="Decmation factor [default=%default]")
    (options, args) = parser.parse_args ()

    put = example_fir_filter_fff(options.nsamples,
                                 options.samplerate,
                                 options.bandwidth,
                                 options.transition,
                                 options.attenuation,
                                 options.decimation)
    put.run()

    data_src = scipy.array(put.vsnk_src.data())
    data_snk = scipy.array(put.vsnk_out.data())

    # Plot the signals PSDs
    nfft = 1024
    f1 = pylab.figure(1, figsize=(12,10))
    s1 = f1.add_subplot(1,1,1)
    s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
           Fs=options.samplerate)
    s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
           Fs=options.samplerate)

    f2 = pylab.figure(2, figsize=(12,10))
    s2 = f2.add_subplot(1,1,1)
    s2.plot(data_src)
    s2.plot(data_snk.real, 'g')

    pylab.show()

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
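
For readers without GNU Radio installed, the core of what this flowgraph computes (white Gaussian noise pushed through a low-pass FIR filter, with optional decimation) can be approximated with SciPy alone; firwin with a fixed tap count stands in for firdes.low_pass_2 here, so the exact tap count and response will differ.

import numpy as np
from scipy import signal

fs = 8000.0     # sample rate [Hz]
bw = 1000.0     # passband edge [Hz]
ntaps = 101     # assumed tap count (firdes derives its own from the specs)
decim = 1

taps = signal.firwin(ntaps, bw / (fs / 2.0))     # low-pass FIR design
rng = np.random.RandomState(0)
noise = rng.randn(10000)                         # white Gaussian source
filtered = signal.lfilter(taps, 1.0, noise)[::decim]

print(len(taps), filtered.shape)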
gpl-3.0
hitszxp/scikit-learn
sklearn/metrics/classification.py
8
54908
"""Metrics to assess performance on classification task given classe prediction Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Olivier Grisel <[email protected]> # Arnaud Joly <[email protected]> # Jochen Wersdorfer <[email protected]> # Lars Buitinck <[email protected]> # Joel Nothman <[email protected]> # Noel Dawe <[email protected]> # Jatin Shah <[email protected]> # Saurabh Jha <[email protected]> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy.sparse import csr_matrix from scipy.spatial.distance import hamming as sp_hamming from ..preprocessing import LabelBinarizer from ..preprocessing import LabelEncoder from ..utils import check_array from ..utils import check_consistent_length from ..preprocessing import MultiLabelBinarizer from ..utils import column_or_1d from ..utils.multiclass import unique_labels from ..utils.multiclass import type_of_target from ..utils.validation import _num_samples from ..utils.sparsefuncs import count_nonzero from .base import UndefinedMetricWarning def _check_targets(y_true, y_pred): """Check that y_true and y_pred belong to the same classification task This converts multiclass or binary types to a common shape, and raises a ValueError for a mix of multilabel and multiclass targets, a mix of multilabel formats, for the presence of continuous-valued or multioutput targets, or for targets of different lengths. Column vectors are squeezed to 1d, while multilabel formats are returned as CSR sparse label indicators. 
Parameters ---------- y_true : array-like y_pred : array-like Returns ------- type_true : one of {'multilabel-indicator', 'multilabel-sequences', \ 'multiclass', 'binary'} The type of the true target data, as output by ``utils.multiclass.type_of_target`` y_true : array or indicator matrix y_pred : array or indicator matrix """ check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true) type_pred = type_of_target(y_pred) y_type = set([type_true, type_pred]) if y_type == set(["binary", "multiclass"]): y_type = set(["multiclass"]) if len(y_type) > 1: raise ValueError("Can't handle mix of {0} and {1}" "".format(type_true, type_pred)) # We can't have more than one value on y_type => The set is no more needed y_type = y_type.pop() # No metrics support "multiclass-multioutput" format if (y_type not in ["binary", "multiclass", "multilabel-indicator", "multilabel-sequences"]): raise ValueError("{0} is not supported".format(y_type)) if y_type in ["binary", "multiclass"]: y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type.startswith('multilabel'): if y_type == 'multilabel-sequences': labels = unique_labels(y_true, y_pred) binarizer = MultiLabelBinarizer(classes=labels, sparse_output=True) y_true = binarizer.fit_transform(y_true) y_pred = binarizer.fit_transform(y_pred) y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = 'multilabel-indicator' return y_type, y_true, y_pred def _weighted_sum(sample_score, sample_weight, normalize=False): if normalize: return np.average(sample_score, weights=sample_weight) elif sample_weight is not None: return np.dot(sample_score, sample_weight) else: return sample_score.sum() def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None): """Accuracy classification score. In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must *exactly* match the corresponding set of labels in y_true. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- score : float If ``normalize == True``, return the correctly classified samples (float), else it returns the number of correctly classified samples (int). The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- jaccard_similarity_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equal to the ``jaccard_similarity_score`` function. 
Examples -------- >>> import numpy as np >>> from sklearn.metrics import accuracy_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> accuracy_score(y_true, y_pred) 0.5 >>> accuracy_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): differing_labels = count_nonzero(y_true - y_pred, axis=1) score = differing_labels == 0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def confusion_matrix(y_true, y_pred, labels=None): """Compute confusion matrix to evaluate the accuracy of a classification By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}` is equal to the number of observations known to be in group :math:`i` but predicted to be in group :math:`j`. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. labels : array, shape = [n_classes], optional List of labels to index the matrix. This may be used to reorder or select a subset of labels. If none is given, those that appear at least once in ``y_true`` or ``y_pred`` are used in sorted order. Returns ------- C : array, shape = [n_classes, n_classes] Confusion matrix References ---------- .. [1] `Wikipedia entry for the Confusion matrix <http://en.wikipedia.org/wiki/Confusion_matrix>`_ Examples -------- >>> from sklearn.metrics import confusion_matrix >>> y_true = [2, 0, 2, 2, 0, 1] >>> y_pred = [0, 0, 2, 2, 0, 2] >>> confusion_matrix(y_true, y_pred) array([[2, 0, 0], [0, 0, 1], [1, 0, 2]]) """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type not in ("binary", "multiclass"): raise ValueError("%s is not supported" % y_type) if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) n_labels = labels.size label_to_ind = dict((y, x) for x, y in enumerate(labels)) # convert yt, yp into index y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred]) y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true]) # intersect y_pred, y_true with labels, eliminate items not in labels ind = np.logical_and(y_pred < n_labels, y_true < n_labels) y_pred = y_pred[ind] y_true = y_true[ind] CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)), shape=(n_labels, n_labels) ).toarray() return CM def jaccard_similarity_score(y_true, y_pred, normalize=True, sample_weight=None): """Jaccard similarity coefficient score The Jaccard index [1], or Jaccard similarity coefficient, defined as the size of the intersection divided by the size of the union of two label sets, is used to compare set of predicted labels for a sample to the corresponding set of labels in ``y_true``. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the sum of the Jaccard similarity coefficient over the sample set. Otherwise, return the average of Jaccard similarity coefficient. sample_weight : array-like of shape = [n_samples], optional Sample weights. 
Returns ------- score : float If ``normalize == True``, return the average Jaccard similarity coefficient, else it returns the sum of the Jaccard similarity coefficient over the sample set. The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- accuracy_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equivalent to the ``accuracy_score``. It differs in the multilabel classification problem. References ---------- .. [1] `Wikipedia entry for the Jaccard index <http://en.wikipedia.org/wiki/Jaccard_index>`_ Examples -------- >>> import numpy as np >>> from sklearn.metrics import jaccard_similarity_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> jaccard_similarity_score(y_true, y_pred) 0.5 >>> jaccard_similarity_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\ np.ones((2, 2))) 0.75 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): with np.errstate(divide='ignore', invalid='ignore'): # oddly, we may get an "invalid" rather than a "divide" error here pred_or_true = count_nonzero(y_true + y_pred, axis=1) pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1) score = pred_and_true / pred_or_true # If there is no label, it results in a Nan instead, we set # the jaccard to 1: lim_{x->0} x/x = 1 # Note with py2.6 and np 1.3: we can't check safely for nan. score[pred_or_true == 0.0] = 1.0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def matthews_corrcoef(y_true, y_pred): """Compute the Matthews correlation coefficient (MCC) for binary classes The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary (two-class) classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] Only in the binary case does this relate to information about true and false positives and negatives. See references below. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. Returns ------- mcc : float The Matthews correlation coefficient (+1 represents a perfect prediction, 0 an average random prediction and -1 and inverse prediction). References ---------- .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the accuracy of prediction algorithms for classification: an overview <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_ .. [2] `Wikipedia entry for the Matthews Correlation Coefficient <http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_ Examples -------- >>> from sklearn.metrics import matthews_corrcoef >>> y_true = [+1, +1, +1, -1] >>> y_pred = [+1, -1, +1, +1] >>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS -0.33... 
""" y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type != "binary": raise ValueError("%s is not supported" % y_type) lb = LabelEncoder() lb.fit(np.hstack([y_true, y_pred])) y_true = lb.transform(y_true) y_pred = lb.transform(y_pred) with np.errstate(invalid='ignore'): mcc = np.corrcoef(y_true, y_pred)[0, 1] if np.isnan(mcc): return 0. else: return mcc def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None): """Zero-one classification loss. If normalize is ``True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). The best performance is 0. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of misclassifications. Otherwise, return the fraction of misclassifications. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float or int, If ``normalize == True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). Notes ----- In multilabel classification, the zero_one_loss function corresponds to the subset zero-one loss: for each sample, the entire set of labels must be correctly predicted, otherwise the loss for that sample is equal to one. See also -------- accuracy_score, hamming_loss, jaccard_similarity_score Examples -------- >>> from sklearn.metrics import zero_one_loss >>> y_pred = [1, 2, 3, 4] >>> y_true = [2, 2, 3, 4] >>> zero_one_loss(y_true, y_pred) 0.25 >>> zero_one_loss(y_true, y_pred, normalize=False) 1 In the multilabel case with binary label indicators: >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ score = accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight) if normalize: return 1 - score else: if sample_weight is not None: n_samples = np.sum(sample_weight) else: n_samples = _num_samples(y_true) return n_samples - score def f1_score(y_true, y_pred, labels=None, pos_label=1, average='weighted', sample_weight=None): """Compute the F1 score, also known as balanced F-score or F-measure The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) In the multi-class and multi-label case, this is the weighted average of the F1 score of each class. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array Integer array of labels. pos_label : str or int, 1 by default If ``average`` is not ``None`` and the classification target is binary, only this class's scores will be returned. average : string, [None, 'micro', 'macro', 'samples', 'weighted' (default)] If ``None``, the scores for each class are returned. Otherwise, unless ``pos_label`` is given in binary classification, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. 
``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- f1_score : float or array of float, shape = [n_unique_labels] F1 score of the positive class in binary classification or weighted average of the F1 scores of each class for the multiclass task. References ---------- .. [1] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import f1_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average=None) array([ 0.8, 0. , 0. ]) """ return fbeta_score(y_true, y_pred, 1, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1, average='weighted', sample_weight=None): """Compute the F-beta score The F-beta score is the weighted harmonic mean of precision and recall, reaching its optimal value at 1 and its worst value at 0. The `beta` parameter determines the weight of precision in the combined score. ``beta < 1`` lends more weight to precision, while ``beta > 1`` favors recall (``beta -> 0`` considers only precision, ``beta -> inf`` only recall). Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta: float Weight of precision in harmonic mean. labels : array Integer array of labels. pos_label : str or int, 1 by default If ``average`` is not ``None`` and the classification target is binary, only this class's scores will be returned. average : string, [None, 'micro', 'macro', 'samples', 'weighted' (default)] If ``None``, the scores for each class are returned. Otherwise, unless ``pos_label`` is given in binary classification, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). sample_weight : array-like of shape = [n_samples], optional Sample weights. 
Returns ------- fbeta_score : float (if average is not None) or array of float, shape =\ [n_unique_labels] F-beta score of the positive class in binary classification or weighted average of the F-beta score of each class for the multiclass task. References ---------- .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 327-328. .. [2] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import fbeta_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5) ... # doctest: +ELLIPSIS 0.33... >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average=None, beta=0.5) ... # doctest: +ELLIPSIS array([ 0.71..., 0. , 0. ]) """ _, _, f, _ = precision_recall_fscore_support(y_true, y_pred, beta=beta, labels=labels, pos_label=pos_label, average=average, warn_for=('f-score',), sample_weight=sample_weight) return f def _prf_divide(numerator, denominator, metric, modifier, average, warn_for): """Performs division and handles divide-by-zero. On zero-division, sets the corresponding result elements to zero and raises a warning. The metric, modifier and average arguments are used only for determining an appropriate warning. """ result = numerator / denominator mask = denominator == 0.0 if not np.any(mask): return result # remove infs result[mask] = 0.0 # build appropriate warning # E.g. "Precision and F-score are ill-defined and being set to 0.0 in # labels with no predicted samples" axis0 = 'sample' axis1 = 'label' if average == 'samples': axis0, axis1 = axis1, axis0 if metric in warn_for and 'f-score' in warn_for: msg_start = '{0} and F-score are'.format(metric.title()) elif metric in warn_for: msg_start = '{0} is'.format(metric.title()) elif 'f-score' in warn_for: msg_start = 'F-score is' else: return result msg = ('{0} ill-defined and being set to 0.0 {{0}} ' 'no {1} {2}s.'.format(msg_start, modifier, axis0)) if len(mask) == 1: msg = msg.format('due to') else: msg = msg.format('in {0}s with'.format(axis1)) warnings.warn(msg, UndefinedMetricWarning, stacklevel=2) return result def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None, pos_label=1, average=None, warn_for=('precision', 'recall', 'f-score'), sample_weight=None): """Compute precision, recall, F-measure and support for each class The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The F-beta score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0. The F-beta score weights recall more than precision by a factor of ``beta``. ``beta == 1.0`` means recall and precision are equally important. The support is the number of occurrences of each class in ``y_true``. 
If ``pos_label is None`` and in binary classification, this function returns the average precision, recall and F-measure if ``average`` is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta : float, 1.0 by default The strength of recall versus precision in the F-score. labels : array Integer array of labels. pos_label : str or int, 1 by default If ``average`` is not ``None`` and the classification target is binary, only this class's scores will be returned. average : string, [None (default), 'micro', 'macro', 'samples', 'weighted'] If ``None``, the scores for each class are returned. Otherwise, unless ``pos_label`` is given in binary classification, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). warn_for : tuple or set, for internal use This determines which warnings will be made in the case that this function is being used to return only one of its metrics. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision: float (if average is not None) or array of float, shape =\ [n_unique_labels] recall: float (if average is not None) or array of float, , shape =\ [n_unique_labels] fbeta_score: float (if average is not None) or array of float, shape =\ [n_unique_labels] support: int (if average is not None) or array of int, shape =\ [n_unique_labels] The number of occurrences of each label in ``y_true``. References ---------- .. [1] `Wikipedia entry for the Precision and recall <http://en.wikipedia.org/wiki/Precision_and_recall>`_ .. [2] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_ .. [3] `Discriminative Methods for Multi-labeled Classification Advances in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu Godbole, Sunita Sarawagi <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>` Examples -------- >>> from sklearn.metrics import precision_recall_fscore_support >>> y_true = np.array([0, 1, 2, 0, 1, 2]) >>> y_pred = np.array([0, 2, 1, 0, 0, 1]) >>> precision_recall_fscore_support(y_true, y_pred, average='macro') ... # doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='micro') ... # doctest: +ELLIPSIS (0.33..., 0.33..., 0.33..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='weighted') ... 
# doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) """ average_options = (None, 'micro', 'macro', 'weighted', 'samples') if average not in average_options: raise ValueError('average has to be one of ' + str(average_options)) if beta <= 0: raise ValueError("beta should be >0 in the F-beta score") y_type, y_true, y_pred = _check_targets(y_true, y_pred) label_order = labels # save this for later if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) ### Calculate tp_sum, pred_sum, true_sum ### if y_type.startswith('multilabel'): sum_axis = 1 if average == 'samples' else 0 # calculate weighted counts true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) elif average == 'samples': raise ValueError("Sample-based precision, recall, fscore is " "not meaningful outside multilabel" "classification. See the accuracy_score instead.") else: lb = LabelEncoder() lb.fit(labels) y_true = lb.transform(y_true) y_pred = lb.transform(y_pred) labels = lb.classes_ # labels are now from 0 to len(labels) - 1 -> use bincount tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: # Pathological case true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels)) ### Select labels to keep ### if y_type == 'binary' and average is not None and pos_label is not None: if label_order is not None and len(label_order) == 2: warnings.warn('In the future, providing two `labels` values, as ' 'well as `average` will average over those ' 'labels. For now, please use `labels=None` with ' '`pos_label` to evaluate precision, recall and ' 'F-score for the positive label only.', FutureWarning) if pos_label not in labels: if len(labels) == 1: # Only negative labels return (0., 0., 0., 0) else: raise ValueError("pos_label=%r is not a valid label: %r" % (pos_label, labels)) pos_label_idx = labels == pos_label tp_sum = tp_sum[pos_label_idx] pred_sum = pred_sum[pos_label_idx] true_sum = true_sum[pos_label_idx] elif average == 'micro': tp_sum = np.array([tp_sum.sum()]) pred_sum = np.array([pred_sum.sum()]) true_sum = np.array([true_sum.sum()]) ### Finally, we have all our sufficient statistics. Divide! ### beta2 = beta ** 2 with np.errstate(divide='ignore', invalid='ignore'): # Divide, and on zero-division, set scores to 0 and warn: # Oddly, we may get an "invalid" rather than a "divide" error # here. 
precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', average, warn_for) recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', average, warn_for) # Don't need to warn for F: either P or R warned, or tp == 0 where pos # and true are nonzero, in which case, F is well-defined and zero f_score = ((1 + beta2) * precision * recall / (beta2 * precision + recall)) f_score[tp_sum == 0] = 0.0 ## Average the results ## if average == 'weighted': weights = true_sum if weights.sum() == 0: return 0, 0, 0, None elif average == 'samples': weights = sample_weight else: weights = None if average is not None: precision = np.average(precision, weights=weights) recall = np.average(recall, weights=weights) f_score = np.average(f_score, weights=weights) true_sum = None # return no support elif label_order is not None: indices = np.searchsorted(labels, label_order) precision = precision[indices] recall = recall[indices] f_score = f_score[indices] true_sum = true_sum[indices] return precision, recall, f_score, true_sum def precision_score(y_true, y_pred, labels=None, pos_label=1, average='weighted', sample_weight=None): """Compute the precision The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The best value is 1 and the worst value is 0. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array Integer array of labels. pos_label : str or int, 1 by default If ``average`` is not ``None`` and the classification target is binary, only this class's scores will be returned. average : string, [None, 'micro', 'macro', 'samples', 'weighted' (default)] If ``None``, the scores for each class are returned. Otherwise, unless ``pos_label`` is given in binary classification, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision : float (if average is not None) or array of float, shape =\ [n_unique_labels] Precision of the positive class in binary classification or weighted average of the precision of each class for the multiclass task. Examples -------- >>> from sklearn.metrics import precision_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.22... >>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> precision_score(y_true, y_pred, average='weighted') ... # doctest: +ELLIPSIS 0.22... 
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS array([ 0.66..., 0. , 0. ]) """ p, _, _, _ = precision_recall_fscore_support(y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=('precision',), sample_weight=sample_weight) return p def recall_score(y_true, y_pred, labels=None, pos_label=1, average='weighted', sample_weight=None): """Compute the recall The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The best value is 1 and the worst value is 0. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array Integer array of labels. pos_label : str or int, 1 by default If ``average`` is not ``None`` and the classification target is binary, only this class's scores will be returned. average : string, [None, 'micro', 'macro', 'samples', 'weighted' (default)] If ``None``, the scores for each class are returned. Otherwise, unless ``pos_label`` is given in binary classification, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- recall : float (if average is not None) or array of float, shape =\ [n_unique_labels] Recall of the positive class in binary classification or weighted average of the recall of each class for the multiclass task. Examples -------- >>> from sklearn.metrics import recall_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average=None) array([ 1., 0., 0.]) """ _, r, _, _ = precision_recall_fscore_support(y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=('recall',), sample_weight=sample_weight) return r def classification_report(y_true, y_pred, labels=None, target_names=None, sample_weight=None, digits=2): """Build a text report showing the main classification metrics Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array, shape = [n_labels] Optional list of label indices to include in the report. target_names : list of strings Optional display names matching the labels (same order). 
sample_weight : array-like of shape = [n_samples], optional Sample weights. digits : int Number of digits for formatting output floating point values Returns ------- report : string Text summary of the precision, recall, F1 score for each class. Examples -------- >>> from sklearn.metrics import classification_report >>> y_true = [0, 1, 2, 2, 2] >>> y_pred = [0, 0, 2, 2, 1] >>> target_names = ['class 0', 'class 1', 'class 2'] >>> print(classification_report(y_true, y_pred, target_names=target_names)) precision recall f1-score support <BLANKLINE> class 0 0.50 1.00 0.67 1 class 1 0.00 0.00 0.00 1 class 2 1.00 0.67 0.80 3 <BLANKLINE> avg / total 0.70 0.60 0.61 5 <BLANKLINE> """ if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) last_line_heading = 'avg / total' if target_names is None: width = len(last_line_heading) target_names = ['%s' % l for l in labels] else: width = max(len(cn) for cn in target_names) width = max(width, len(last_line_heading), digits) headers = ["precision", "recall", "f1-score", "support"] fmt = '%% %ds' % width # first column: class name fmt += ' ' fmt += ' '.join(['% 9s' for _ in headers]) fmt += '\n' headers = [""] + headers report = fmt % tuple(headers) report += '\n' p, r, f1, s = precision_recall_fscore_support(y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight) for i, label in enumerate(labels): values = [target_names[i]] for v in (p[i], r[i], f1[i]): values += ["{0:0.{1}f}".format(v, digits)] values += ["{0}".format(s[i])] report += fmt % tuple(values) report += '\n' # compute averages values = [last_line_heading] for v in (np.average(p, weights=s), np.average(r, weights=s), np.average(f1, weights=s)): values += ["{0:0.{1}f}".format(v, digits)] values += ['{0}'.format(np.sum(s))] report += fmt % tuple(values) return report def hamming_loss(y_true, y_pred, classes=None): """Compute the average Hamming loss. The Hamming loss is the fraction of labels that are incorrectly predicted. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. classes : array, shape = [n_labels], optional Integer array of labels. Returns ------- loss : float or int, Return the average Hamming loss between element of ``y_true`` and ``y_pred``. See Also -------- accuracy_score, jaccard_similarity_score, zero_one_loss Notes ----- In multiclass classification, the Hamming loss correspond to the Hamming distance between ``y_true`` and ``y_pred`` which is equivalent to the subset ``zero_one_loss`` function. In multilabel classification, the Hamming loss is different from the subset zero-one loss. The zero-one loss considers the entire set of labels for a given sample incorrect if it does entirely match the true set of labels. Hamming loss is more forgiving in that it penalizes the individual labels. The Hamming loss is upperbounded by the subset zero-one loss. When normalized over samples, the Hamming loss is always between 0 and 1. References ---------- .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification: An Overview. International Journal of Data Warehousing & Mining, 3(3), 1-13, July-September 2007. .. 
[2] `Wikipedia entry on the Hamming distance <http://en.wikipedia.org/wiki/Hamming_distance>`_ Examples -------- >>> from sklearn.metrics import hamming_loss >>> y_pred = [1, 2, 3, 4] >>> y_true = [2, 2, 3, 4] >>> hamming_loss(y_true, y_pred) 0.25 In the multilabel case with binary label indicators: >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2))) 0.75 """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if classes is None: classes = unique_labels(y_true, y_pred) else: classes = np.asarray(classes) if y_type.startswith('multilabel'): n_differences = count_nonzero(y_true - y_pred) return (n_differences / (y_true.shape[0] * len(classes))) elif y_type in ["binary", "multiclass"]: return sp_hamming(y_true, y_pred) else: raise ValueError("{0} is not supported".format(y_type)) def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None): """Log loss, aka logistic loss or cross-entropy loss. This is the loss function used in (multinomial) logistic regression and extensions of it such as neural networks, defined as the negative log-likelihood of the true labels given a probabilistic classifier's predictions. For a single sample with true label yt in {0,1} and estimated probability yp that yt = 1, the log loss is -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp)) Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels for n_samples samples. y_pred : array-like of float, shape = (n_samples, n_classes) Predicted probabilities, as returned by a classifier's predict_proba method. eps : float Log loss is undefined for p=0 or p=1, so probabilities are clipped to max(eps, min(1 - eps, p)). normalize : bool, optional (default=True) If true, return the mean loss per sample. Otherwise, return the sum of the per-sample losses. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float Examples -------- >>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) 0.21616... References ---------- C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, p. 209. Notes ----- The logarithm used is the natural logarithm (base-e). """ lb = LabelBinarizer() T = lb.fit_transform(y_true) if T.shape[1] == 1: T = np.append(1 - T, T, axis=1) # Clipping Y = np.clip(y_pred, eps, 1 - eps) # This happens in cases when elements in y_pred have type "str". if not isinstance(Y, np.ndarray): raise ValueError("y_pred should be an array of floats.") # If y_pred is of single dimension, assume y_true to be binary # and then check. if Y.ndim == 1: Y = Y[:, np.newaxis] if Y.shape[1] == 1: Y = np.append(1 - Y, Y, axis=1) # Check if dimensions are consistent. check_consistent_length(T, Y) T = check_array(T) Y = check_array(Y) if T.shape[1] != Y.shape[1]: raise ValueError("y_true and y_pred have different number of classes " "%d, %d" % (T.shape[1], Y.shape[1])) # Renormalize Y /= Y.sum(axis=1)[:, np.newaxis] loss = -(T * np.log(Y)).sum(axis=1) return _weighted_sum(loss, sample_weight, normalize) def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None): """Average hinge loss (non-regularized) In binary class case, assuming labels in y_true are encoded with +1 and -1, when a prediction mistake is made, ``margin = y_true * pred_decision`` is always negative (since the signs disagree), implying ``1 - margin`` is always greater than 1. The cumulated hinge loss is therefore an upper bound of the number of mistakes made by the classifier. 
In multiclass case, the function expects that either all the labels are included in y_true or an optional labels argument is provided which contains all the labels. The multilabel margin is calculated according to Crammer-Singer's method. As in the binary case, the cumulated hinge loss is an upper bound of the number of mistakes made by the classifier. Parameters ---------- y_true : array, shape = [n_samples] True target, consisting of integers of two values. The positive label must be greater than the negative label. pred_decision : array, shape = [n_samples] or [n_samples, n_classes] Predicted decisions, as output by decision_function (floats). labels : array, optional, default None Contains all the labels for the problem. Used in multiclass hinge loss. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float References ---------- .. [1] `Wikipedia entry on the Hinge loss <http://en.wikipedia.org/wiki/Hinge_loss>`_ .. [2] Koby Crammer, Yoram Singer. On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines. Journal of Machine Learning Research 2, (2001), 265-292 .. [3] 'L1 AND L2 Regularization for Multiclass Hinge Loss Models by Robert C. Moore, John DeNero. <http://www.ttic.edu/sigml/symposium2011/papers/ Moore+DeNero_Regularization.pdf>' Examples -------- >>> from sklearn import svm >>> from sklearn.metrics import hinge_loss >>> X = [[0], [1]] >>> y = [-1, 1] >>> est = svm.LinearSVC(random_state=0) >>> est.fit(X, y) LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True, intercept_scaling=1, loss='l2', max_iter=1000, multi_class='ovr', penalty='l2', random_state=0, tol=0.0001, verbose=0) >>> pred_decision = est.decision_function([[-2], [3], [0.5]]) >>> pred_decision # doctest: +ELLIPSIS array([-2.18..., 2.36..., 0.09...]) >>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS 0.30... In the multiclass case: >>> X = np.array([[0], [1], [2], [3]]) >>> Y = np.array([0, 1, 2, 3]) >>> labels = np.array([0, 1, 2, 3]) >>> est = svm.LinearSVC() >>> est.fit(X, Y) LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True, intercept_scaling=1, loss='l2', max_iter=1000, multi_class='ovr', penalty='l2', random_state=None, tol=0.0001, verbose=0) >>> pred_decision = est.decision_function([[-1], [2], [3]]) >>> y_true = [0, 2, 3] >>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS 0.56... 
""" check_consistent_length(y_true, pred_decision, sample_weight) pred_decision = check_array(pred_decision, ensure_2d=False) y_true = column_or_1d(y_true) y_true_unique = np.unique(y_true) if y_true_unique.size > 2: if (labels is None and pred_decision.ndim > 1 and (np.size(y_true_unique) != pred_decision.shape[1])): raise ValueError("Please include all labels in y_true " "or pass labels as third argument") if labels is None: labels = y_true_unique le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) mask = np.ones_like(pred_decision, dtype=bool) mask[np.arange(y_true.shape[0]), y_true] = False margin = pred_decision[~mask] margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1), axis=1) else: # Handles binary class case # this code assumes that positive and negative labels # are encoded as +1 and -1 respectively pred_decision = column_or_1d(pred_decision) pred_decision = np.ravel(pred_decision) lbin = LabelBinarizer(neg_label=-1) y_true = lbin.fit_transform(y_true)[:, 0] try: margin = y_true * pred_decision except TypeError: raise TypeError("pred_decision should be an array of floats.") losses = 1 - margin # The hinge_loss doesn't penalize good enough predictions. losses[losses <= 0] = 0 return np.average(losses, weights=sample_weight)
bsd-3-clause
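Two of the loss functions in the file above reduce to short hand computations: the binary branch of hinge_loss and log_loss. The sketch below is illustrative only; the decision values and probabilities are copied (rounded) from the docstring examples, and the probability columns are assumed to follow LabelBinarizer's sorted class order [ham, spam].

import numpy as np

# binary hinge loss: labels in {-1, +1}, losses = max(0, 1 - y * decision)
y_true = np.array([-1, 1, 1])
pred_decision = np.array([-2.18, 2.36, 0.09])   # rounded values from the docstring
losses = np.maximum(0.0, 1.0 - y_true * pred_decision)
print(losses.mean())                            # ~0.30, as in the hinge_loss example

# log loss: mean negative log-probability assigned to the true class
y_true = np.array(["spam", "ham", "ham", "spam"])
proba = np.array([[.1, .9], [.9, .1], [.8, .2], [.35, .65]])  # columns: [ham, spam]
classes = np.array(["ham", "spam"])             # assumed sorted, as LabelBinarizer uses
true_idx = np.searchsorted(classes, y_true)
p_true = proba[np.arange(len(y_true)), true_idx]
print(-np.log(p_true).mean())                   # ~0.21616, as in the log_loss example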
wasade/qiita
qiita_db/test/test_analysis.py
1
22656
from unittest import TestCase, main from os import remove from os.path import exists, join from datetime import datetime from shutil import move from biom import load_table import pandas as pd from qiita_core.util import qiita_test_checker from qiita_db.analysis import Analysis, Collection from qiita_db.job import Job from qiita_db.user import User from qiita_db.exceptions import QiitaDBStatusError from qiita_db.util import get_mountpoint from qiita_db.study import Study, StudyPerson from qiita_db.data import ProcessedData from qiita_db.metadata_template import SampleTemplate # ----------------------------------------------------------------------------- # Copyright (c) 2014--, The Qiita Development Team. # # Distributed under the terms of the BSD 3-clause License. # # The full license is in the file LICENSE, distributed with this software. # ----------------------------------------------------------------------------- @qiita_test_checker() class TestAnalysis(TestCase): def setUp(self): self.analysis = Analysis(1) _, self.fp = get_mountpoint("analysis")[0] self.biom_fp = join(self.fp, "1_analysis_18S.biom") self.map_fp = join(self.fp, "1_analysis_mapping.txt") def tearDown(self): with open(self.biom_fp, 'w') as f: f.write("") with open(self.map_fp, 'w') as f: f.write("") fp = join(get_mountpoint('analysis')[0][1], 'testfile.txt') if exists(fp): remove(fp) mp = get_mountpoint("processed_data")[0][1] study2fp = join(mp, "2_2_study_1001_closed_reference_otu_table.biom") if exists(study2fp): move(study2fp, join(mp, "2_study_1001_closed_reference_otu_table.biom")) def test_lock_check(self): for status in ["queued", "running", "public", "completed", "error"]: new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis") new.status = status with self.assertRaises(QiitaDBStatusError): new._lock_check(self.conn_handler) def test_lock_check_ok(self): self.analysis.status = "in_construction" self.analysis._lock_check(self.conn_handler) def test_status_setter_checks(self): self.analysis.status = "public" with self.assertRaises(QiitaDBStatusError): self.analysis.status = "queued" def test_get_by_status(self): self.assertEqual(Analysis.get_by_status('public'), []) self.analysis.status = "public" self.assertEqual(Analysis.get_by_status('public'), [1]) def test_has_access_public(self): self.conn_handler.execute("UPDATE qiita.analysis SET " "analysis_status_id = 6") self.assertTrue(self.analysis.has_access(User("[email protected]"))) def test_has_access_shared(self): self.assertTrue(self.analysis.has_access(User("[email protected]"))) def test_has_access_private(self): self.assertTrue(self.analysis.has_access(User("[email protected]"))) def test_has_access_admin(self): self.assertTrue(self.analysis.has_access(User("[email protected]"))) def test_has_access_no_access(self): self.assertFalse(self.analysis.has_access(User("[email protected]"))) def test_create(self): sql = "SELECT EXTRACT(EPOCH FROM NOW())" time1 = float(self.conn_handler.execute_fetchall(sql)[0][0]) new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis") self.assertEqual(new.id, 3) sql = ("SELECT analysis_id, email, name, description, " "analysis_status_id, pmid, EXTRACT(EPOCH FROM timestamp) " "FROM qiita.analysis WHERE analysis_id = 3") obs = self.conn_handler.execute_fetchall(sql) self.assertEqual(obs[0][:-1], [3, '[email protected]', 'newAnalysis', 'A New Analysis', 1, None]) self.assertTrue(time1 < float(obs[0][-1])) def test_create_parent(self): sql = "SELECT EXTRACT(EPOCH FROM NOW())" 
time1 = float(self.conn_handler.execute_fetchall(sql)[0][0]) new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis", Analysis(1)) self.assertEqual(new.id, 3) sql = ("SELECT analysis_id, email, name, description, " "analysis_status_id, pmid, EXTRACT(EPOCH FROM timestamp) " "FROM qiita.analysis WHERE analysis_id = 3") obs = self.conn_handler.execute_fetchall(sql) self.assertEqual(obs[0][:-1], [3, '[email protected]', 'newAnalysis', 'A New Analysis', 1, None]) self.assertTrue(time1 < float(obs[0][-1])) sql = "SELECT * FROM qiita.analysis_chain WHERE child_id = 3" obs = self.conn_handler.execute_fetchall(sql) self.assertEqual(obs, [[1, 3]]) def test_retrieve_owner(self): self.assertEqual(self.analysis.owner, "[email protected]") def test_retrieve_name(self): self.assertEqual(self.analysis.name, "SomeAnalysis") def test_retrieve_description(self): self.assertEqual(self.analysis.description, "A test analysis") def test_set_description(self): self.analysis.description = "New description" self.assertEqual(self.analysis.description, "New description") def test_retrieve_samples(self): exp = {1: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180']} self.assertEqual(self.analysis.samples, exp) def test_retrieve_dropped_samples(self): # Create and populate second study to do test with info = { "timeseries_type_id": 1, "metadata_complete": True, "mixs_compliant": True, "number_samples_collected": 25, "number_samples_promised": 28, "portal_type_id": 3, "study_alias": "FCM", "study_description": "Microbiome of people who eat nothing but " "fried chicken", "study_abstract": "Exploring how a high fat diet changes the " "gut microbiome", "emp_person_id": StudyPerson(2), "principal_investigator_id": StudyPerson(3), "lab_person_id": StudyPerson(1) } metadata_dict = { 'SKB8.640193': {'physical_location': 'location1', 'has_physical_specimen': True, 'has_extracted_data': True, 'sample_type': 'type1', 'required_sample_info_status': 'received', 'collection_timestamp': datetime(2014, 5, 29, 12, 24, 51), 'host_subject_id': 'NotIdentified', 'Description': 'Test Sample 1', 'str_column': 'Value for sample 1', 'latitude': 42.42, 'longitude': 41.41}, 'SKD8.640184': {'physical_location': 'location1', 'has_physical_specimen': True, 'has_extracted_data': True, 'sample_type': 'type1', 'required_sample_info_status': 'received', 'collection_timestamp': datetime(2014, 5, 29, 12, 24, 51), 'host_subject_id': 'NotIdentified', 'Description': 'Test Sample 2', 'str_column': 'Value for sample 2', 'latitude': 4.2, 'longitude': 1.1}, 'SKB7.640196': {'physical_location': 'location1', 'has_physical_specimen': True, 'has_extracted_data': True, 'sample_type': 'type1', 'required_sample_info_status': 'received', 'collection_timestamp': datetime(2014, 5, 29, 12, 24, 51), 'host_subject_id': 'NotIdentified', 'Description': 'Test Sample 3', 'str_column': 'Value for sample 3', 'latitude': 4.8, 'longitude': 4.41}, } metadata = pd.DataFrame.from_dict(metadata_dict, orient='index') Study.create(User("[email protected]"), "Test study 2", [1], info) SampleTemplate.create(metadata, Study(2)) mp = get_mountpoint("processed_data")[0][1] study_fp = join(mp, "2_study_1001_closed_reference_otu_table.biom") ProcessedData.create("processed_params_uclust", 1, [(study_fp, 6)], study=Study(2), data_type="16S") self.conn_handler.execute( "INSERT INTO qiita.analysis_sample (analysis_id, " "processed_data_id, sample_id) VALUES " "(1,2,'2.SKB8.640193'), (1,2,'2.SKD8.640184'), " "(1,2,'2.SKB7.640196')") samples 
= {1: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'], 2: ['2.SKB8.640193', '2.SKD8.640184']} self.analysis._build_biom_tables(samples, 10000, conn_handler=self.conn_handler) exp = {1: {'1.SKM4.640180', '1.SKM9.640192'}, 2: {'2.SKB7.640196'}} self.assertEqual(self.analysis.dropped_samples, exp) def test_retrieve_data_types(self): exp = ['18S'] self.assertEqual(self.analysis.data_types, exp) def test_retrieve_shared_with(self): self.assertEqual(self.analysis.shared_with, ["[email protected]"]) def test_retrieve_biom_tables(self): exp = {"18S": join(self.fp, "1_analysis_18S.biom")} self.assertEqual(self.analysis.biom_tables, exp) def test_all_associated_filepaths(self): exp = {12, 13, 14, 15} self.assertEqual(self.analysis.all_associated_filepath_ids, exp) def test_retrieve_biom_tables_none(self): new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis", Analysis(1)) self.assertEqual(new.biom_tables, None) def test_set_step(self): new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis", Analysis(1)) new.step = 2 sql = "SELECT * FROM qiita.analysis_workflow WHERE analysis_id = 3" obs = self.conn_handler.execute_fetchall(sql) self.assertEqual(obs, [[3, 2]]) def test_set_step_twice(self): new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis", Analysis(1)) new.step = 2 new.step = 4 sql = "SELECT * FROM qiita.analysis_workflow WHERE analysis_id = 3" obs = self.conn_handler.execute_fetchall(sql) self.assertEqual(obs, [[3, 4]]) def test_retrieve_step(self): new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis", Analysis(1)) new.step = 2 self.assertEqual(new.step, 2) def test_retrieve_step_new(self): new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis", Analysis(1)) with self.assertRaises(ValueError): new.step def test_retrieve_step_locked(self): self.analysis.status = "public" with self.assertRaises(QiitaDBStatusError): self.analysis.step = 3 def test_retrieve_jobs(self): self.assertEqual(self.analysis.jobs, [1, 2]) def test_retrieve_jobs_none(self): new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis", Analysis(1)) self.assertEqual(new.jobs, None) def test_retrieve_pmid(self): self.assertEqual(self.analysis.pmid, "121112") def test_retrieve_pmid_none(self): new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis", Analysis(1)) self.assertEqual(new.pmid, None) def test_set_pmid(self): self.analysis.pmid = "11211221212213" self.assertEqual(self.analysis.pmid, "11211221212213") def test_retrieve_mapping_file(self): exp = join(self.fp, "1_analysis_mapping.txt") obs = self.analysis.mapping_file self.assertEqual(obs, exp) self.assertTrue(exists(exp)) def test_retrieve_mapping_file_none(self): new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis", Analysis(1)) obs = new.mapping_file self.assertEqual(obs, None) # def test_get_parent(self): # raise NotImplementedError() # def test_get_children(self): # raise NotImplementedError() def test_add_samples(self): new = Analysis.create(User("[email protected]"), "newAnalysis", "A New Analysis") new.add_samples([(1, '1.SKB8.640193'), (1, '1.SKD5.640186')]) exp = {1: ['1.SKB8.640193', '1.SKD5.640186']} self.assertEqual(new.samples, exp) def test_remove_samples_both(self): self.analysis.remove_samples(proc_data=(1, ), samples=('1.SKB8.640193', )) exp = {1: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180']} 
self.assertEqual(self.analysis.samples, exp) def test_remove_samples_samples(self): self.analysis.remove_samples(samples=('1.SKD8.640184', )) exp = {1: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180']} self.assertEqual(self.analysis.samples, exp) def test_remove_samples_processed_data(self): self.analysis.remove_samples(proc_data=(1, )) exp = {} self.assertEqual(self.analysis.samples, exp) def test_share(self): self.analysis.share(User("[email protected]")) self.assertEqual(self.analysis.shared_with, ["[email protected]", "[email protected]"]) def test_unshare(self): self.analysis.unshare(User("[email protected]")) self.assertEqual(self.analysis.shared_with, []) def test_get_samples(self): obs = self.analysis._get_samples() exp = {1: ['1.SKB7.640196', '1.SKB8.640193', '1.SKD8.640184', '1.SKM4.640180', '1.SKM9.640192']} self.assertEqual(obs, exp) def test_build_mapping_file(self): samples = {1: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']} self.analysis._build_mapping_file(samples, conn_handler=self.conn_handler) obs = self.analysis.mapping_file self.assertEqual(obs, self.map_fp) with open(self.map_fp) as f: mapdata = f.readlines() # check some columns for correctness obs = [line.split('\t')[0] for line in mapdata] exp = ['#SampleID', '1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'] self.assertEqual(obs, exp) obs = [line.split('\t')[1] for line in mapdata] exp = ['BarcodeSequence', 'AGCGCTCACATC', 'TGAGTGGTCTGT', 'CGGCCTAAGTTC'] self.assertEqual(obs, exp) obs = [line.split('\t')[2] for line in mapdata] exp = ['LinkerPrimerSequence', 'GTGCCAGCMGCCGCGGTAA', 'GTGCCAGCMGCCGCGGTAA', 'GTGCCAGCMGCCGCGGTAA'] self.assertEqual(obs, exp) obs = [line.split('\t')[19] for line in mapdata] exp = ['host_subject_id', '1001:M7', '1001:D9', '1001:M8'] self.assertEqual(obs, exp) obs = [line.split('\t')[47] for line in mapdata] exp = ['tot_org_carb', '5.0', '4.32', '5.0'] self.assertEqual(obs, exp) obs = [line.split('\t')[-1] for line in mapdata] exp = ['Description\n'] + ['Cannabis Soil Microbiome\n'] * 3 self.assertEqual(obs, exp) def test_build_mapping_file_duplicate_samples(self): samples = {1: ['1.SKB8.640193', '1.SKB8.640193', '1.SKD8.640184']} with self.assertRaises(ValueError): self.analysis._build_mapping_file(samples, conn_handler=self.conn_handler) def test_build_biom_tables(self): samples = {1: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']} self.analysis._build_biom_tables(samples, 100, conn_handler=self.conn_handler) obs = self.analysis.biom_tables self.assertEqual(obs, {'18S': self.biom_fp}) table = load_table(self.biom_fp) obs = set(table.ids(axis='sample')) exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'} self.assertEqual(obs, exp) obs = table.metadata('1.SKB8.640193') exp = {'Study': 'Identification of the Microbiomes for Cannabis Soils', 'Processed_id': 1} self.assertEqual(obs, exp) def test_build_files(self): self.analysis.build_files() def test_build_files_raises_type_error(self): with self.assertRaises(TypeError): self.analysis.build_files('string') with self.assertRaises(TypeError): self.analysis.build_files(100.5) def test_build_files_raises_value_error(self): with self.assertRaises(ValueError): self.analysis.build_files(0) with self.assertRaises(ValueError): self.analysis.build_files(-10) def test_add_file(self): fp = join(get_mountpoint('analysis')[0][1], 'testfile.txt') with open(fp, 'w') as f: f.write('testfile!') self.analysis._add_file('testfile.txt', 'plain_text', '18S') obs = self.conn_handler.execute_fetchall( 'SELECT * FROM 
qiita.filepath WHERE filepath_id = 19') exp = [[19, 'testfile.txt', 9, '3675007573', 1, 1]] self.assertEqual(obs, exp) obs = self.conn_handler.execute_fetchall( 'SELECT * FROM qiita.analysis_filepath WHERE filepath_id = 19') exp = [[1, 19, 2]] self.assertEqual(obs, exp) @qiita_test_checker() class TestCollection(TestCase): def setUp(self): self.collection = Collection(1) def test_create(self): Collection.create(User('[email protected]'), 'TestCollection2', 'Some desc') obs = self.conn_handler.execute_fetchall( 'SELECT * FROM qiita.collection WHERE collection_id = 2') exp = [[2, '[email protected]', 'TestCollection2', 'Some desc', 1]] self.assertEqual(obs, exp) def test_create_no_desc(self): Collection.create(User('[email protected]'), 'Test Collection2') obs = self.conn_handler.execute_fetchall( 'SELECT * FROM qiita.collection WHERE collection_id = 2') exp = [[2, '[email protected]', 'Test Collection2', None, 1]] self.assertEqual(obs, exp) def test_delete(self): Collection.delete(1) obs = self.conn_handler.execute_fetchall( 'SELECT * FROM qiita.collection') exp = [] self.assertEqual(obs, exp) def test_delete_public(self): self.collection.status = 'public' with self.assertRaises(QiitaDBStatusError): Collection.delete(1) obs = self.conn_handler.execute_fetchall( 'SELECT * FROM qiita.collection') exp = [[1, '[email protected]', 'TEST_COLLECTION', 'collection for testing purposes', 2]] self.assertEqual(obs, exp) def test_retrieve_name(self): obs = self.collection.name exp = "TEST_COLLECTION" self.assertEqual(obs, exp) def test_set_name(self): self.collection.name = "NeW NaMe 123" self.assertEqual(self.collection.name, "NeW NaMe 123") def test_set_name_public(self): self.collection.status = "public" with self.assertRaises(QiitaDBStatusError): self.collection.name = "FAILBOAT" def test_retrieve_desc(self): obs = self.collection.description exp = "collection for testing purposes" self.assertEqual(obs, exp) def test_set_desc(self): self.collection.description = "NeW DeSc 123" self.assertEqual(self.collection.description, "NeW DeSc 123") def test_set_desc_public(self): self.collection.status = "public" with self.assertRaises(QiitaDBStatusError): self.collection.description = "FAILBOAT" def test_retrieve_owner(self): obs = self.collection.owner exp = "[email protected]" self.assertEqual(obs, exp) def test_retrieve_analyses(self): obs = self.collection.analyses exp = [1] self.assertEqual(obs, exp) def test_retrieve_highlights(self): obs = self.collection.highlights exp = [1] self.assertEqual(obs, exp) def test_retrieve_shared_with(self): obs = self.collection.shared_with exp = ["[email protected]"] self.assertEqual(obs, exp) def test_add_analysis(self): self.collection.add_analysis(Analysis(2)) obs = self.collection.analyses exp = [1, 2] self.assertEqual(obs, exp) def test_remove_analysis(self): self.collection.remove_analysis(Analysis(1)) obs = self.collection.analyses exp = [] self.assertEqual(obs, exp) def test_highlight_job(self): self.collection.highlight_job(Job(2)) obs = self.collection.highlights exp = [1, 2] self.assertEqual(obs, exp) def test_remove_highlight(self): self.collection.remove_highlight(Job(1)) obs = self.collection.highlights exp = [] self.assertEqual(obs, exp) def test_share(self): self.collection.share(User("[email protected]")) obs = self.collection.shared_with exp = ["[email protected]", "[email protected]"] self.assertEqual(obs, exp) def test_unshare(self): self.collection.unshare(User("[email protected]")) obs = self.collection.shared_with exp = [] self.assertEqual(obs, 
exp) if __name__ == "__main__": main()
bsd-3-clause
mwindau/praktikum
v101/eigentraegheitsmoment.py
1
4386
import numpy as np from astropy.io import ascii from uncertainties import ufloat from uncertainties.unumpy import (nominal_values as noms, std_devs as stds) import uncertainties.unumpy as unp from scipy.optimize import curve_fit import matplotlib.pyplot as plt w, d, T_messung = np.genfromtxt('Rohdaten/gewichte.txt', unpack=True) abstand = unp.uarray(d, 0.02) # in cm abstand *= 1e-2 # Umrechnung von cm in m T = unp.uarray(T_messung, 0.0) # in s T = T / 5 # Es wurden fünf Perioden gemessen # Standardabweichung des Mittelwerts def meanDerivation(a): # a ist ein Array N = len(a) # Anzahl an Messwerten temp = 0. mittelwert = np.mean(a) # Mittelwert der Messwerte for value in a: temp += (value-mittelwert)**2 temp *= 1/(N*(N-1)) return unp.sqrt(temp) # "masselose" Stange m_stange = ufloat(135.29, 0.01) # in g m_stange *= 1e-3 # Umrechnung von g in kg Durchmesser_stange = [0.0610, 0.0610, 0.0610, 0.0610, 0.0610] # in m r_stange = ufloat(np.mean(Durchmesser_stange), meanDerivation(Durchmesser_stange)) # Ist noch der Durchmesser r_stange *= 0.5 # Nun ist es der Radius l_stange = ufloat(0.6282, 0.00001) # laenge der Stange in m l_stange *= 2 # Das vorherige war pro Seite I_stange = m_stange*(l_stange**2)/12 print("Mittelwert und Standardabweichung des Radius' der Stange") print(np.mean(Durchmesser_stange), meanDerivation(Durchmesser_stange)) print("Trägheitsmoment der \"masselosen\" Stange") print(I_stange) # Trägheitsmoment der Massen def I_Zylinder(m, R, h): temp = (R**2)/4 + (h**2)/12 temp *= m return temp # Satz von Steiner, Verschiebung um a def Steiner(I, m, a): return I + m*(a**2) # Gewichte m_c = ufloat(261.54, 0.01) # Gewicht in g d_c = ufloat(4.510, 0.001) # Durchmesser Gewicht C in cm h_c = ufloat(2.030, 0.001) # Höhe Gewicht C in cm m_b = ufloat(261.55, 0.01) # Gewicht in g d_b = ufloat(4.510, 0.001) # Durchmesser Gewicht B in cm h_b = ufloat(2.040, 0.001) # Höhe Gewicht B in cm d_c *= 1e-2 # Umrechnung von cm in m h_c *= 1e-2 # Umrechnung von cm in m d_b *= 1e-2 # Umrechnung von cm in m h_b *= 1e-2 # Umrechnung von cm in m m_c *= 1e-3 # Umrechnung von g in kg m_b *= 1e-3 # Umrechnung von g in kg I_c = I_Zylinder(m_c, d_c/2, h_c) I_b = I_Zylinder(m_b, d_b/2, h_b) # I_c = Steiner(I_c, m_c, abstand+h_c/2) # I_b = Steiner(I_b, m_b, abstand+h_b/2) print("Trägheitsmoment von Gewicht C ohne Steiner:") print(I_c) print("Trägheitsmoment von Gewicht B ohne Steiner:") print(I_b) # Fitfunktion def f(x, m, b): return m * x + b # Lineare Regression alpha = abstand + h_c/2 # Da der Abstand nicht von der Zylindermitte gemessen wurde, # sondern vom Rand temp1 = alpha**2 # a² in m² x = noms(temp1) x_err = stds(temp1) temp2 = T**2 # T² in s² y = noms(temp2) y_err = stds(temp2) print('T² in s²: ', y) print('a² in m²: ', x) x_plot = np.linspace(0, 0.1) params, covariance = curve_fit(f, x, y) # covariance is the covariance matrix errors = np.sqrt(np.diag(covariance)) print('Parameter für Data 2 gefittet: ') print(' m =', params[0], '±', errors[0]) print(' b =', params[1], '±', errors[1]) # Plot plt.errorbar(x*1e2, y, xerr=x_err*1e2, yerr=y_err, fmt='rx', label='Messwerte') # plt.plot(x*1e2, y, 'rx', label='Messwerte') plt.plot(x_plot*1e2, f(x_plot, *params), 'b-', label='Regression') plt.legend(loc='best') plt.xlabel(r'$\alpha^2 \; / \; 10^2\mathrm{m^2}$') plt.ylabel(r'$T^2 \; / \; \mathrm{s^2}$') plt.xlim(0.5, 8) plt.ylim(10, 70) plt.grid() plt.tight_layout() # plt.show() plt.savefig('build/plot_eigen.pdf') plt.clf() # Eigenträgheitsmoment I_D ausrechnen m = ufloat(params[0], errors[0]) # in m²/s² b = 
ufloat(params[1], errors[1]) # in s² # I = I_D + I_c + I_b + I_stange D = (4*(np.pi**2) * (m_b + m_c)) / m # D_1 = ufloat(0.000381, 0.000007) # Aus Aufgabenteil 1 I_D = b*D/(4*(np.pi**2)) - (I_stange + I_b + I_c) print("D aus dem Fit berechnet:") print(D) print("Eigenträgheitsmoment des Aufbaus aus dem Fit berechnet:") print("(Dazu wurde das D aus dem Fit verwendet)") print(I_D) # Ergebnisse speichern x *= 1e2 # Umrechnung m in cm x_err *= 1e2 x = np.round(x, 3) x_err = np.round(x_err, 3) y = np.round(y, 2) y_err = np.round(y_err, 2) abstand *= 1e2 ascii.write( [noms(abstand), T_messung, y, y_err, x, x_err], 'build/table_eigen.tex', format='latex')
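The linear regression of T^2 against a^2 in the script above relies on the torsion-pendulum relation. A short derivation, assuming the standard law T = 2\pi\sqrt{I/D} and the parallel-axis (Steiner) shift already used in the code, shows why the slope and intercept of the fit give D and the intrinsic moment I_D:

T = 2\pi\sqrt{\frac{I_{\text{ges}}}{D}}, \qquad
I_{\text{ges}} = I_D + I_{\text{Stange}} + I_B + I_C + (m_B + m_C)\,a^2

\Rightarrow\quad
T^2 = \underbrace{\frac{4\pi^2\,(m_B + m_C)}{D}}_{m}\,a^2
    + \underbrace{\frac{4\pi^2}{D}\bigl(I_D + I_{\text{Stange}} + I_B + I_C\bigr)}_{b}

D = \frac{4\pi^2\,(m_B + m_C)}{m}, \qquad
I_D = \frac{b\,D}{4\pi^2} - \bigl(I_{\text{Stange}} + I_B + I_C\bigr)

which is exactly what the script computes from params[0] and params[1].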
mit
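The meanDerivation helper in the script above is the standard error of the mean written out explicitly. As a hedged check (plain floats only; the original uses unp.sqrt so it also works on uncertainties arrays, and the sample readings below are made up):

import numpy as np

def mean_derivation(a):
    # standard error of the mean: sqrt( sum((x - xbar)^2) / (N*(N-1)) )
    a = np.asarray(a, dtype=float)
    n = len(a)
    return np.sqrt(np.sum((a - a.mean()) ** 2) / (n * (n - 1)))

a = [0.0610, 0.0611, 0.0609, 0.0612, 0.0610]   # hypothetical readings
assert np.isclose(mean_derivation(a), np.std(a, ddof=1) / np.sqrt(len(a)))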
jonyroda97/redbot-amigosprovaveis
lib/matplotlib/backends/backend_qt4.py
2
1438
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import six
from six import unichr

import os
import re
import signal
import sys

from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
    FigureCanvasBase, FigureManagerBase, NavigationToolbar2, TimerBase,
    cursors)
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool

from .qt_compat import QtCore, QtWidgets, _getSaveFileName, __version__
from .backend_qt5 import (
    backend_version, SPECIAL_KEYS, SUPER, ALT, CTRL, SHIFT, MODIFIER_KEYS,
    cursord, _create_qApp, _BackendQT5, TimerQT, MainWindow, FigureManagerQT,
    NavigationToolbar2QT, SubplotToolQt, error_msg_qt, exception_handler)
from .backend_qt5 import FigureCanvasQT as FigureCanvasQT5

DEBUG = False


class FigureCanvasQT(FigureCanvasQT5):

    def wheelEvent(self, event):
        x = event.x()
        # flipy so y=0 is bottom of canvas
        y = self.figure.bbox.height - event.y()
        # from QWheelEvent::delta doc
        steps = event.delta()/120
        if (event.orientation() == QtCore.Qt.Vertical):
            FigureCanvasBase.scroll_event(self, x, y, steps)
            if DEBUG:
                print('scroll event: delta = %i, '
                      'steps = %i ' % (event.delta(), steps))


@_BackendQT5.export
class _BackendQT4(_BackendQT5):
    FigureCanvas = FigureCanvasQT
gpl-3.0
keurfonluu/StochOPy
stochopy/gui/gui.py
1
28859
# -*- coding: utf-8 -*- """ StochOPy Viewer is a GUI for StochOPy to see how popular stochastic algorithms perform on different benchmark functions. Author: Keurfon Luu <[email protected]> License: MIT """ from __future__ import absolute_import, division, print_function, unicode_literals from matplotlib.figure import Figure from matplotlib import animation import numpy as np from ..evolutionary_algorithm import Evolutionary from ..monte_carlo import MonteCarlo from ..benchmark_functions import BenchmarkFunction import sys if sys.version_info[0] < 3: import Tkinter as tk import tkFileDialog as tkfile import tkMessageBox as tkmessage import ttk import tkFont as font else: import tkinter as tk import tkinter.filedialog as tkfile import tkinter.messagebox as tkmessage import tkinter.ttk as ttk from tkinter import font from .ttk_spinbox import Spinbox try: import cPickle as pickle except ImportError: import pickle __all__ = [ "StochOGUI", "main" ] class StochOGUI(): """ GUI for StochOPy. StochOPy Viewer provides a GUI to manipulate solvers on popular benchmark functions. Parameters ---------- master : tkinter object tkinter root window. """ master = None anim_running = False first_run = True MAX_SEED = 999999 FUNCOPT = ( "Ackley", "Quartic", "Quartic noise", "Rastrigin", "Rosenbrock", "Sphere", "Styblinski-Tang" ) EAOPT = ( "CPSO", "PSO", "DE", "CMAES", "VDCMA" ) MCOPT = ( "Hastings", "Hamiltonian", ) STRATOPT = ( "rand1", "rand2", "best1", "best2" ) MIN_POPSIZE = { "cpso": 2, "pso": 2, "de": 4, "cmaes": 4, "vdcma": 4 } def __init__(self, master): self.master = master master.title("StochOPy Viewer") master.protocol("WM_DELETE_WINDOW", self.close_window) master.geometry("900x600") master.minsize(900, 600) master.maxsize(900, 600) default_font = font.nametofont("TkDefaultFont") default_font.configure(family = "Helvetica", size = 9) master.option_add("*Font", default_font) self.define_variables() self.trace_variables() self.init_variables() self.menubar() self.frame1() self.frame2() self.footer() self.select_widget(self.solver_name.get()) def about(self): about = "StochOPy Viewer 1.3" + "\n" \ + "Created by Keurfon Luu" tkmessage.showinfo("About", about) def menubar(self): menubar = tk.Menu(self.master) # File filemenu = tk.Menu(menubar, tearoff = 0) filemenu.add_command(label = "Export models", command = self.export_models) filemenu.add_command(label = "Export fitness", command = self.export_fitness) filemenu.add_separator() filemenu.add_command(label = "Exit", command = self.close_window) # Help helpmenu = tk.Menu(menubar, tearoff = 0) helpmenu.add_command(label = "About", command = self.about) # Display menu bar menubar.add_cascade(label = "File", menu = filemenu) menubar.add_cascade(label = "Help", menu = helpmenu) self.master.config(menu = menubar) def frame1(self): self.frame1 = ttk.LabelFrame(self.master, text = "Parameters", borderwidth = 2, relief = "groove") self.frame1.place(bordermode = "outside", relwidth = 0.99, relheight = 0.21, relx = 0, x = 5, y = 5, anchor = "nw") self.frame1.first_run = True # function function_label = ttk.Label(self.frame1, text = "Function") function_option_menu = ttk.OptionMenu(self.frame1, self.function, self.function.get(), *sorted(self.FUNCOPT)) # max_iter max_iter_label = ttk.Label(self.frame1, text = "Maximum number of iterations") max_iter_spinbox = Spinbox(self.frame1, from_ = 2, to_ = 9999, increment = 1, textvariable = self.max_iter, width = 6, justify = "right", takefocus = True) # fps fps_label = ttk.Label(self.frame1, text = "Delay between frames 
(ms)") fps_spinbox = Spinbox(self.frame1, from_ = 1, to_ = 1000, increment = 1, textvariable = self.interval, width = 6, justify = "right", takefocus = True) # seed seed_button = ttk.Checkbutton(self.frame1, text = "Fix seed", variable = self.fix_seed, takefocus = False) seed_spinbox = Spinbox(self.frame1, from_ = 0, to_ = self.MAX_SEED, increment = 1, textvariable = self.seed, width = 6, justify = "right", takefocus = True) # solver solver_label = ttk.Label(self.frame1, text = "Solver") solver_option_menu = ttk.OptionMenu(self.frame1, self.solver_name, self.solver_name.get(), *(self.EAOPT + self.MCOPT), command = self.select_widget) # constrain constrain_button = ttk.Checkbutton(self.frame1, text = "Constrain", variable = self.constrain, takefocus = False) # Layout function_label.place(relx = 0., x = 5, y = 5, anchor = "nw") function_option_menu.place(relx = 0., x = 75, y = 3, anchor = "nw") max_iter_label.place(relx = 0., x = 5, y = 30, anchor = "nw") max_iter_spinbox.place(width = 80, relx = 0., x = 220, y = 30, anchor = "nw") fps_label.place(relx = 0., x = 5, y = 55, anchor = "nw") fps_spinbox.place(width = 80, relx = 0., x = 220, y = 55, anchor = "nw") seed_button.place(relx = 0., x = 5, y = 80, anchor = "nw") seed_spinbox.place(width = 80, relx = 0., x = 220, y = 80, anchor = "nw") solver_label.place(relx = 0.35, x = 0, y = 5, anchor = "nw") solver_option_menu.place(relx = 0.35, x = 50, y = 3, anchor = "nw") constrain_button.place(relx = 0.35, x = 0, y = 80, anchor = "nw") def frame1_pop(self): if not self.frame1.first_run: self.frame1.pop.forget() self.frame1.pop = ttk.Frame(self.frame1, borderwidth = 0) self.frame1.pop.place(width = 170, height = 25, relx = 0.35, y = 30, anchor = "nw") if self.solver_name.get() in self.EAOPT: # popsize popsize_label = ttk.Label(self.frame1.pop, text = "Population size") popsize_spinbox = Spinbox(self.frame1.pop, from_ = 1, to_ = 999, increment = 1, textvariable = self.popsize, width = 3, justify = "right", takefocus = True) # Layout popsize_label.place(relx = 0, x = 0, y = 0, anchor = "nw") popsize_spinbox.place(width = 60, relx = 0, x = 110, y = 0, anchor = "nw") def frame1_sync(self): if not self.frame1.first_run: self.frame1.sync.forget() self.frame1.sync = ttk.Frame(self.frame1, borderwidth = 0) self.frame1.sync.place(width = 170, height = 25, relx = 0.35, y = 55, anchor = "nw") if self.solver_name.get() in [ "CPSO", "PSO", "DE" ]: # sync sync_button = ttk.Checkbutton(self.frame1.sync, text = "Synchronize", variable = self.sync, takefocus = False) # Layout sync_button.place(relx = 0, x =0, y = 0, anchor = "nw") def frame2(self): from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg self.frame2 = ttk.Frame(self.master, borderwidth = 2, relief = "groove") self.frame2.place(bordermode = "outside", relwidth = 0.99, relheight = 0.72, relx = 0, rely = 0.22, x = 5, y = 5, anchor = "nw") self.frame2.canvas = ttk.Frame(self.frame2, borderwidth = 0) self.frame2.canvas.place(relwidth = 1, relheight = 1, relx = 0, anchor = "nw") self.fig = Figure(figsize = (13, 6), facecolor = "white") self.canvas = FigureCanvasTkAgg(self.fig, master = self.frame2.canvas) self.fig.canvas.mpl_connect("button_press_event", self._onClick) self.canvas.get_tk_widget().pack() def init_widget(self): if not self.frame1.first_run: self.frame1.sliders.forget() else: self.frame1.first_run = False self.frame1.sliders = ttk.Frame(self.frame1, borderwidth = 0) self.frame1.sliders.place(relwidth = 0.45, relheight = 1., relx = 0.55, anchor = "nw") def select_widget(self, 
solver): self.frame1_pop() self.frame1_sync() if solver == "CPSO": self.cpso_widget() elif solver == "PSO": self.pso_widget() elif solver == "DE": self.de_widget() elif solver == "CMAES": self.cmaes_widget() elif solver == "VDCMA": self.cmaes_widget() elif solver == "Hastings": self.hastings_widget() elif solver == "Hamiltonian": self.hamiltonian_widget() def _label(self, text, position, kwargs = {}): label = ttk.Label(self.frame1.sliders, text = text, **kwargs) if position == 1: label.place(relx = 0, x = 0, y = 5, anchor = "nw") elif position == 2: label.place(relx = 0, x = 0, y = 50, anchor = "nw") elif position == 3: label.place(relx = 0.5, x = 0, y = 5, anchor = "nw") elif position == 4: label.place(relx = 0.5, x = 0, y = 50, anchor = "nw") return label def _scale(self, from_, to, resolution, variable, position, command = None, kwargs = {}): if command is None: command = lambda s: variable.set(round(float(s), 3)) scale = ttk.Scale(self.frame1.sliders, from_ = from_, to = to, variable = variable, orient = "horizontal", length = 20, command = command, takefocus = False, **kwargs) if position == 1: scale.place(relwidth = 0.35, relx = 0, x = 0, y = 25, anchor = "nw") elif position == 2: scale.place(relwidth = 0.35, relx = 0, x = 0, y = 70, anchor = "nw") elif position == 3: scale.place(relwidth = 0.35, relx = 0.5, x = 0, y = 25, anchor = "nw") elif position == 4: scale.place(relwidth = 0.35, relx = 0.5, x = 0, y = 70, anchor = "nw") return scale def _entry(self, variable, position, kwargs = {}): entry = ttk.Entry(self.frame1.sliders, textvariable = variable, justify = "right", takefocus = True, **kwargs) if position == 1: entry.place(width = 45, relx = 0.35, x = -3, y = 26, anchor = "nw") elif position == 2: entry.place(width = 45, relx = 0.35, x = -3, y = 71, anchor = "nw") elif position == 3: entry.place(width = 45, relx = 0.85, x = -3, y = 26, anchor = "nw") elif position == 4: entry.place(width = 45, relx = 0.85, x = -3, y = 71, anchor = "nw") return entry def pso_widget(self): # Initialize widget self.init_widget() # Omega self._label("Inertial weight", 1) self._scale(0., 1., 0.01, self.w, 1) self._entry(self.w, 1) # C1 self._label("Cognition parameter", 2) self._scale(0., 4., 0.01, self.c1, 2) self._entry(self.c1, 2) # C2 self._label("Sociability parameter", 3) self._scale(0., 4., 0.01, self.c2, 3) self._entry(self.c2, 3) def cpso_widget(self): # Initialize widget self.pso_widget() # Gamma self._label("Competitivity parameter", 4) self._scale(0., 2., 0.01, self.gamma, 4) self._entry(self.gamma, 4) def de_widget(self): # Initialize widget self.init_widget() # strategy self.strategy_label = ttk.Label(self.frame1.sliders, text = "Strategy") self.strategy_option_menu = ttk.OptionMenu(self.frame1.sliders, self.strategy, self.strategy.get(), *sorted(self.STRATOPT)) self.strategy_label.place(relx = 0, x = 0, y = 5, anchor = "nw") self.strategy_option_menu.place(relx = 0, x = 70, y = 3, anchor = "nw") # CR self._label("Crossover probability", 2) self._scale(0., 1., 0.01, self.CR, 2) self._entry(self.CR, 2) # F self._label("Differential weight", 4) self._scale(0., 2., 0.01, self.F, 4) self._entry(self.F, 4) def cmaes_widget(self): # Initialize widget self.init_widget() # sigma self._label("Step size", 1) self._scale(0.01, 10., 0.01, self.sigma, 1) self._entry(self.sigma, 1) # mu_perc self._label("Percentage of offsprings", 2) self._scale(0.01, 1., 0.01, self.mu_perc, 2) self._entry(self.mu_perc, 2) def hastings_widget(self): # Initialize widget self.init_widget() vcmd1 = 
self.frame1.pop.register(self._validate_mcmc_stepsize_x1) vcmd2 = self.frame1.pop.register(self._validate_mcmc_stepsize_x2) # stepsize x1 self._label("Step size X1", 1) ss = self._scale(-4., 0., 0.01, self.log_mcmc_stepsize_x1, 1, lambda val: self.mcmc_stepsize_x1.set(round(10.**float(val), 4))) ss.set(np.log10(self.mcmc_stepsize_x1.get())) self._entry(self.mcmc_stepsize_x1, 1, kwargs = dict(validate = "key", validatecommand = (vcmd1, "%P"))) # stepsize x2 self._label("Step size X2", 2) ss = self._scale(-4., 0., 0.01, self.log_mcmc_stepsize_x2, 2, lambda val: self.mcmc_stepsize_x2.set(round(10.**float(val), 4))) ss.set(np.log10(self.mcmc_stepsize_x2.get())) self._entry(self.mcmc_stepsize_x2, 2, kwargs = dict(validate = "key", validatecommand = (vcmd2, "%P"))) def hamiltonian_widget(self): # Initialize widget self.init_widget() vcmd = self.frame1.pop.register(self._validate_hmc_stepsize) # stepsize self._label("Step size", 1) ss = self._scale(-4., 0., 0.01, self.log_hmc_stepsize, 1, lambda val: self.hmc_stepsize.set(round(10.**float(val), 4))) ss.set(np.log10(self.hmc_stepsize.get())) self._entry(self.hmc_stepsize, 1, kwargs = dict(validate = "key", validatecommand = (vcmd, "%P"))) # Leap self._label("Number of leap frog steps", 2) self._scale(1, 100, 1, self.n_leap, 2, lambda val: self.n_leap.set(int(np.floor(float(val))))) self._entry(self.n_leap, 2) def _validate_mcmc_stepsize_x1(self, val): try: val = float(val) if val > 0.: self.log_mcmc_stepsize_x1.set(np.log10(val)) except ValueError: pass return True def _validate_mcmc_stepsize_x2(self, val): try: val = float(val) if val > 0.: self.log_mcmc_stepsize_x2.set(np.log10(val)) except ValueError: pass return True def _validate_hmc_stepsize(self, val): try: val = float(val) if val > 0.: self.log_hmc_stepsize.set(np.log10(val)) except ValueError: pass return True def footer(self): # Run button run_button = ttk.Button(self.master, text = "Run", command = self.run) # Exit button exit_button = ttk.Button(self.master, text = "Exit", command = self.close_window) # Layout run_button.place(relwidth = 0.1, relx = 0.9, rely = 1, x = -5, y = -5, anchor = "se") exit_button.place(relwidth = 0.1, relx = 1, rely = 1, x = -5, y = -5, anchor = "se") def run(self): if self.check_variables(): # To avoid errors when clicking in the window if self.first_run: self.first_run = False # To ensure repeatability if needed if not self.fix_seed.get(): self.seed.set(np.random.randint(self.MAX_SEED)) np.random.seed(self.seed.get()) # Initialize function func = "_".join(self.function.get().split()).lower() self.bf = BenchmarkFunction(func, n_dim = 2) # Solve solver_name = self.solver_name.get().lower() if solver_name in [ "hastings", "hamiltonian" ]: if solver_name == "hastings": stepsize = [ self.mcmc_stepsize_x1.get(), self.mcmc_stepsize_x2.get() ] else: stepsize = self.hmc_stepsize.get() self.solver = MonteCarlo(max_iter = self.max_iter.get(), constrain = bool(self.constrain.get()), **self.bf.get()) self.solver.sample(sampler = solver_name, stepsize = stepsize, n_leap = self.n_leap.get()) elif solver_name in [ "cpso", "pso", "de", "cmaes", "vdcma" ]: self.solver = Evolutionary(popsize = self.popsize.get(), max_iter = self.max_iter.get(), constrain = bool(self.constrain.get()), snap = True, **self.bf.get()) self.solver.optimize(solver = solver_name, sync = bool(self.sync.get()), w = self.w.get(), c1 = self.c1.get(), c2 = self.c2.get(), gamma = self.gamma.get(), CR = self.CR.get(), F = self.F.get(), strategy = self.strategy.get().lower(), sigma = self.sigma.get(), 
                                     mu_perc = self.mu_perc.get())

        # Animate
        self.animate(interval = self.interval.get(), yscale = "log")

    def animate(self, interval = 100, nx = 101, ny = 101, n_levels = 10,
                yscale = "linear", repeat = True, kwargs = {}):
        # Clear figure
        if self.anim_running:
            self.anim.event_source.stop()
        self.fig.clear()

        # Initialize parameters
        models = self.solver.models
        if self.solver._solver in [ "hastings", "hamiltonian" ]:
            func = self._update_monte_carlo
            gfit = self.solver.energy
            frames = models.shape[0]
            linestyle = "--"
            ylabel = "Fitness"
        elif self.solver._solver in [ "cpso", "pso", "de", "cmaes", "vdcma" ]:
            func = self._update_evolutionary
            gfit = self._gfit(self.solver.energy)
            frames = models.shape[-1]
            linestyle = "none"
            ylabel = "Global best fitness"
        max_iter = len(gfit)
        it = np.linspace(1, max_iter, max_iter)

        # Initialize axis
        ax1 = self.fig.add_subplot(1, 2, 1)
        ax2 = self.fig.add_subplot(1, 2, 2)
        self.bf.plot(axes = ax1, cont_kws = kwargs)
        self.scatplot, = ax1.plot([], [], linestyle = linestyle, color = "black",
                                  marker = "o", markersize = 12,
                                  markerfacecolor = "white",
                                  markeredgecolor = "black", animated = True)
        ax2.plot(it, gfit, linestyle = "-.", linewidth = 1, color = "black")
        self.enerplot, = ax2.plot([], [], linestyle = "-", linewidth = 2,
                                  color = "red")
        ax1.set_xlabel("X1", fontsize = 12)
        ax1.set_ylabel("X2", fontsize = 12)
        ax1.set_xlim(self.bf._lower[0], self.bf._upper[0])
        ax1.set_ylim(self.bf._lower[1], self.bf._upper[1])
        ax2.set_xlim((1, max_iter))
        ax2.set_yscale(yscale)
        ax2.set_ylabel(ylabel, fontsize = 12)
        ax2.grid(True, linestyle = ":")
        self.iter = ax2.text(0.99, 0.99, "", va = "top", ha = "right",
                             fontsize = 10, transform = ax2.transAxes,
                             animated = True)

        # Animate
        self.anim_running = True
        self.anim = animation.FuncAnimation(self.fig, func, fargs = (models, gfit),
                                            frames = frames, interval = interval,
                                            repeat = repeat, blit = True)
        self.fig.tight_layout()

    def _update_monte_carlo(self, i, models, gfit):
        self.scatplot.set_data(models[:i,0], models[:i,1])
        self.enerplot.set_xdata(np.linspace(1, i+1, i+1))
        self.enerplot.set_ydata(gfit[:i+1])
        self.iter.set_text("Sample %d" % (i+1))
        return self.scatplot, self.enerplot, self.iter,

    def _update_evolutionary(self, i, models, gfit):
        self.scatplot.set_data(models[:,0,i], models[:,1,i])
        self.enerplot.set_xdata(np.linspace(1, i+1, i+1))
        self.enerplot.set_ydata(gfit[:i+1])
        self.iter.set_text("Iteration %d" % (i+1))
        return self.scatplot, self.enerplot, self.iter,

    def _gfit(self, energy):
        gfit = [ energy[:,0].min() ]
        for i in range(1, energy.shape[1]):
            gfit.append(min(gfit[i-1], energy[:,i].min()))
        return np.array(gfit)

    def _onClick(self, event):
        if not self.first_run:
            if self.anim_running:
                self.anim.event_source.stop()
                self.anim_running = False
            else:
                self.anim.event_source.start()
                self.anim_running = True

    def export_models(self):
        if self._check_run():
            filename = tkfile.asksaveasfilename(title = "Export models",
                                                filetypes = [ ("Pickle", ".pickle") ],
                                                defaultextension = ".pickle")
            if len(filename) > 0:
                with open(filename, "wb") as f:
                    pickle.dump(self.solver.models, f, protocol = pickle.HIGHEST_PROTOCOL)

    def export_fitness(self):
        if self._check_run():
            filename = tkfile.asksaveasfilename(title = "Export fitness",
                                                filetypes = [ ("Pickle", ".pickle") ],
                                                defaultextension = ".pickle")
            if len(filename) > 0:
                with open(filename, "wb") as f:
                    pickle.dump(self.solver.energy, f, protocol = pickle.HIGHEST_PROTOCOL)

    def _check_run(self):
        if self.first_run:
            tkmessage.showerror("Error", "No optimization performed yet.")
            return False
        else:
            return True

    def close_window(self):
        yes = tkmessage.askyesno("Exit", "Do you really want to quit?")
        if yes:
            self.close()

    def define_variables(self):
        self.solver_name = tk.StringVar(self.master)
        self.function = tk.StringVar(self.master)
        self.popsize = tk.IntVar(self.master)
        self.max_iter = tk.IntVar(self.master)
        self.interval = tk.IntVar(self.master)
        self.mcmc_stepsize_x1 = tk.DoubleVar(self.master)
        self.mcmc_stepsize_x2 = tk.DoubleVar(self.master)
        self.hmc_stepsize = tk.DoubleVar(self.master)
        self.log_mcmc_stepsize_x1 = tk.DoubleVar(self.master)
        self.log_mcmc_stepsize_x2 = tk.DoubleVar(self.master)
        self.log_hmc_stepsize = tk.DoubleVar(self.master)
        self.n_leap = tk.IntVar(self.master)
        self.w = tk.DoubleVar(self.master)
        self.c1 = tk.DoubleVar(self.master)
        self.c2 = tk.DoubleVar(self.master)
        self.gamma = tk.DoubleVar(self.master)
        self.CR = tk.DoubleVar(self.master)
        self.F = tk.DoubleVar(self.master)
        self.strategy = tk.StringVar(self.master)
        self.sigma = tk.DoubleVar(self.master)
        self.mu_perc = tk.DoubleVar(self.master)
        self.seed = tk.IntVar(self.master)
        self.fix_seed = tk.BooleanVar(self.master)
        self.constrain = tk.BooleanVar(self.master)
        self.sync = tk.BooleanVar(self.master)

    def trace_variables(self):
        self.solver_name.trace("w", self.callback)
        self.function.trace("w", self.callback)
        self.popsize.trace("w", self.callback)
        self.max_iter.trace("w", self.callback)
        self.interval.trace("w", self.callback)
        self.mcmc_stepsize_x1.trace("w", self.callback)
        self.mcmc_stepsize_x2.trace("w", self.callback)
        self.hmc_stepsize.trace("w", self.callback)
        self.log_mcmc_stepsize_x1.trace("w", self.callback)
        self.log_mcmc_stepsize_x2.trace("w", self.callback)
        self.log_hmc_stepsize.trace("w", self.callback)
        self.n_leap.trace("w", self.callback)
        self.w.trace("w", self.callback)
        self.c1.trace("w", self.callback)
        self.c2.trace("w", self.callback)
        self.gamma.trace("w", self.callback)
        self.CR.trace("w", self.callback)
        self.F.trace("w", self.callback)
        self.strategy.trace("w", self.callback)
        self.sigma.trace("w", self.callback)
        self.mu_perc.trace("w", self.callback)
        self.seed.trace("w", self.callback)
        self.fix_seed.trace("w", self.callback)
        self.constrain.trace("w", self.callback)
        self.sync.trace("w", self.callback)

    def init_variables(self):
        self.solver_name.set("CPSO")
        self.function.set("Rosenbrock")
        self.popsize.set(10)
        self.max_iter.set(200)
        self.interval.set(60)
        self.mcmc_stepsize_x1.set(0.01)
        self.mcmc_stepsize_x2.set(0.01)
        self.hmc_stepsize.set(0.001)
        self.log_mcmc_stepsize_x1.set(np.log10(self.mcmc_stepsize_x1.get()))
        self.log_mcmc_stepsize_x2.set(np.log10(self.mcmc_stepsize_x2.get()))
        self.log_hmc_stepsize.set(np.log10(self.hmc_stepsize.get()))
        self.n_leap.set(10)
        self.w.set(0.73)
        self.c1.set(1.496)
        self.c2.set(1.496)
        self.gamma.set(1.)
        self.CR.set(0.1)
        self.F.set(0.5)
        self.strategy.set("best2")
        self.sigma.set(0.5)
        self.mu_perc.set(0.5)
        self.seed.set(42)
        self.fix_seed.set(False)
        self.constrain.set(True)
        self.sync.set(False)

    def check_variables(self):
        # Check popsize
        solver_name = self.solver_name.get().lower()
        if solver_name.upper() in self.EAOPT and self.popsize.get() < self.MIN_POPSIZE[solver_name]:
            tkmessage.showerror("Error", "For %s, population size should be greater than %d." \
                                % (solver_name.upper(), self.MIN_POPSIZE[solver_name]-1))
            return False
        return True

    def close(self):
        self.master.quit()
        self.master.destroy()

    def callback(self, *args):
        pass


def main():
    """
    Start StochOPy Viewer window.
    """
    import matplotlib
    matplotlib.use("TkAgg")
    from sys import platform as _platform

    root = tk.Tk()
    root.resizable(0, 0)
    StochOGUI(root)
    s = ttk.Style()
    if _platform == "win32":
        s.theme_use("vista")
    elif _platform in [ "linux", "linux2" ]:
        s.theme_use("alt")
    elif _platform == "darwin":
        s.theme_use("aqua")
    root.mainloop()
mit
DessimozLab/treeCl
treeCl/clustering.py
1
21507
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from builtins import object

# standard library

# third party
import numpy as np
from scipy.cluster.hierarchy import fcluster, dendrogram
from scipy.spatial.distance import squareform
import fastcluster
import skbio

try:
    from Bio.Cluster import kmedoids
    Biopython_Unavailable = False
except ImportError:
    print("Biopython unavailable - kmedoids clustering disabled")
    Biopython_Unavailable = True

from sklearn.cluster import AffinityPropagation, DBSCAN, KMeans
from sklearn.manifold import spectral_embedding

# GMM was deprecated in scikit-learn version 0.18 and fully removed in 0.20
import pkg_resources
sklearn_version = [int(x) for x in pkg_resources.get_distribution("scikit-learn").version.split('.')]
USE_GAUSSIAN_MIXTURE = tuple(sklearn_version) >= (0, 20, 0)
if USE_GAUSSIAN_MIXTURE:
    from sklearn.mixture import GaussianMixture
else:
    from sklearn.mixture import GMM

# treeCl
from .distance_matrix import DistanceMatrix, rbf, binsearch_mask, kmask, kscale, affinity, laplace, eigen, \
    double_centre, normalise_rows, CoordinateMatrix
from .partition import Partition
from .utils import enum
from .errors import OptionError, isnumbercheck, rangecheck

options = enum(
    "PRUNING_NONE",
    "PRUNING_ESTIMATE",
    "PRUNING_MANUAL",
    "LOCAL_SCALE_MEDIAN",
    "LOCAL_SCALE_ESTIMATE",
    "LOCAL_SCALE_MANUAL")

methods = enum(
    "KMEANS",
    "GMM",
    "WARD")

linkage = enum(
    "SINGLE",
    "COMPLETE",
    "AVERAGE",
    "WARD",
    "WEIGHTED",
    "CENTROID",
    "MEDIAN")

mds = enum(
    "CLASSICAL",
    "METRIC")

spectral = enum(
    "SPECTRAL",
    "KPCA",
    "ZELNIKMANOR")


def _get_threshold(linkmat, nclusters):
    linkmat_size = len(linkmat)
    if nclusters <= 1:
        br_top = linkmat[linkmat_size - nclusters][2]
    else:
        br_top = linkmat[linkmat_size - nclusters + 1][2]
    if nclusters >= len(linkmat):
        br_bottom = 0
    else:
        br_bottom = linkmat[linkmat_size - nclusters][2]
    threshold = 0.5 * (br_top + br_bottom)
    return threshold


def _hclust(linkmat, nclusters):
    threshold = _get_threshold(linkmat, nclusters)
    t = fcluster(linkmat, threshold, criterion='distance')
    return Partition(t)


class ClusteringManager(object):
    """
    Clustering manager base class
    """
    def __init__(self, dm):
        if isinstance(dm, np.ndarray):
            dm = DistanceMatrix.from_array(dm)

        if not isinstance(dm, DistanceMatrix):
            raise ValueError('Distance matrix should be a numpy array or treeCl.DistanceMatrix')
        self.dm = dm

    def __str__(self):
        return str(self.dm)

    def get_dm(self, noise):
        return self.dm.add_noise().values if noise else self.dm.values


class EMMixin(object):
    """
    Provide methods to do kmeans and GMM estimation
    """
    @staticmethod
    def kmeans(nclusters, coords):
        est = KMeans(n_clusters=nclusters, n_init=50, max_iter=500)
        est.fit(coords)
        return Partition(est.labels_)

    @staticmethod
    def gmm(nclusters, coords, n_init=50, n_iter=500):
        if USE_GAUSSIAN_MIXTURE:
            est = GaussianMixture(n_components=nclusters, n_init=n_init, max_iter=n_iter)
        else:
            est = GMM(n_components=nclusters, n_init=n_init, n_iter=n_iter)
        est.fit(coords)
        return Partition(est.predict(coords))


def _check_val(opt, min_, max_):
    isnumbercheck(opt)
    rangecheck(opt, min_, max_)


class Spectral(ClusteringManager, EMMixin):
    """
    Manager for spectral clustering and Kernel PCA clustering
    """
    def __init__(self, dm,
                 pruning_option=options.PRUNING_NONE,
                 scale_option=options.LOCAL_SCALE_MEDIAN,
                 manual_pruning=None,
                 manual_scale=None,
                 verbosity=0):
        super(Spectral, self).__init__(dm)
        try:
            options.reverse[pruning_option]
        except KeyError:
            raise OptionError(pruning_option, list(options.reverse.values()))
        try:
            options.reverse[scale_option]
        except KeyError:
            raise OptionError(scale_option, list(options.reverse.values()))

        if pruning_option == options.PRUNING_MANUAL:
            _check_val(manual_pruning, 2, self.dm.df.shape[0])

        if scale_option == options.LOCAL_SCALE_MANUAL:
            _check_val(manual_scale, 2, self.dm.df.shape[0])

        self._pruning_option = pruning_option
        self._scale_option = scale_option
        self._manual_pruning = manual_pruning
        self._manual_scale = manual_scale
        self._verbosity = verbosity
        self._affinity = self.decompose()

    def __str__(self):
        return ('Spectral Clustering with local scaling:\n'
                'Pruning option: {}\n'
                'Scaling option: {}'
                .format(options.reverse[self._pruning_option], options.reverse[self._scale_option]))

    def decompose(self, noise=False, verbosity=0, logic='or', **kwargs):
        """ Use prune to remove links between distant points:

        prune is None: no pruning
        prune={int > 0}: prunes links beyond `prune` nearest neighbours
        prune='estimate': searches for the smallest value that retains a fully
        connected graph
        """
        matrix = self.get_dm(noise)

        # get local scale estimate
        est_scale = None

        # ADJUST MASK
        if self._pruning_option == options.PRUNING_NONE:
            # Set kp to max value
            kp = len(matrix) - 1
            mask = np.ones(matrix.shape, dtype=bool)
        elif self._pruning_option == options.PRUNING_MANUAL:
            # Manually set value of kp
            kp = self._manual_pruning
            mask = kmask(matrix, self._manual_pruning, logic=logic)
        elif self._pruning_option == options.PRUNING_ESTIMATE:
            # Must estimate value of kp
            kp, mask, est_scale = binsearch_mask(matrix, logic=logic)
        else:
            raise ValueError("Unexpected error: 'kp' not set")

        # ADJUST SCALE
        if self._scale_option == options.LOCAL_SCALE_MEDIAN:
            dist = np.median(matrix, axis=1)
            scale = np.outer(dist, dist)
        elif self._scale_option == options.LOCAL_SCALE_MANUAL:
            scale = kscale(matrix, self._manual_scale)
        elif self._scale_option == options.LOCAL_SCALE_ESTIMATE:
            if est_scale is None:
                _, _, scale = binsearch_mask(matrix, logic=logic)
            else:
                # Nothing to be done - est_scale was set during the PRUNING_ESTIMATE
                scale = est_scale
        else:
            raise ValueError("Unexpected error: 'scale' not set")

        # ZeroDivisionError safety check
        if not (scale > 1e-5).all():
            if verbosity > 0:
                print('Rescaling to avoid zero-div error')
            _, _, scale = binsearch_mask(matrix, logic=logic)
            assert (scale > 1e-5).all()

        aff = affinity(matrix, mask, scale)
        aff.flat[::len(aff)+1] = 1.0
        return aff

    def cluster(self, n, embed_dim=None, algo=spectral.SPECTRAL, method=methods.KMEANS):
        """
        Cluster the embedded coordinates using spectral clustering

        Parameters
        ----------
        n:          int
                    The number of clusters to return
        embed_dim:  int
                    The dimensionality of the underlying coordinates
                    Defaults to same value as n
        algo:       enum value (spectral.SPECTRAL | spectral.KPCA | spectral.ZELNIKMANOR)
                    Type of embedding to use
        method:     enum value (methods.KMEANS | methods.GMM)
                    The clustering method to use

        Returns
        -------
        Partition: Partition object describing the data partition
        """
        if n == 1:
            return Partition([1] * len(self.get_dm(False)))

        if embed_dim is None:
            embed_dim = n

        if algo == spectral.SPECTRAL:
            self._coords = self.spectral_embedding(embed_dim)
        elif algo == spectral.KPCA:
            self._coords = self.kpca_embedding(embed_dim)
        elif algo == spectral.ZELNIKMANOR:
            self._coords = self.spectral_embedding_(embed_dim)
        else:
            raise OptionError(algo, list(spectral.reverse.values()))
        if method == methods.KMEANS:
            p = self.kmeans(n, self._coords.df.values)
        elif method == methods.GMM:
            p = self.gmm(n, self._coords.df.values)
        elif method == methods.WARD:
            linkmat = fastcluster.linkage(self._coords.values, 'ward')
            p = _hclust(linkmat, n)
        else:
            raise OptionError(method, list(methods.reverse.values()))
        if self._verbosity > 0:
            print('Using clustering method: {}'.format(methods.reverse[method]))
        return p

    def spectral_embedding(self, n):
        """
        Embed the points using spectral decomposition of the laplacian of
        the affinity matrix

        Parameters
        ----------
        n: int
           The number of dimensions
        """
        coords = spectral_embedding(self._affinity, n)
        return CoordinateMatrix(normalise_rows(coords))

    def spectral_embedding_(self, n):
        """
        Old method for generating coords, used on original analysis of yeast data.
        Included to reproduce yeast result from paper.
        Reason for difference - switched to using spectral embedding method
        provided by scikit-learn (mainly because it spreads points over a sphere,
        rather than a half sphere, so looks better plotted).
        Uses a different Laplacian matrix.
        """
        aff = self._affinity.copy()
        aff.flat[::aff.shape[0]+1] = 0
        laplacian = laplace(aff)
        decomp = eigen(laplacian)
        return CoordinateMatrix(normalise_rows(decomp.vecs[:,:n]))

    def kpca_embedding(self, n):
        """
        Embed the points using kernel PCA of the affinity matrix

        Parameters
        ----------
        n: int
           The number of dimensions
        """
        return self.dm.embedding(n, 'kpca', affinity_matrix=self._affinity)

    @property
    def affinity(self):
        return self._affinity


class MultidimensionalScaling(ClusteringManager, EMMixin):
    """
    Manager for clustering using multidimensional scaling
    """
    def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS):
        """
        Cluster the embedded coordinates using multidimensional scaling

        Parameters
        ----------
        n:          int
                    The number of clusters to return
        embed_dim   int
                    The dimensionality of the underlying coordinates
                    Defaults to same value as n
        method:     enum value (methods.KMEANS | methods.GMM)
                    The clustering method to use

        Returns
        -------
        Partition: Partition object describing the data partition
        """
        if n == 1:
            return Partition([1] * len(self.get_dm(False)))

        if embed_dim is None:
            embed_dim = n

        if algo == mds.CLASSICAL:
            self._coords = self.dm.embedding(embed_dim, 'cmds')
        elif algo == mds.METRIC:
            self._coords = self.dm.embedding(embed_dim, 'mmds')
        else:
            raise OptionError(algo, list(mds.reverse.values()))

        if method == methods.KMEANS:
            p = self.kmeans(n, self._coords.values)
        elif method == methods.GMM:
            p = self.gmm(n, self._coords.values)
        elif method == methods.WARD:
            linkmat = fastcluster.linkage(self._coords.values, 'ward')
            p = _hclust(linkmat, n)
        else:
            raise OptionError(method, list(methods.reverse.values()))

        #if self._verbosity > 0:
        #    print('Using clustering method: {}'.format(methods.reverse[method]))
        return p


class Hierarchical(ClusteringManager):
    """ Apply clustering methods to distance matrix

    = Hierarchical clustering
        - single-linkage
        - complete-linkage
        - average-linkage (UPGMA)
        - Ward's method

    = k-medoids

    = Multidimensional Scaling (Principal Coordinate Analysis) + k-means

    = Spectral Clustering + k-means
        - NJW method
        - Shi-Malik method
        - Zelnik-Manor and Perona Local Scaling
        - Local Scaling with eigenvector rotation as stopping criterion
    """
    def __str__(self):
        return 'Hierarchical Clustering'

    def cluster(self, nclusters, linkage_method=linkage.WARD, **kwargs):
        """
        Do hierarchical clustering on a distance matrix using one of the methods:
            methods.SINGLE   = single-linkage clustering
            methods.COMPLETE = complete-linkage clustering
            methods.AVERAGE  = average-linkage clustering
            methods.WARD     = Ward's minimum variance method
        """
        if linkage_method == linkage.SINGLE:
            return self._hclust(nclusters, 'single', **kwargs)
        elif linkage_method == linkage.COMPLETE:
            return self._hclust(nclusters, 'complete', **kwargs)
        elif linkage_method == linkage.AVERAGE:
            return self._hclust(nclusters, 'average', **kwargs)
        elif linkage_method == linkage.WARD:
            return self._hclust(nclusters, 'ward', **kwargs)
        elif linkage_method == linkage.WEIGHTED:
            return self._hclust(nclusters, 'weighted', **kwargs)
        elif linkage_method == linkage.CENTROID:
            return self._hclust(nclusters, 'centroid', **kwargs)
        elif linkage_method == linkage.MEDIAN:
            return self._hclust(nclusters, 'median', **kwargs)
        else:
            raise ValueError('Unknown linkage_method: {}'.format(linkage_method))

    def _hclust(self, nclusters, method, noise=False):
        """
        :param nclusters: Number of clusters to return
        :param linkage_method: single, complete, average, ward, weighted, centroid or median
            (http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html)
        :param noise: Add Gaussian noise to the distance matrix prior to clustering (bool, default=False)
        :return: Partition object describing clustering
        """
        matrix = self.get_dm(noise)

        linkmat = fastcluster.linkage(squareform(matrix), method)
        self.nclusters = nclusters  # Store these in case we want to plot
        self.linkmat = linkmat      #
        return _hclust(linkmat, nclusters)

    def plot_dendrogram(self, nclusters=None, leaf_font_size=8, leaf_rotation=90, names=None,
                        title_font_size=16, ):
        """
        Plots the dendrogram of the most recently generated partition

        :param nclusters: Override the plot default number of clusters
        :return: matplotlib.pyplot.figure
        """
        if not hasattr(self, 'nclusters') and not hasattr(self, 'linkmat'):
            raise ValueError("This instance has no plottable information.")

        if nclusters is None:
            nclusters = self.nclusters

        threshold = _get_threshold(self.linkmat, nclusters)

        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=(11.7, 8.3))

        if names is not None:
            labfn=lambda leaf: names[leaf]
        else:
            labfn=None
            leaf_rotation=0

        dendrogram(
            self.linkmat,
            color_threshold=threshold,
            leaf_font_size=leaf_font_size,
            leaf_rotation=leaf_rotation,
            leaf_label_func=labfn,
            count_sort=True,
        )

        plt.suptitle('Dendrogram', fontsize=title_font_size)
        # plt.title('Distance metric: {0} Linkage method: {1} Number of classes: {2}'.format(compound_key[0],
        #           compound_key[1], compound_key[2]), fontsize=12)
        plt.axhline(threshold, color='grey', ls='dashed')
        plt.xlabel('Gene')
        plt.ylabel('Distance')
        return fig


class Automatic(ClusteringManager):
    """ Clustering methods that automatically return the number of clusters

    - Affinity Propagation
    - DBSCAN
    """
    def affinity_propagation(self, affinity_matrix=None, sigma=1, **kwargs):
        """
        :param kwargs: damping=0.5, max_iter=200, convergence_iter=15, copy=True,
            preference=None, verbose=False
        :return:
        """
        if affinity_matrix is None:
            aff = rbf(self.dm.values, sigma)
        else:
            aff = affinity_matrix

        est = AffinityPropagation(affinity='precomputed', **kwargs)
        est.fit(aff.view(np.ndarray))
        return Partition(est.labels_)

    def dbscan(self, eps=0.75, min_samples=3):
        """
        :param kwargs: key-value arguments to pass to DBSCAN
            (eps: max dist between points in same neighbourhood,
             min_samples: number of points in a neighbourhood)
        :return:
        """
        est = DBSCAN(metric='precomputed', eps=eps, min_samples=min_samples)
        est.fit(self.get_dm(False))
        return Partition(est.labels_)


class Kmedoids(ClusteringManager):
    """
    Kmedoids clustering acts directly on the distance matrix without needing
    an intermediate embedding into coordinate space
    """
    def cluster(self, nclusters, noise=False, npass=100, nreps=1):
        if Biopython_Unavailable:
            print('kmedoids not available without Biopython')
            return

        matrix = self.get_dm(noise)

        p = [kmedoids(matrix, nclusters=nclusters, npass=npass) for _ in range(nreps)]
        p.sort(key=lambda x: x[1])
        return Partition(p[0][0])


class Evaluation(ClusteringManager):
    """
    Methods for evaluating the fit of a cluster to the distance matrix

    anosim and permanova seem pretty useless; silhouette is ok
    """
    def anosim(self, partition, n_permutations=999):
        if partition.is_minimal():
            raise ValueError("ANOSim is not defined for singleton clusters")
        elif partition.is_maximal():
            raise ValueError("ANOSim is not defined for maximally divided partitions")
        result = skbio.stats.distance.ANOSIM(skbio.DistanceMatrix(self.get_dm(False)), partition.partition_vector)
        return result(n_permutations)

    def permanova(self, partition, n_permutations=999):
        if partition.is_minimal():
            raise ValueError("PERMANOVA is not defined for singleton clusters")
        elif partition.is_maximal():
            raise ValueError("PERMANOVA is not defined for maximally divided partitions")
        result = skbio.stats.distance.PERMANOVA(skbio.DistanceMatrix(self.get_dm(False)), partition.partition_vector)
        return result(n_permutations)

    def silhouette(self, partition):
        pvec = np.array(partition.partition_vector)
        groups = np.unique(pvec)
        nbrs = np.zeros(pvec.shape)
        scores = np.zeros(pvec.shape)

        if len(groups) == 1:
            raise ValueError("Silhouette is not defined for singleton clusters")
        for ingroup in groups:
            ingroup_ix = np.where(pvec == ingroup)[0]
            within, between, outgroups = self.__get_mean_dissimilarities_for_group(pvec, ingroup, groups)
            between_min = between.min(axis=0)
            outgroup_ix, neighbours_ix = np.where(between == between_min)
            neighbours = np.zeros(neighbours_ix.shape)
            neighbours[neighbours_ix] = outgroups[outgroup_ix]
            nbrs[ingroup_ix] = neighbours
            scores[ingroup_ix] = self.__silhouette_calc(within, between_min)

        return scores[1].mean()

    @staticmethod
    def __get_indices_for_groups_by_index(ix, jx):
        if len(ix) == len(jx) == 1 and ix == jx:
            return [list(ix)], [list(jx)]
        row_indices = [[i for j in jx if i != j] for i in ix]
        column_indices = [[j for j in jx if j != i] for i in ix]
        return row_indices, column_indices

    @staticmethod
    def __silhouette_calc(ingroup, outgroup):
        if len(ingroup) == 1:
            return 0
        max_ = np.array([ingroup, outgroup]).max(axis=0)
        return (outgroup - ingroup) / max_

    def __get_indices_for_groups(self, pvec, group1, group2):
        ix = np.where(pvec == group1)[0]
        jx = np.where(pvec == group2)[0]
        return self.__get_indices_for_groups_by_index(ix, jx)

    def __get_mean_dissimilarities_for_group(self, pvec, group, groups):
        outgroups = groups[groups != group]
        within_indices = self.__get_indices_for_groups(pvec, group, group)
        within_distances = self.dm.values[within_indices].mean(axis=1)
        dissimilarities = []
        for outgroup in outgroups:
            between_indices = self.__get_indices_for_groups(pvec, group, outgroup)
            between_distances = self.dm.values[between_indices]
            dissimilarities.append(between_distances.mean(axis=1))
        return within_distances, np.array(dissimilarities), outgroups
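
# --- Illustrative sketch (not part of treeCl) ---------------------------------
# How the _get_threshold/_hclust pair above turns a linkage matrix into exactly
# `nclusters` flat clusters: the cut height sits halfway between the merge
# heights that separate nclusters and nclusters - 1 groups. The toy distance
# matrix below is made up, and this demo only runs when the module is executed
# as part of the installed package (e.g. `python -m treeCl.clustering`).
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # two clearly separated point clouds -> two obvious clusters
    points = np.vstack([rng.randn(10, 2), rng.randn(10, 2) + 5])
    dists = np.sqrt(((points[:, None] - points[None]) ** 2).sum(-1))

    linkmat = fastcluster.linkage(squareform(dists), 'ward')
    threshold = _get_threshold(linkmat, 2)          # halfway between the last two merges
    labels = fcluster(linkmat, threshold, criterion='distance')
    assert len(np.unique(labels)) == 2              # the cut recovers both groups
    print(labels)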
mit
ishanic/scikit-learn
sklearn/utils/metaestimators.py
283
2353
"""Utilities for meta-estimators""" # Author: Joel Nothman # Andreas Mueller # Licence: BSD from operator import attrgetter from functools import update_wrapper __all__ = ['if_delegate_has_method'] class _IffHasAttrDescriptor(object): """Implements a conditional property using the descriptor protocol. Using this class to create a decorator will raise an ``AttributeError`` if the ``attribute_name`` is not present on the base object. This allows ducktyping of the decorated method based on ``attribute_name``. See https://docs.python.org/3/howto/descriptor.html for an explanation of descriptors. """ def __init__(self, fn, attribute_name): self.fn = fn self.get_attribute = attrgetter(attribute_name) # update the docstring of the descriptor update_wrapper(self, fn) def __get__(self, obj, type=None): # raise an AttributeError if the attribute is not present on the object if obj is not None: # delegate only on instances, not the classes. # this is to allow access to the docstrings. self.get_attribute(obj) # lambda, but not partial, allows help() to work with update_wrapper out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs) # update the docstring of the returned function update_wrapper(out, self.fn) return out def if_delegate_has_method(delegate): """Create a decorator for methods that are delegated to a sub-estimator This enables ducktyping by hasattr returning True according to the sub-estimator. >>> from sklearn.utils.metaestimators import if_delegate_has_method >>> >>> >>> class MetaEst(object): ... def __init__(self, sub_est): ... self.sub_est = sub_est ... ... @if_delegate_has_method(delegate='sub_est') ... def predict(self, X): ... return self.sub_est.predict(X) ... >>> class HasPredict(object): ... def predict(self, X): ... return X.sum(axis=1) ... >>> class HasNoPredict(object): ... pass ... >>> hasattr(MetaEst(HasPredict()), 'predict') True >>> hasattr(MetaEst(HasNoPredict()), 'predict') False """ return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
bsd-3-clause