repo_name | path | copies | size | content | license
---|---|---|---|---|---
piotrsobecki/opt | opt/genetic.py | 1 | 4128 | import csv
import json
import logging
import pandas as pd
from deap import base, tools, algorithms
from opt.base import Configuration, Optimizer, Results
class LogHelper():
def __init__(self):
super().__init__()
self.logger = logging.getLogger('optimizer')
def log(self, context, generation_no, results):
pass
def setup(self, context):
pass
def close(self, context):
pass
class RoutingHOF():
def __init__(self, optimizer, context, results_class):
self.optimizer = optimizer
self.context = context
self.results_class = results_class
self.ngen = 0
self.results = None
def insert(self, item):
pass
def update(self, population):
results = self.results_class([self.optimizer.configuration(x) for x in population])
self.optimizer.on_gen_end(self.context, self.ngen, results)
self.ngen += 1
self.results = results
class GeneticConfiguration(Configuration):
def value(self):
return self.individual.fitness.values[0]
# noinspection PyTypeChecker,PyUnresolvedReferences
class GeneticOptimizer(Optimizer):
results_class = Results
def __init__(self, **settings):
if settings is None:
settings = {}
self.logger = logging.getLogger('optimizer')
self.settings = {**self.default_settings(), **settings}
def default_settings(self):
return {
'tournsize': 3,
'indpb': 0.05,
'ngen': 40,
'n': 300,
'mutpb': 0.1,
'cxpb': 0.5,
"verbose": True
}
def eval(self, individual):
raise NotImplementedError()
def individual(self, toolbox):
raise NotImplementedError()
def configuration(self, individual):
return GeneticConfiguration(individual)
def get_genlog(self):
return pd.read_csv(self.settings['genlog'], self.settings['sep'], index_col=0)
def get_datalog(self):
return pd.read_csv(self.settings['datalog'], self.settings['sep'], index_col=0)
def mate(self, toolbox):
toolbox.register("mate", tools.cxTwoPoint)
def mutate(self, toolbox):
toolbox.register("mutate", tools.mutFlipBit, indpb=self.indpb)
def evaluate(self, toolbox):
toolbox.register("evaluate", self.eval)
def select(self, toolbox):
toolbox.register("select", tools.selTournament, tournsize=self.tournsize)
def population(self, toolbox):
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def __getattr__(self, item):
if item in self.settings:
return self.settings[item]
return None
def on_gen_end(self, context, generation_no, results):
self.logger.debug('Generation %d, max: %s' % (generation_no, results.max()))
if self.verbose:
context['log'].log(context, generation_no, results)
def on_fit_start(self, context):
if self.verbose:
context['log'].setup(context)
def on_fit_end(self, context):
if self.verbose:
context['log'].close(context)
def log_helper(self):
return LogHelper()
def fit(self):
toolbox = base.Toolbox()
self.individual(toolbox)
self.population(toolbox)
self.evaluate(toolbox)
self.mate(toolbox)
self.mutate(toolbox)
self.select(toolbox)
population = toolbox.population(self.n)
context = {
'settings': self.settings,
'features': self.features,
'log': self.log_helper()
}
self.on_fit_start(context)
hof = RoutingHOF(self, context, results_class=self.results_class)
algorithms.eaSimple(population, toolbox, cxpb=self.cxpb, mutpb=self.mutpb, ngen=self.ngen, halloffame=hof,
verbose=self.verbose)
self.on_fit_end(context)
return hof.results
def __str__(self):
return "%s(Settings = %s)" % (type(self).__name__, json.dumps(self.settings, indent=4, sort_keys=True))
| mit |
henrykironde/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # list of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new set (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
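# ----------------------------------------------------------------------------
# Hedged sanity-check sketch (editor's addition, not part of sklearn's graph.py):
# for the unnormalized Laplacian L = D - A returned by graph_laplacian(), each
# row must sum to zero and the returned diagonal equals the node degrees. The
# 4-node path graph reuses the adjacency matrix from the docstring above.
if __name__ == '__main__':
    adjacency = np.array([[0, 1, 0, 0],
                          [1, 0, 1, 0],
                          [0, 1, 0, 1],
                          [0, 0, 1, 0]], dtype=np.float64)
    lap, diag = graph_laplacian(adjacency, normed=False, return_diag=True)
    assert np.allclose(lap.sum(axis=1), 0.0)          # rows of D - A sum to zero
    assert np.allclose(diag, adjacency.sum(axis=1))   # diagonal holds the degrees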
| bsd-3-clause |
lenovor/dynamic-nmf | find-dynamic-topics.py | 1 | 7627 | #!/usr/bin/env python
"""
Tool to generate a dynamic topic model, by combining a set of time window topic models.
"""
import os, sys, random, operator
import logging as log
from optparse import OptionParser
import numpy as np
import sklearn.preprocessing
import text.util
import unsupervised.nmf, unsupervised.rankings, unsupervised.coherence
# --------------------------------------------------------------
class TopicCollection:
def __init__( self, top_terms = 0, threshold = 1e-6 ):
# settings
self.top_terms = top_terms
self.threshold = threshold
# state
self.topic_ids = []
self.all_weights = []
self.all_terms = set()
def add_topic_model( self, H, terms, window_topic_labels ):
'''
Add topics from a window topic model to the collection.
'''
k = H.shape[0]
for topic_index in range(k):
topic_weights = {}
# use top terms only (sparse topic representation)?
if self.top_terms > 0:
top_indices = np.argsort( H[topic_index,:] )[::-1]
for term_index in top_indices[0:self.top_terms]:
topic_weights[terms[term_index]] = H[topic_index,term_index]
self.all_terms.add( terms[term_index] )
# use dense window topic vectors
else:
total_weight = 0.0
for term_index in range(len(terms)):
total_weight += H[topic_index,term_index]
for term_index in range(len(terms)):
w = H[topic_index,term_index] / total_weight
if w >= self.threshold:
topic_weights[terms[term_index]] = H[topic_index,term_index]
self.all_terms.add( terms[term_index] )
self.all_weights.append( topic_weights )
self.topic_ids.append( window_topic_labels[topic_index] )
def create_matrix( self ):
'''
Create the topic-term matrix from all window topics that have been added so far.
'''
# map terms to column indices
all_terms = list(self.all_terms)
M = np.zeros( (len(self.all_weights), len(all_terms)) )
term_col_map = {}
for term in all_terms:
term_col_map[term] = len(term_col_map)
# populate the matrix in row-order
row = 0
for topic_weights in self.all_weights:
for term in topic_weights.keys():
M[row,term_col_map[term]] = topic_weights[term]
row +=1
# normalize the matrix rows to L2 unit length
normalizer = sklearn.preprocessing.Normalizer(norm='l2', copy=True)
normalizer.fit(M)
M = normalizer.transform(M)
return (M,all_terms)
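# --------------------------------------------------------------
# Hedged sketch (editor's addition, not part of find-dynamic-topics.py):
# how TopicCollection stitches window topic models into the single
# row-normalized topic-term matrix that main() later factorizes with NMF.
# The tiny H matrices, vocabularies and window topic labels are made up
# purely for illustration.
def _topic_collection_demo():
    collection = TopicCollection()
    H1 = np.array([[0.9, 0.1, 0.0],
                   [0.0, 0.2, 0.8]])
    collection.add_topic_model(H1, ["economy", "bank", "sport"], ["W1_T01", "W1_T02"])
    H2 = np.array([[0.5, 0.5],
                   [0.1, 0.9]])
    collection.add_topic_model(H2, ["sport", "match"], ["W2_T01", "W2_T02"])
    M, all_terms = collection.create_matrix()
    assert M.shape == (4, len(all_terms))            # 4 window topics x union of terms
    assert np.allclose((M ** 2).sum(axis=1), 1.0)    # rows are L2-normalized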
# --------------------------------------------------------------
def main():
parser = OptionParser(usage="usage: %prog [options] window_topics1 window_topics2...")
parser.add_option("--seed", action="store", type="int", dest="seed", help="initial random seed", default=1000)
parser.add_option("-k", action="store", type="string", dest="krange", help="number of topics", default=None)
parser.add_option("-d", "--dims", action="store", type="int", dest="dimensions", help="number of dimensions to use for topic-term matrix", default=20)
parser.add_option("--maxiters", action="store", type="int", dest="maxiter", help="maximum number of iterations", default=200)
parser.add_option("-o","--outdir", action="store", type="string", dest="dir_out", help="output directory (default is current directory)", default=None)
parser.add_option("-m", "--model", action="store", type="string", dest="model_path", help="path to Word2Vec model, if performing automatic selection of number of topics", default=None)
parser.add_option("-t", "--top", action="store", type="int", dest="top", help="number of top terms to use, if performing automatic selection of number of topics", default=20)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="display topic descriptors")
(options, args) = parser.parse_args()
if( len(args) < 2 ):
parser.error( "Must specify at least two window topic files" )
log.basicConfig(level=20, format='%(message)s')
# Parse user-specified range for number of topics K
if options.krange is None:
parser.error("Must specific number of topics, or a range for the number of topics")
parts = options.krange.split(",")
kmin = int(parts[0])
if len(parts) == 1:
kmax = kmin
validation_measure = None
else:
kmax = int(parts[1])
if options.model_path is None:
parser.error("Must specific a file containing a Word2Vec model when performing automatic selection of number of topics")
log.info( "Loading Word2Vec model from %s ..." % options.model_path )
import gensim
model = gensim.models.Word2Vec.load(options.model_path)
validation_measure = unsupervised.coherence.WithinTopicMeasure( unsupervised.coherence.ModelSimilarity(model) )
# Output directory for results
if options.dir_out is None:
dir_out = os.getcwd()
else:
dir_out = options.dir_out
# Set random state
np.random.seed( options.seed )
random.seed( options.seed )
# Process each specified window topic model
log.info("- Processing individual time window topic models ...")
collection = TopicCollection()
for window_model_path in args:
# Load the cached time window
window_name = os.path.splitext( os.path.split( window_model_path )[-1] )[0]
(doc_ids, terms, term_rankings, partition, W, H, window_topic_labels) = unsupervised.nmf.load_nmf_results( window_model_path )
log.info("Loaded %d time window topics from %s" % (len(term_rankings),window_model_path) )
collection.add_topic_model( H, terms, window_topic_labels )
# Create the topic-term matrix
M, all_terms = collection.create_matrix()
log.info( "Created topic-term matrix of size %dx%d" % M.shape )
log.debug( "Matrix stats: range=[%.2f,%.2f] mean=%.2f" % ( np.min(M), np.mean(M), np.max(M) ) )
# NMF implementation
impl = unsupervised.nmf.SklNMF( max_iters = options.maxiter, init_strategy = "nndsvd" )
# Generate window topic model for the specified range of numbers of topics
coherence_scores = {}
for k in range(kmin,kmax+1):
log.info( "Applying dynamic topic modeling to matrix for k=%d topics ..." % k )
impl.apply( M, k )
log.info( "Generated %dx%d factor W and %dx%d factor H" % ( impl.W.shape[0], impl.W.shape[1], impl.H.shape[0], impl.H.shape[1] ) )
# Create a disjoint partition of documents
partition = impl.generate_partition()
# Create topic labels
topic_labels = []
for i in range( k ):
topic_labels.append( "D%02d" % (i+1) )
# Create term rankings for each topic
term_rankings = []
for topic_index in range(k):
ranked_term_indices = impl.rank_terms( topic_index )
term_ranking = [all_terms[i] for i in ranked_term_indices]
term_rankings.append(term_ranking)
# Print out the top terms?
if options.verbose:
print unsupervised.rankings.format_term_rankings( term_rankings, top = options.top )
# Evaluate topic coherence of this topic model?
if not validation_measure is None:
truncated_term_rankings = unsupervised.rankings.truncate_term_rankings( term_rankings, options.top )
coherence_scores[k] = validation_measure.evaluate_rankings( truncated_term_rankings )
log.info("Model coherence (k=%d) = %.4f" % (k,coherence_scores[k]) )
# Write results
results_out_path = os.path.join( dir_out, "dynamictopics_k%02d.pkl" % (k) )
unsupervised.nmf.save_nmf_results( results_out_path, collection.topic_ids, all_terms, term_rankings, partition, impl.W, impl.H, topic_labels )
# Need to select value of k?
if len(coherence_scores) > 0:
sx = sorted(coherence_scores.items(), key=operator.itemgetter(1))
sx.reverse()
top_k = [ p[0] for p in sx ][0:min(3,len(sx))]
log.info("- Top recommendations for number of dynamic topics: %s" % ",".join(map(str, top_k)) )
# --------------------------------------------------------------
if __name__ == "__main__":
main()
| apache-2.0 |
rahuldhote/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
rahuldhote/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
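# ----------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of sklearn.covariance):
# the docstring above says the module estimates a covariance matrix together
# with its inverse (the precision matrix); LedoitWolf is one of the exported
# estimators. The random toy data is an assumption for this sketch only.
if __name__ == '__main__':
    import numpy as np
    X = np.random.RandomState(0).randn(200, 5)   # 200 samples, 5 features
    lw = LedoitWolf().fit(X)
    print(lw.covariance_.shape)                  # (5, 5) shrunk covariance estimate
    print(lw.precision_.shape)                   # (5, 5) estimated inverse (precision)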
| mit |
ycasg/PyNLO | src/examples/supercontinuum_with_FROG.py | 2 | 4038 | import numpy as np
import matplotlib.pyplot as plt
import pynlo
FWHM = 0.050 # pulse duration (ps)
pulseWL = 1550 # pulse central wavelength (nm)
EPP = 50e-12 # Energy per pulse (J)
GDD = 0.0 # Group delay dispersion (ps^2)
TOD = 0.0 # Third order dispersion (ps^3)
Window = 10.0 # simulation window (ps)
Steps = 40 # simulation steps
Points = 2**13 # simulation points
error = 0.2
beta2 = -120 # (ps^2/km)
beta3 = 0.00 # (ps^3/km)
beta4 = 0.005 # (ps^4/km)
Length = 10 # length in mm
Alpha = 0.0 # attenuation coefficient (dB/cm)
Gamma = 1000 # Gamma (1/(W km))
fibWL = pulseWL # Center WL of fiber (nm)
Raman = True # Enable Raman effect?
Steep = True # Enable self steepening?
alpha = np.log((10**(Alpha * 0.1))) * 100 # convert from dB/cm to 1/m
# set up plots for the results:
fig = plt.figure(figsize=(8,8))
ax0 = plt.subplot2grid((3,2), (0, 0), rowspan=1)
ax1 = plt.subplot2grid((3,2), (0, 1), rowspan=1)
ax2 = plt.subplot2grid((3,2), (1, 0), rowspan=2, sharex=ax0)
ax3 = plt.subplot2grid((3,2), (1, 1), rowspan=2, sharex=ax1)
######## This is where the PyNLO magic happens! ############################
# create the pulse!
pulse = pynlo.light.DerivedPulses.SechPulse(power = 1, # Power will be scaled by set_epp
T0_ps = FWHM/1.76,
center_wavelength_nm = pulseWL,
time_window_ps = Window,
GDD=GDD, TOD=TOD,
NPTS = Points,
frep_MHz = 100,
power_is_avg = False)
# set the pulse energy!
pulse.set_epp(EPP)
# create the fiber!
fiber1 = pynlo.media.fibers.fiber.FiberInstance()
fiber1.generate_fiber(Length * 1e-3, center_wl_nm=fibWL, betas=(beta2, beta3, beta4),
gamma_W_m=Gamma * 1e-3, gvd_units='ps^n/km', gain=-alpha)
# Propagation
evol = pynlo.interactions.FourWaveMixing.SSFM.SSFM(local_error=error, USE_SIMPLE_RAMAN=True,
disable_Raman = np.logical_not(Raman),
disable_self_steepening = np.logical_not(Steep))
y, AW, AT, pulse_out = evol.propagate(pulse_in=pulse, fiber=fiber1, n_steps=Steps)
########## That's it! Physics complete. Just plotting commands from here! ################
F = pulse.F_THz # Frequency grid of pulse (THz)
def dB(num):
return 10 * np.log10(np.abs(num)**2)
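# Editor's note (hedged): dB() maps a complex field amplitude to power in
# decibels, 10*log10(|A|**2), i.e. 20*log10(|A|); for example dB(10.0) == 20.0.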
zW = dB( np.transpose(AW)[:, (F > 0)] )
zT = dB( np.transpose(AT) )
y_mm = y * 1e3 # convert distance to mm
ax0.plot(pulse_out.F_THz, dB(pulse_out.AW), color = 'r')
ax1.plot(pulse_out.T_ps, dB(pulse_out.AT), color = 'r')
ax0.plot(pulse.F_THz, dB(pulse.AW), color = 'b')
ax1.plot(pulse.T_ps, dB(pulse.AT), color = 'b')
extent = (np.min(F[F > 0]), np.max(F[F > 0]), 0, Length)
ax2.imshow(zW, extent=extent,
vmin=np.max(zW) - 40.0, vmax=np.max(zW),
aspect='auto', origin='lower')
extent = (np.min(pulse.T_ps), np.max(pulse.T_ps), np.min(y_mm), Length)
ax3.imshow(zT, extent=extent,
vmin=np.max(zT) - 40.0, vmax=np.max(zT),
aspect='auto', origin='lower')
ax0.set_ylabel('Intensity (dB)')
ax0.set_ylim( - 80, 0)
ax1.set_ylim( - 40, 40)
ax2.set_ylabel('Propagation distance (mm)')
ax2.set_xlabel('Frequency (THz)')
ax2.set_xlim(0,400)
ax3.set_xlabel('Time (ps)')
fig, axs = plt.subplots(1,2,figsize=(10,5))
for ax, gate_type in zip(axs,('xfrog', 'frog')):
DELAYS, FREQS, extent, spectrogram = pulse_out.spectrogram(gate_type=gate_type, gate_function_width_ps=0.05, time_steps=1000)
ax.imshow(spectrogram, aspect='auto', extent=extent)
ax.set_xlabel('Time (ps)')
ax.set_ylabel('Frequency (THz)')
ax.set_title(gate_type)
plt.show()
| gpl-3.0 |
hadim/public_notebooks | Analysis/Fit_Ellipse/tifffile.py | 3 | 173019 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
# try:
# import _tifffile
# except ImportError:
# warnings.warn(
# "failed to import the optional _tifffile C extension module.\n"
# "Loading of some compressed images will be slow.\n"
# "Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2014.08.24'
__docformat__ = 'restructuredtext en'
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
'TiffSequence')
def imsave(filename, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', and 'software' are passed to
the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution',
'description', 'compress', 'volume', and 'extratags' are passed to
the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> description = u'{"shape": %s}' % str(list(data.shape))
>>> imsave('temp.tif', data, compress=6,
... extratags=[(270, 's', 0, description, True)])
"""
tifargs = {}
for key in ('byteorder', 'bigtiff', 'software', 'writeshape'):
if key in kwargs:
tifargs[key] = kwargs[key]
del kwargs[key]
if 'writeshape' not in kwargs:
kwargs['writeshape'] = True
if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20:
tifargs['bigtiff'] = True
with TiffWriter(filename, **tifargs) as tif:
tif.save(data, **kwargs)
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, filename, bigtiff=False, byteorder=None,
software='tifffile.py'):
"""Create a new TIFF file for writing.
Use bigtiff=True when creating files greater than 2 GB.
Parameters
----------
filename : str
Name of file to write.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the image.
Saved with the first page only.
"""
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
self._byteorder = byteorder
self._software = software
self._fh = open(filename, 'wb')
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._val_format = '8s'
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._val_format = '4s'
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, resolution=None,
description=None, volume=False, writeshape=False, compress=0,
extratags=()):
"""Write image data to TIFF file.
Image data are written in one stripe per plane.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
the data type.
Parameters
----------
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
Few software can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
if photometric not in (None, 'minisblack', 'miniswhite', 'rgb'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
if not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
val_format = self._val_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
# normalize shape of data
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
warnings.warn("volume width or length are not multiple of 16")
volume = False
data = numpy.swapaxes(data, 1, 2)
data = data.reshape(
(data.shape[0] * data.shape[1],) + data.shape[2:])
# data.shape is now normalized 5D or 6D, depending on volume
# (pages, planar_samples, (depth,) height, width, contig_samples)
assert len(data.shape) in (5, 6)
shape = data.shape
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
if volume:
# use tiles to save volume data
tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
tag_offsets = TiffWriter.TAGS['tile_offsets']
else:
# else use strips
tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
tag_offsets = TiffWriter.TAGS['strip_offsets']
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
# Append (code, ifdentry, ifdvalue, writeonce) to tags list.
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format,
pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
# return nominator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif writeshape and shape[0] > 1 and shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if volume:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, shape[-4])
addtag('tile_width', 'I', 1, shape[-2])
addtag('tile_length', 'I', 1, shape[-3])
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, 1) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
addtag('rows_per_strip', 'I', 1,
shape[-3] * (shape[-4] if volume else 1))
# use one strip or tile per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1]
addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
addtag(tag_offsets, offset_format, shape[1], (0, ) * shape[1])
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize
> 2**31-1):
raise ValueError("data too large for non-bigtiff file")
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
# if this fails, try updating Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip and tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip or tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip or tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
def close(self):
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
kwargs : dict
Additional parameters passed to the TiffFile or TiffSequence asarray
function.
Examples
--------
>>> im = imread('test.tif', key=0)
>>> im.shape
(256, 256, 4)
>>> ims = imread(['test.tif', 'test.tif'])
>>> ims.shape
(2, 256, 256, 4)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
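# Hedged sketch (editor's addition, not part of the original tifffile.py):
# lazyattr evaluates the wrapped method once, then setattr() caches the result
# on the instance so later accesses bypass the descriptor entirely.
# _LazyDemo is a hypothetical class used for illustration only.
class _LazyDemo(object):
    @lazyattr
    def answer(self):
        # executed only on the first attribute access
        return 42

assert _LazyDemo().answer == 42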
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
Extra MicroManager non-TIFF metadata in the file, if exists.
All attributes are read-only.
Examples
--------
>>> with TiffFile('test.tif') as tif:
... data = tif.asarray()
... data.shape
(256, 256, 4)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
"""
self._fh = FileHandle(arg, name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile()
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
for series in self.series:
wrap = 0
previous_offset = 0
for page in series.pages:
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
if not self.pages:
return []
series = []
page0 = self.pages[0]
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(page0.dtype))]
elif self.is_lsm:
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_imagej:
shape = []
axes = []
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not self.is_rgb:
shape.append(ij['channels'])
axes.append('C')
remain = len(self.pages) // (product(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
shape.extend(page0.shape)
axes.extend(page0.axes)
axes = ''.join(axes)
series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif self.is_nih:
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
series = [Record(pages=self.pages, shape=shape, axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif page0.is_shaped:
# TODO: shaped files can contain multiple series
shape = page0.tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='Q' * len(shape),
dtype=numpy.dtype(page0.dtype))]
# generic detection of series
if not series:
shapes = []
pages = {}
for page in self.pages:
if not page.shape:
continue
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape not in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def asarray(self, key=None, series=None, memmap=False):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_palette:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
result = stack_pages(pages, memmap=memmap,
colormapped=False, squeeze=False)
elif len(pages) == 1:
return pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_palette, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
s = self.series[series]
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
result = result.reshape(-1)
else:
result = numpy.empty(s.shape, s.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
else:
result = stack_pages(pages, memmap=memmap)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
# try series of expected shapes
result.shape = (-1,) + self.series[series].shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = etree.fromstring(self.pages[0].tags['image_description'].value.decode(errors='ignore'))
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
except (IOError, ValueError):
                                # do not close 'tif' here: it is unbound (or
                                # refers to a previously opened file) when the
                                # constructor raises
warnings.warn(
"ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i+1, size)
record.axes = record.axes.replace(axis, axis+newaxis, 1)
record.shape = tuple(record.shape)
# # squeeze dimensions
# for record in result:
# record.shape, record.axes = squeeze_axes(record.shape, record.axes)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
return self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and metadata tags, if exists.
uic_tags: Record(dict)
Consolidated MetaMorph STK/UIC tags, if exists.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
0. number planes (stk)
1. planar samples_per_pixel
2. image_depth Z (sgi)
3. image_length Y
4. image_width X
5. contig samples_per_pixel
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
# print(tag)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
                # some files contain multiple tags with the same code,
                # e.g. MicroManager files contain two image_description tags
i = 1
while True:
name = "%s_%i" % (tag.name, i)
if name not in tags:
tags[name] = tag
break
pos = fh.tell()
if self.is_lsm or (self.index and self.parent.is_lsm):
            # correct non-standard LSM bitspersample tags
self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_depth' not in tags:
self.image_depth = 1
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_palette:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, planes, image_length, image_width)
else:
self.shape = (3, planes, image_depth, image_length,
image_width)
self.axes = 'C' + self.axes
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
elif self.is_palette:
samples = 1
if 'extra_samples' in self.tags:
samples += len(self.extra_samples)
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, image_length, image_width)
self.axes = 'CYX'
else:
self.shape = (3, image_depth, image_length, image_width)
self.axes = 'CZYX'
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8), )
assert len(self.shape) == len(self.axes)
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
the shape of the returned array might be different from the page shape.
Parameters
----------
squeeze : bool
If True, all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
memmap : bool
If True, use numpy.memmap to read arrays from file if possible.
            Intended for 64-bit systems and files with large amounts of
            contiguous data.
reopen : bool
If True and the parent file handle is closed, the file is
temporarily re-opened (and closed if no exception occurs).
scale_mdgel : bool
If True, MD Gel data will be scaled according to the private
metadata in the second TIFF page. The dtype will be float32.
"""
if not self._shape:
return
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
if any(o < 2 for o in offsets):
raise ValueError("corrupted page")
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x):
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8))
* (bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpackrgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpackints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
decompress = lambda x: decodejpg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
tile.shape = tile_shape
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor == 'horizontal' and not (self.is_tiled and not
self.is_contiguous):
# work around bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map,
result[:, 0, :, :, :, 0], axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
                    result **= 2 # square root data format
result *= scale
if closed:
# TODO: file remains open if an exception occurred above
fh.close()
return result
def _is_memmappable(self, rgbonly, colormapped):
"""Return if image data in file can be memory mapped."""
if not self.parent.filehandle.is_file or not self.is_contiguous:
return False
return not (self.predictor or
(rgbonly and 'extra_samples' in self.tags) or
(colormapped and self.is_palette) or
({'big': '>', 'little': '<'}[sys.byteorder] !=
self.parent.byteorder))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1]
or byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
tags = self.tags
if 'image_description_1' in tags:
# MicroManager
result = imagej_description(tags['image_description_1'].value)
else:
result = imagej_description(tags['image_description'].value)
if 'imagej_metadata' in tags:
try:
result.update(imagej_metadata(
tags['imagej_metadata'].value,
tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""True if page contains a contiguous image."""
return ('planar_configuration' in self.tags and
self.tags['planar_configuration'].value == 1)
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image and not OME or STK."""
try:
# turn off color mapping for OME-TIFF and STK
if self.is_stk or self.is_ome or self.parent.is_ome:
return False
except IndexError:
pass # OME-XML not found in first page
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""True if page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return (
('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ=')) or
('image_description_1' in self.tags and # Micromanager
self.tags['image_description_1'].value.startswith(b'ImageJ=')))
@lazyattr
def is_micromanager(self):
"""True if page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
# scalar value if not strip/tile offsets/byte_counts
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes)
and self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _correct_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
        Old LSM writers may store two 16-bit values in a separate region,
        even though they would fit into the tag's value element.
"""
if self.code == 258 and self.count == 2:
# TODO: test this. Need example file.
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
    Attributes
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> tifs = TiffSequence("test.oif.files/*.tif")
>>> tifs.shape, tifs.axes
((2, 100), 'CT')
>>> data = tifs.asarray()
>>> data.shape
(2, 100, 256, 256)
"""
_patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes don't match.
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
if k.startswith('_'): # does not work with byte
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
* Handle embedded files (for CZI within CZI files).
    * Allow re-opening closed files (for multi-file formats such as OME-TIFF).
* Read numpy arrays and records from file like objects.
Only binary read, seek, tell, and close are supported on embedded files.
    When initialized from another file handle, do not use that handle
    until this FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
        If True, file has a fileno and can be memory mapped.
All attributes are read-only.
"""
__slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
arg : str, File, or FileHandle
File name or open file handle.
mode : str
File open mode in case 'arg' is a file name.
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._arg = arg
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._arg, basestring):
# file name
self._arg = os.path.abspath(self._arg)
self._dir, self._name = os.path.split(self._arg)
self._fh = open(self._arg, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._arg, FileHandle):
# FileHandle
self._fh = self._arg._fh
if self._offset is None:
self._offset = 0
self._offset += self._arg._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._arg._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._arg._name
self._dir = self._arg._dir
else:
# open file object
self._fh = self._arg
if self._offset is None:
self._offset = self._arg.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed stream"
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
self.is_file = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON `%s`" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.array."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for i in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for i in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except KeyError:
raise ValueError("not a MicroManager TIFF file")
results = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
results['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
results['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
results['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
results['comments'] = read_json(fh, byteorder, None, count)
return results
def imagej_metadata(data, bytecounts, byteorder):
"""Return dict from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
if not data[:4] in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description(description):
"""Return dict from ImageJ image_description tag."""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
return result
def _replace_by(module_function, package=None, warn=False):
"""Try replace decorated function by module.function."""
try:
from importlib import import_module
except ImportError:
warnings.warn('could not import module importlib')
return lambda func: func
def decorate(func, module_function=module_function, warn=warn):
try:
module, function = module_function.split('.')
if not package:
module = import_module(module)
else:
module = import_module('.' + module, package=package)
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decodejpg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
import _czifile
image = _czifile.decodejpg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
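# Minimal usage sketch for unpackints above (illustrative helper, not part of
# the original module; the packed bytes and bit size are assumptions).
def _unpackints_example():
    """Unpack two big-endian 12-bit samples, 0xabc and 0xdef, from three
    packed bytes (illustrative only)."""
    return unpackints(b'\xab\xcd\xef', 'uint16', 12, runlen=2)  # -> [2748, 3567]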
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
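            # stretch each bps-bit sample by bit replication: widen it to o bits
            # (the smallest multiple of bps above the dtype width), then shift
            # back down so the result spans the full dtype range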
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
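# Minimal usage sketch for reorient above (illustrative helper, not part of
# the original module; the tiny synthetic image is an assumption).
def _reorient_example():
    """Flip a 1x2x2x1 (I, Y, X, S) array stored 'bottom_right' back to
    'top_left' order (illustrative only)."""
    img = numpy.arange(4).reshape(1, 2, 2, 1)  # image rows [[0, 1], [2, 3]]
    return reorient(img, 'bottom_right')  # rows become [[3, 2], [1, 0]]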
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape don't match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return shape, ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
"""Return data with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to data
shape = data.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
data = data.reshape(shape)
# transpose axes
data = data.transpose([axes.index(ax) for ax in asaxes])
return data
def stack_pages(pages, memmap=False, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
    Additional parameters are passed to the page asarray function.
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
result = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + result.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
else:
result = numpy.empty(shape, dtype=result.dtype)
for i, page in enumerate(pages):
result[i] = page.asarray(*args, **kwargs)
return result
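# Minimal usage sketch for stack_pages above (illustrative helper, not part
# of the original module; assumes `tif` is an open TiffFile whose pages all
# share one shape).
def _stack_pages_example(tif):
    """Stack every page of an open TiffFile into one memory-mapped array
    (illustrative only)."""
    return stack_pages(tif.pages, memmap=True)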
def stripnull(string):
"""Return string truncated at first null character.
Clean NULL terminated C strings.
>>> stripnull(b'string\\x00')
b'string'
"""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def stripascii(string):
"""Return string truncated at last byte that is 7bit ASCII.
Clean NULL separated and terminated TIFF strings.
>>> stripascii(b'string\\x00string\\n\\x01\\x00')
b'string\\x00string\\n'
>>> stripascii(b'\\x00')
b''
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
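# Minimal usage sketch for format_size above (illustrative helper, not part
# of the original module).
def _format_size_example():
    """Format a ~118 MB byte count (illustrative only)."""
    return format_size(123456789)  # -> '118 MB'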
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value, )
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, milisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
minute, milisecond = divmod(milisecond, 1000 * 60)
second, milisecond = divmod(milisecond, 1000)
return datetime.datetime(year, month, day,
hour, minute, second, milisecond)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory.
Print error message on failure.
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
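# Illustrative note (not original code): the class above expands the
# NewSubfileType bit mask, e.g. TIFF_SUBFILE_TYPES()[5] == ('reduced_image', 'mask').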
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
# 'jpeg': decodejpg
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
#'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
32996: ('sgi_matteing', None, None, 1, None), # use extra_samples
32996: ('sgi_datatype', None, None, 1, None), # use sample_format
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
    figure : matplotlib.figure.Figure (optional)
        Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
        Maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
data.shape[-1] < data.shape[-3] // 16 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
raise NotImplementedError("complex type") # TODO: handle complex types
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
if data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
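# Minimal usage sketch for imshow above (illustrative helper, not part of the
# original module; the synthetic random stack and title are assumptions).
def _imshow_example():
    """Display a synthetic 4x64x64 grayscale stack with one slider for the
    leading dimension (illustrative only; requires matplotlib)."""
    from matplotlib import pyplot
    data = numpy.random.randint(0, 255, (4, 64, 64)).astype('uint8')
    figure, subplot, image = imshow(data, title='synthetic stack',
                                    photometric='minisblack')
    pyplot.show()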
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
if float(sys.version[0:3]) < 2.6:
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if page.is_micromanager:
print('MICROMANAGER_FILE_METADATA')
print(Record(tif.micromanager_metadata))
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
if __name__ == "__main__":
sys.exit(main())
| mit |
Sentient07/scikit-learn | sklearn/preprocessing/tests/test_label.py | 40 | 18519 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
# one-class case defaults to negative label
# For dense case:
inp = ["pos", "pos", "pos", "pos"]
lb = LabelBinarizer(sparse_output=False)
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# For sparse case:
lb = LabelBinarizer(sparse_output=True)
got = lb.fit_transform(inp)
assert_true(issparse(got))
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got.toarray())
assert_array_equal(lb.inverse_transform(got.toarray()), inp)
lb = LabelBinarizer(sparse_output=False)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert_equal(got.indices.dtype, got.indptr.dtype)
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert_equal(got.indices.dtype, got.indptr.dtype)
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
zhenv5/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
    # we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
B3AU/waveTree | sklearn/decomposition/tests/test_truncated_svd.py | 8 | 2692 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "array" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
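# Rough usage sketch (illustrative comments, not a test): a typical LSA-style
# reduction of a sparse term-document matrix to 100 components would look like
#   svd = TruncatedSVD(n_components=100, random_state=0)
#   X_reduced = svd.fit_transform(X)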
| bsd-3-clause |
AustereCuriosity/astropy | astropy/visualization/tests/test_lupton_rgb.py | 2 | 9330 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for RGB Images
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import tempfile
import pytest
import numpy as np
from numpy.testing import assert_equal
from ...convolution import convolve, Gaussian2DKernel
from .. import lupton_rgb
try:
import matplotlib # noqa
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
# Set display=True to get matplotlib imshow windows to help with debugging.
display = False
def display_rgb(rgb, title=None):
"""Display an rgb image using matplotlib (useful for debugging)"""
import matplotlib.pyplot as plt
plt.imshow(rgb, interpolation='nearest', origin='lower')
if title:
plt.title(title)
plt.show()
return plt
def saturate(image, satValue):
"""
Return image with all points above satValue set to NaN.
Simulates saturation on an image, so we can test 'replace_saturated_pixels'
"""
result = image.copy()
saturated = image > satValue
result[saturated] = np.nan
return result
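# Illustrative sketch (made-up values): saturate(np.array([1., 5., 10.]), 6.)
# returns array([ 1., 5., nan]) -- values above satValue become NaN.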
def random_array(dtype, N=100):
return np.array(np.random.random(N)*100, dtype=dtype)
def test_compute_intensity_1_float():
image_r = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_1_uint():
image_r = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_3_float():
image_r = random_array(np.float64)
image_g = random_array(np.float64)
image_b = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)/3.0)
def test_compute_intensity_3_uint():
image_r = random_array(np.uint8)
image_g = random_array(np.uint8)
image_b = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)//3)
class TestLuptonRgb(object):
"""A test case for Rgb"""
def setup_method(self, method):
np.random.seed(1000) # so we always get the same images.
self.min_, self.stretch_, self.Q = 0, 5, 20 # asinh
width, height = 85, 75
self.width = width
self.height = height
shape = (width, height)
image_r = np.zeros(shape)
image_g = np.zeros(shape)
image_b = np.zeros(shape)
# pixel locations, values and colors
points = [[15, 15], [50, 45], [30, 30], [45, 15]]
values = [1000, 5500, 600, 20000]
g_r = [1.0, -1.0, 1.0, 1.0]
r_i = [2.0, -0.5, 2.5, 1.0]
# Put pixels in the images.
for p, v, gr, ri in zip(points, values, g_r, r_i):
image_r[p[0], p[1]] = v*pow(10, 0.4*ri)
image_g[p[0], p[1]] = v*pow(10, 0.4*gr)
image_b[p[0], p[1]] = v
# convolve the image with a reasonable PSF, and add Gaussian background noise
def convolve_with_noise(image, psf):
convolvedImage = convolve(image, psf, boundary='extend', normalize_kernel=True)
randomImage = np.random.normal(0, 2, image.shape)
return randomImage + convolvedImage
psf = Gaussian2DKernel(2.5)
self.image_r = convolve_with_noise(image_r, psf)
self.image_g = convolve_with_noise(image_g, psf)
self.image_b = convolve_with_noise(image_b, psf)
def test_Asinh(self):
"""Test creating an RGB image using an asinh stretch"""
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscale(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensity(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityPedestal(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity
where the images each have a pedestal added"""
pedestal = [100, 400, -400]
self.image_r += pedestal[0]
self.image_g += pedestal[1]
self.image_b += pedestal[2]
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b, pedestal=pedestal)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityBW(self):
"""Test creating a black-and-white image using an asinh stretch estimated
using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r)
rgbImage = map.make_rgb_image(self.image_r, self.image_r, self.image_r)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_make_rgb(self):
"""Test the function that does it all"""
satValue = 1000.0
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q, filename=temp)
assert os.path.exists(temp.name)
def test_make_rgb_saturated_fix(self):
pytest.skip('saturation correction is not implemented')
satValue = 1000.0
# TODO: Cannot test with these options yet, as that part of the code is not implemented.
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q,
saturated_border_width=1, saturated_pixel_value=2000,
filename=temp)
def test_linear(self):
"""Test using a specified linear stretch"""
map = lupton_rgb.LinearMapping(-8.45, 13.44)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_linear_min_max(self):
"""Test using a min/max linear stretch determined from one image"""
map = lupton_rgb.LinearMapping(image=self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_saturated(self):
"""Test interpolating saturated pixels"""
pytest.skip('replaceSaturatedPixels is not implemented in astropy yet')
satValue = 1000.0
self.image_r = saturate(self.image_r, satValue)
self.image_g = saturate(self.image_g, satValue)
self.image_b = saturate(self.image_b, satValue)
lupton_rgb.replaceSaturatedPixels(self.image_r, self.image_g, self.image_b, 1, 2000)
# Check that we replaced those NaNs with some reasonable value
assert np.isfinite(self.image_r.getImage().getArray()).all()
assert np.isfinite(self.image_g.getImage().getArray()).all()
assert np.isfinite(self.image_b.getImage().getArray()).all()
# Prepare for generating an output file
self.imagesR = self.imagesR.getImage()
self.imagesG = self.imagesG.getImage()
self.imagesB = self.imagesB.getImage()
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_different_shapes_asserts(self):
with pytest.raises(ValueError) as excinfo:
# just swap the dimensions to get a differently-shaped 'r'
image_r = self.image_r.reshape(self.height, self.width)
lupton_rgb.make_lupton_rgb(image_r, self.image_g, self.image_b)
assert "shapes must match" in str(excinfo.value)
| bsd-3-clause |
kakaba2009/MachineLearning | python/src/mylib/mplot1.py | 1 | 2808 | import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.dates as dates
import matplotlib.pyplot as plt
from matplotlib import style
from pandas.tseries.offsets import *
from mpl_toolkits.mplot3d import Axes3D
from pandas.tools.plotting import radviz
from matplotlib import ticker as mticker
from pandas.tools.plotting import lag_plot
from pandas.tools.plotting import scatter_matrix
from pandas.tools.plotting import andrews_curves
from pandas.tools.plotting import bootstrap_plot
from pandas.tools.plotting import parallel_coordinates
from pandas.tools.plotting import autocorrelation_plot
def pandas_2d(X, log = False):
X.plot(logy = log)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
def pandas_scatter_matrix(df):
scatter_matrix(df, alpha=0.2, figsize=(10, 10), diagonal='kde')
plt.show()
def pandas_density(df):
df.plot.kde()
plt.show()
def pandas_andrews_curves(df, column):
fig = plt.figure(1)
andrews_curves(df, column)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
def pandas_lag_plot(df, l):
fig = plt.figure()
func = lag_plot(df, lag=l)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
return func
def pandas_bootstrap_plot(df):
bootstrap_plot(df, size=50, samples=500, color='grey')
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
def pandas_radviz(df, column):
radviz(df, column)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
def pandas_autocorrelation_plot(df):
autocorrelation_plot(df)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
def pandas_parallel_coordinates(df, column):
parallel_coordinates(df, column)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
def pandas_box(df):
df.plot.box()
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
def pandas_hist(df):
df.plot.hist(stacked=True, bins=20, orientation='vertical', cumulative=True, alpha=0.5)
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
plt.show()
def pandas_rolling_std(df, day):
#pd.rolling_mean(df, window = 2)[1:10] # in future versions you want to resample separately
r = df.rolling(window = day)
#r.agg, r.apply, r.count, r.exclusions, r.max, r.median, r.name, r.quantile, r.kurt, r.cov, r.corr, r.aggregate, r.std, r.skew, r.sum, r.var
#df.plot(style = 'k--')
r.std().plot(subplots=True, style='k')
plt.show()
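# Rough usage sketch (comments only, not executed; `df` is assumed to be a
# time-indexed DataFrame with numeric columns, column names are hypothetical):
#   pandas_rolling_std(df, 30)        # 30-period rolling standard deviation
#   pandas_lag_plot(df['close'], 1)   # lag-1 scatter plot of a single series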
| apache-2.0 |
aprotopopov/lifetimes | lifetimes/fitters/beta_geo_fitter.py | 1 | 10009 | """Beta Geo Fitter, also known as BG/NBD model."""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
from numpy import log, asarray, any as npany, c_ as vconcat, isinf, isnan, \
where, exp
from numpy import ones_like
from pandas import DataFrame
from scipy.special import gammaln, hyp2f1, beta, gamma
from scipy import misc
from . import BaseFitter
from ..utils import _fit, _scale_time, _check_inputs
from ..generate_data import beta_geometric_nbd_model
class BetaGeoFitter(BaseFitter):
"""
Also known as the BG/NBD model.
Based on [1], this model has the following assumptions:
1) Each individual, i, has a hidden lambda_i and p_i parameter
2) These come from a population wide Gamma and a Beta distribution
respectively.
3) Individuals' purchases follow a Poisson process with rate lambda_i*t.
4) After each purchase, an individual has a p_i probability of dying
(never buying again).
[1] Fader, Peter S., Bruce G.S. Hardie, and Ka Lok Lee (2005a),
"Counting Your Customers the Easy Way: An Alternative to the
Pareto/NBD Model," Marketing Science, 24 (2), 275-84.
"""
def __init__(self, penalizer_coef=0.0):
"""Initialization, set penalizer_coef."""
self.penalizer_coef = penalizer_coef
def fit(self, frequency, recency, T, iterative_fitting=1,
initial_params=None, verbose=False, tol=1e-4, index=None,
fit_method='Nelder-Mead', maxiter=2000, **kwargs):
"""
Fit the data to the BG/NBD model.
Parameters:
frequency: the frequency vector of customers' purchases (denoted x
in literature).
recency: the recency vector of customers' purchases (denoted t_x in
literature).
T: the vector of customers' age (time since first purchase)
iterative_fitting: perform iterative_fitting fits over
random/warm-started initial params
initial_params: set the initial parameters for the fitter.
verbose: set to true to print out convergence diagnostics.
tol: tolerance for termination of the function minimization
process.
index: index for resulted DataFrame which is accessible via
self.data
fit_method: fit_method to pass to scipy.optimize.minimize
maxiter: max iterations for the optimizer in scipy.optimize.minimize;
will be overwritten if set in kwargs.
kwargs: key word arguments to pass to the scipy.optimize.minimize
function as options dict
Returns:
self, with additional properties and methods like params_ and
predict
"""
frequency = asarray(frequency)
recency = asarray(recency)
T = asarray(T)
_check_inputs(frequency, recency, T)
self._scale = _scale_time(T)
scaled_recency = recency * self._scale
scaled_T = T * self._scale
params, self._negative_log_likelihood_ = _fit(
self._negative_log_likelihood,
[frequency, scaled_recency, scaled_T, self.penalizer_coef],
iterative_fitting,
initial_params,
4,
verbose,
tol,
fit_method,
maxiter,
**kwargs)
self.params_ = OrderedDict(zip(['r', 'alpha', 'a', 'b'], params))
self.params_['alpha'] /= self._scale
self.data = DataFrame(vconcat[frequency, recency, T],
columns=['frequency', 'recency', 'T'])
if index is not None:
self.data.index = index
self.generate_new_data = lambda size=1: beta_geometric_nbd_model(
T, *self._unload_params('r', 'alpha', 'a', 'b'), size=size)
self.predict = self.conditional_expected_number_of_purchases_up_to_time
return self
@staticmethod
def _negative_log_likelihood(params, freq, rec, T, penalizer_coef):
if npany(asarray(params) <= 0):
return np.inf
r, alpha, a, b = params
A_1 = gammaln(r + freq) - gammaln(r) + r * log(alpha)
A_2 = (gammaln(a + b) + gammaln(b + freq) - gammaln(b) -
gammaln(a + b + freq))
A_3 = -(r + freq) * log(alpha + T)
d = vconcat[ones_like(freq), (freq > 0)]
A_4 = log(a) - log(b + where(freq == 0, 1, freq) - 1) - \
(r + freq) * log(rec + alpha)
A_4[isnan(A_4) | isinf(A_4)] = 0
penalizer_term = penalizer_coef * sum(np.asarray(params) ** 2)
return -(A_1 + A_2 + misc.logsumexp(
vconcat[A_3, A_4], axis=1, b=d)).mean() + penalizer_term
def expected_number_of_purchases_up_to_time(self, t):
"""
Calculate the expected number of repeat purchases up to time t.
Calculate repeat purchases for a randomly chosen individual from the
population.
Parameters:
t: a scalar or array of times.
Returns: a scalar or array
"""
r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')
hyp = hyp2f1(r, b, a + b - 1, t / (alpha + t))
return (a + b - 1) / (a - 1) * (1 - hyp * (alpha / (alpha + t)) ** r)
def conditional_expected_number_of_purchases_up_to_time(self, t, frequency,
recency, T):
"""
Conditional expected number of purchases up to time.
Calculate the expected number of repeat purchases up to time t for a
randomly chosen individual from the population, given they have
purchase history (frequency, recency, T)
Parameters:
t: a scalar or array of times.
frequency: a scalar: historical frequency of customer.
recency: a scalar: historical recency of customer.
T: a scalar: age of the customer.
Returns: a scalar or array
"""
x = frequency
r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')
_a = r + x
_b = b + x
_c = a + b + x - 1
_z = t / (alpha + T + t)
ln_hyp_term = np.log(hyp2f1(_a, _b, _c, _z))
# if the value is inf, we are using a different but equivalent
# formula to compute the function evaluation.
ln_hyp_term_alt = np.log(hyp2f1(_c - _a, _c - _b, _c, _z)) + \
(_c - _a - _b) * np.log(1 - _z)
ln_hyp_term = where(np.isinf(ln_hyp_term), ln_hyp_term_alt, ln_hyp_term)
first_term = (a + b + x - 1) / (a - 1)
second_term = (1 - exp(ln_hyp_term + (r + x) *
np.log((alpha + T) / (alpha + t + T))))
numerator = first_term * second_term
denominator = 1 + (x > 0) * (a / (b + x - 1)) * \
((alpha + T) / (alpha + recency)) ** (r + x)
return numerator / denominator
def conditional_probability_alive(self, frequency, recency, T,
ln_exp_max=300):
"""
Compute conditional probability alive.
Compute the probability that a customer with history
(frequency, recency, T) is currently alive.
From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf
Parameters:
frequency: a scalar: historical frequency of customer.
recency: a scalar: historical recency of customer.
T: a scalar: age of the customer.
ln_exp_max: to what value clip log_div equation
Returns: a scalar
"""
r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')
log_div = (r + frequency) * log(
(alpha + T) / (alpha + recency)) + log(
a / (b + where(frequency == 0, 1, frequency) - 1))
return where(frequency == 0, 1.,
where(log_div > ln_exp_max, 0.,
1. / (1 + exp(np.clip(log_div, None, ln_exp_max)))))
def conditional_probability_alive_matrix(self, max_frequency=None,
max_recency=None):
"""
Compute the probability alive matrix.
Parameters:
max_frequency: the maximum frequency to plot. Default is max
observed frequency.
max_recency: the maximum recency to plot. This also determines
the age of the customer. Default to max observed age.
Returns:
A matrix of the form [t_x: historical recency,
x: historical frequency]
"""
max_frequency = max_frequency or int(self.data['frequency'].max())
max_recency = max_recency or int(self.data['T'].max())
return np.fromfunction(self.conditional_probability_alive,
(max_frequency + 1, max_recency + 1),
T=max_recency).T
def probability_of_n_purchases_up_to_time(self, t, n):
"""
Compute the probability of n purchases.
P( N(t) = n | model )
where N(t) is the number of repeat purchases a customer makes in t
units of time.
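Example (illustrative sketch, assuming the model has already been fit to a
fitter named `bgf`):
>>> bgf.probability_of_n_purchases_up_to_time(30, 2)  # P(2 repeat purchases in 30 periods)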
"""
r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')
first_term = (beta(a, b + n) / beta(a, b) *
gamma(r + n) / gamma(r) /
gamma(n + 1) * (alpha / (alpha + t)) ** r *
(t / (alpha + t)) ** n)
if n > 0:
j = np.arange(0, n)
finite_sum = (gamma(r + j) / gamma(r) / gamma(j + 1) *
(t / (alpha + t)) ** j).sum()
second_term = (beta(a + 1, b + n - 1) /
beta(a, b) * (1 - (alpha / (alpha + t)) ** r *
finite_sum))
else:
second_term = 0
return first_term + second_term
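# -----------------------------------------------------------------------------
# Rough end-to-end sketch (comments only, not executed here; `summary` and its
# column names are assumptions about the caller's RFM data):
#
#     from lifetimes import BetaGeoFitter
#     bgf = BetaGeoFitter(penalizer_coef=0.0)
#     bgf.fit(summary['frequency'], summary['recency'], summary['T'])
#     # expected number of repeat purchases per customer over the next 10 periods
#     bgf.conditional_expected_number_of_purchases_up_to_time(
#         10, summary['frequency'], summary['recency'], summary['T'])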
| mit |
rgommers/statsmodels | statsmodels/graphics/boxplots.py | 1 | 15906 | """Variations on boxplots."""
# Author: Ralf Gommers
# Based on code by Flavio Coelho and Teemu Ikonen.
from statsmodels.compat.python import zip
import numpy as np
from scipy.stats import gaussian_kde
from . import utils
__all__ = ['violinplot', 'beanplot']
def violinplot(data, ax=None, labels=None, positions=None, side='both',
show_boxplot=True, plot_opts={}):
"""Make a violin plot of each dataset in the `data` sequence.
A violin plot is a boxplot combined with a kernel density estimate of the
probability density function per point.
Parameters
----------
data : sequence of ndarrays
Data arrays, one array per value in `positions`.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
labels : list of str, optional
Tick labels for the horizontal axis. If not given, integers
``1..len(data)`` are used.
positions : array_like, optional
Position array, used as the horizontal axis of the plot. If not given,
spacing of the violins will be equidistant.
side : {'both', 'left', 'right'}, optional
How to plot the violin. Default is 'both'. The 'left', 'right'
options can be used to create asymmetric violin plots.
show_boxplot : bool, optional
Whether or not to show normal box plots on top of the violins.
Default is True.
plot_opts : dict, optional
A dictionary with plotting options. Any of the following can be
provided, if not present in `plot_opts` the defaults will be used::
- 'violin_fc', MPL color. Fill color for violins. Default is 'y'.
- 'violin_ec', MPL color. Edge color for violins. Default is 'k'.
- 'violin_lw', scalar. Edge linewidth for violins. Default is 1.
- 'violin_alpha', float. Transparency of violins. Default is 0.5.
- 'cutoff', bool. If True, limit violin range to data range.
Default is False.
- 'cutoff_val', scalar. Where to cut off violins if `cutoff` is
True. Default is 1.5 standard deviations.
- 'cutoff_type', {'std', 'abs'}. Whether cutoff value is absolute,
or in standard deviations. Default is 'std'.
- 'violin_width' : float. Relative width of violins. Max available
space is 1, default is 0.8.
- 'label_fontsize', MPL fontsize. Adjusts fontsize only if given.
- 'label_rotation', scalar. Adjusts label rotation only if given.
Specify in degrees.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
beanplot : Bean plot, builds on `violinplot`.
matplotlib.pyplot.boxplot : Standard boxplot.
Notes
-----
The appearance of violins can be customized with `plot_opts`. If
customization of boxplot elements is required, set `show_boxplot` to False
and plot it on top of the violins by calling the Matplotlib `boxplot`
function directly. For example::
violinplot(data, ax=ax, show_boxplot=False)
ax.boxplot(data, sym='cv', whis=2.5)
It can happen that the axis labels or tick labels fall outside the plot
area, especially with rotated labels on the horizontal axis. With
Matplotlib 1.1 or higher, this can easily be fixed by calling
``ax.tight_layout()``. With older Matplotlib one has to use ``plt.rc`` or
``plt.rcParams`` to fix this, for example::
plt.rc('figure.subplot', bottom=0.25)
violinplot(data, ax=ax)
References
----------
J.L. Hintze and R.D. Nelson, "Violin Plots: A Box Plot-Density Trace
Synergism", The American Statistician, Vol. 52, pp.181-84, 1998.
Examples
--------
We use the American National Election Survey 1996 dataset, which has Party
Identification of respondents as independent variable and (among other
data) age as dependent variable.
>>> data = sm.datasets.anes96.load_pandas()
>>> party_ID = np.arange(7)
>>> labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
... "Independent-Independent", "Independent-Republican",
... "Weak Republican", "Strong Republican"]
Group age by party ID, and create a violin plot with it:
>>> plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
>>> age = [data.exog['age'][data.endog == id] for id in party_ID]
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> sm.graphics.violinplot(age, ax=ax, labels=labels,
... plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
... 'label_fontsize':'small',
... 'label_rotation':30})
>>> ax.set_xlabel("Party identification of respondent.")
>>> ax.set_ylabel("Age")
>>> plt.show()
.. plot:: plots/graphics_boxplot_violinplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if positions is None:
positions = np.arange(len(data)) + 1
# Determine available horizontal space for each individual violin.
pos_span = np.max(positions) - np.min(positions)
width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('violin_width', 0.8) / 2.])
# Plot violins.
for pos_data, pos in zip(data, positions):
xvals, violin = _single_violin(ax, pos, pos_data, width, side,
plot_opts)
if show_boxplot:
ax.boxplot(data, notch=1, positions=positions, vert=1)
# Set ticks and tick labels of horizontal axis.
_set_ticks_labels(ax, data, labels, positions, plot_opts)
return fig
def _single_violin(ax, pos, pos_data, width, side, plot_opts):
"""Draw a single violin for ``pos_data`` at position ``pos`` and return (xvals, violin)."""
def _violin_range(pos_data, plot_opts):
"""Return array with correct range, with which violins can be plotted."""
cutoff = plot_opts.get('cutoff', False)
cutoff_type = plot_opts.get('cutoff_type', 'std')
cutoff_val = plot_opts.get('cutoff_val', 1.5)
s = 0.0
if not cutoff:
if cutoff_type == 'std':
s = cutoff_val * np.std(pos_data)
else:
s = cutoff_val
x_lower = kde.dataset.min() - s
x_upper = kde.dataset.max() + s
return np.linspace(x_lower, x_upper, 100)
pos_data = np.asarray(pos_data)
# Kernel density estimate for data at this position.
kde = gaussian_kde(pos_data)
# Create violin for pos, scaled to the available space.
xvals = _violin_range(pos_data, plot_opts)
violin = kde.evaluate(xvals)
violin = width * violin / violin.max()
if side == 'both':
envelope_l, envelope_r = (-violin + pos, violin + pos)
elif side == 'right':
envelope_l, envelope_r = (pos, violin + pos)
elif side == 'left':
envelope_l, envelope_r = (-violin + pos, pos)
else:
msg = "`side` parameter should be one of {'left', 'right', 'both'}."
raise ValueError(msg)
# Draw the violin.
ax.fill_betweenx(xvals, envelope_l, envelope_r,
facecolor=plot_opts.get('violin_fc', 'y'),
edgecolor=plot_opts.get('violin_ec', 'k'),
lw=plot_opts.get('violin_lw', 1),
alpha=plot_opts.get('violin_alpha', 0.5))
return xvals, violin
def _set_ticks_labels(ax, data, labels, positions, plot_opts):
"""Set ticks and labels on horizontal axis."""
# Set xticks and limits.
ax.set_xlim([np.min(positions) - 0.5, np.max(positions) + 0.5])
ax.set_xticks(positions)
label_fontsize = plot_opts.get('label_fontsize')
label_rotation = plot_opts.get('label_rotation')
if label_fontsize or label_rotation:
from matplotlib.artist import setp
if labels is not None:
if not len(labels) == len(data):
msg = "Length of `labels` should equal length of `data`."
raise ValueError(msg)
xticknames = ax.set_xticklabels(labels)
if label_fontsize:
setp(xticknames, fontsize=label_fontsize)
if label_rotation:
setp(xticknames, rotation=label_rotation)
return
def beanplot(data, ax=None, labels=None, positions=None, side='both',
jitter=False, plot_opts={}):
"""Make a bean plot of each dataset in the `data` sequence.
A bean plot is a combination of a `violinplot` (kernel density estimate of
the probability density function per point) with a line-scatter plot of all
individual data points.
Parameters
----------
data : sequence of ndarrays
Data arrays, one array per value in `positions`.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
labels : list of str, optional
Tick labels for the horizontal axis. If not given, integers
``1..len(data)`` are used.
positions : array_like, optional
Position array, used as the horizontal axis of the plot. If not given,
spacing of the violins will be equidistant.
side : {'both', 'left', 'right'}, optional
How to plot the violin. Default is 'both'. The 'left', 'right'
options can be used to create asymmetric violin plots.
jitter : bool, optional
If True, jitter markers within violin instead of plotting regular lines
around the center. This can be useful if the data is very dense.
plot_opts : dict, optional
A dictionary with plotting options. All the options for `violinplot`
can be specified, they will simply be passed to `violinplot`. Options
specific to `beanplot` are:
- 'bean_color', MPL color. Color of bean plot lines. Default is 'k'.
Also used for jitter marker edge color if `jitter` is True.
- 'bean_size', scalar. Line length as a fraction of maximum length.
Default is 0.5.
- 'bean_lw', scalar. Linewidth, default is 0.5.
- 'bean_show_mean', bool. If True (default), show mean as a line.
- 'bean_show_median', bool. If True (default), show median as a
marker.
- 'bean_mean_color', MPL color. Color of mean line. Default is 'b'.
- 'bean_mean_lw', scalar. Linewidth of mean line, default is 2.
- 'bean_median_color', MPL color. Color of median marker. Default
is 'r'.
- 'bean_median_marker', MPL marker. Marker type, default is '+'.
- 'jitter_marker', MPL marker. Marker type for ``jitter=True``.
Default is 'o'.
- 'jitter_marker_size', int. Marker size. Default is 4.
- 'jitter_fc', MPL color. Jitter marker face color. Default is None.
- 'bean_legend_text', str. If given, add a legend with given text.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
violinplot : Violin plot, also used internally in `beanplot`.
matplotlib.pyplot.boxplot : Standard boxplot.
References
----------
P. Kampstra, "Beanplot: A Boxplot Alternative for Visual Comparison of
Distributions", J. Stat. Soft., Vol. 28, pp. 1-9, 2008.
Examples
--------
We use the American National Election Survey 1996 dataset, which has Party
Identification of respondents as independent variable and (among other
data) age as dependent variable.
>>> data = sm.datasets.anes96.load_pandas()
>>> party_ID = np.arange(7)
>>> labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
... "Independent-Independent", "Independent-Republican",
... "Weak Republican", "Strong Republican"]
Group age by party ID, and create a violin plot with it:
>>> plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
>>> age = [data.exog['age'][data.endog == id] for id in party_ID]
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> sm.graphics.beanplot(age, ax=ax, labels=labels,
... plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
... 'label_fontsize':'small',
... 'label_rotation':30})
>>> ax.set_xlabel("Party identification of respondent.")
>>> ax.set_ylabel("Age")
>>> plt.show()
.. plot:: plots/graphics_boxplot_beanplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if positions is None:
positions = np.arange(len(data)) + 1
# Determine available horizontal space for each individual violin.
pos_span = np.max(positions) - np.min(positions)
width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('bean_size', 0.5) / 2.])
legend_txt = plot_opts.get('bean_legend_text', None)
for pos_data, pos in zip(data, positions):
# Draw violins.
xvals, violin = _single_violin(ax, pos, pos_data, width, side, plot_opts)
if jitter:
# Draw data points at random coordinates within violin envelope.
jitter_coord = pos + _jitter_envelope(pos_data, xvals, violin, side)
ax.plot(jitter_coord, pos_data, ls='',
marker=plot_opts.get('jitter_marker', 'o'),
ms=plot_opts.get('jitter_marker_size', 4),
mec=plot_opts.get('bean_color', 'k'),
mew=1, mfc=plot_opts.get('jitter_fc', 'none'),
label=legend_txt)
else:
# Draw bean lines.
ax.hlines(pos_data, pos - width, pos + width,
lw=plot_opts.get('bean_lw', 0.5),
color=plot_opts.get('bean_color', 'k'),
label=legend_txt)
# Show legend if required.
if legend_txt is not None:
_show_legend(ax)
legend_txt = None # ensure we get one entry per call to beanplot
# Draw mean line.
if plot_opts.get('bean_show_mean', True):
ax.hlines(np.mean(pos_data), pos - width, pos + width,
lw=plot_opts.get('bean_mean_lw', 2.),
color=plot_opts.get('bean_mean_color', 'b'))
# Draw median marker.
if plot_opts.get('bean_show_median', True):
ax.plot(pos, np.median(pos_data),
marker=plot_opts.get('bean_median_marker', '+'),
color=plot_opts.get('bean_median_color', 'r'))
# Set ticks and tick labels of horizontal axis.
_set_ticks_labels(ax, data, labels, positions, plot_opts)
return fig
def _jitter_envelope(pos_data, xvals, violin, side):
"""Determine envelope for jitter markers."""
if side == 'both':
low, high = (-1., 1.)
elif side == 'right':
low, high = (0, 1.)
elif side == 'left':
low, high = (-1., 0)
else:
raise ValueError("`side` input incorrect: %s" % side)
jitter_envelope = np.interp(pos_data, xvals, violin)
jitter_coord = jitter_envelope * np.random.uniform(low=low, high=high,
size=pos_data.size)
return jitter_coord
def _show_legend(ax):
"""Utility function to show legend."""
leg = ax.legend(loc=1, shadow=True, fancybox=True, labelspacing=0.2,
borderpad=0.15)
ltext = leg.get_texts()
llines = leg.get_lines()
frame = leg.get_frame()
from matplotlib.artist import setp
setp(ltext, fontsize='small')
setp(llines, linewidth=1)
| bsd-3-clause |
LionelR/pyair | pyair/reg.py | 2 | 23760 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Utilities for computing regulatory air-quality statistics.
"""
import unicodedata
import pandas as pd
import pandas.tseries.offsets as pdoffset
from io import StringIO
class FreqException(Exception):
def __init__(self, err):
self.err = "Erreur de fréquence : %s" % err
def __str__(self):
return self.err
def moyennes_glissantes(df, sur=8, rep=0.75):
"""
Calcul de moyennes glissantes
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
sur: (int, par défaut 8) Nombre d'observations sur lequel s'appuiera le
calcul
rep: (float, défaut 0.75) Taux de réprésentativité en dessous duquel le
calcul renverra NaN
Retourne:
Un DataFrame des moyennes glissantes calculées
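Exemple (illustratif, ``df`` étant un DataFrame de mesures horaires) :
>>> mg8h = moyennes_glissantes(df, sur=8, rep=0.75)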
"""
return pd.rolling_mean(df, window=sur, min_periods=rep * sur)
def consecutive(df, valeur, sur=3):
"""Calcule si une valeur est dépassée durant une période donnée. Détecte
un dépassement de valeur sur X heures/jours/... consécutifs
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
valeur: (float) valeur à chercher le dépassement (strictement supérieur à)
sur: (int) Nombre d'observations consécutives où la valeur doit être dépassée
Retourne:
Un DataFrame de valeurs, de même taille (shape) que le df d'entrée, dont toutes
les valeurs sont supprimées, sauf celles supérieures à la valeur de référence
et positionnées sur les heures de début de dépassements
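Exemple (illustratif) : valeurs d'ozone dépassant 240 sur 3 heures consécutives :
>>> alerte = consecutive(df, valeur=240, sur=3)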
"""
dep = pd.rolling_max(df.where(df > valeur), window=sur, min_periods=sur)
return dep
def depassement(df, valeur):
"""
Calcule les dépassements d'une valeur.
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
valeur: (float) valeur à chercher le dépassement (strictement supérieur à)
Retourne:
Un DataFrame de valeurs, de même taille (shape) que le df d'entrée, dont toutes
les valeurs sont supprimées, sauf celles supérieures à la valeur de référence
"""
dep = df.where(df > valeur)
return dep
def nombre_depassement(df, valeur, freq=None):
"""
Calcule le nombre de dépassement d'une valeur sur l'intégralité du temps,
ou suivant un regroupement temporel.
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
valeur: (float) valeur à chercher le dépassement (strictement supérieur à)
freq: (str ou None): Fréquence de temps sur lequel effectué un regroupement.
freq peut prendre les valeurs 'H' pour heure, 'D' pour jour, 'W' pour semaine,
'M' pour mois et 'A' pour année, ou None pour ne pas faire de regroupement.
Le nombre de dépassement sera alors regroupé suivant cette fréquence de temps.
Retourne:
Une Series du nombre de dépassement, total suivant la fréquence intrinsèque
du DataFrame d'entrée, ou aggloméré suivant la fréquence de temps choisie.
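Exemple (illustratif) : nombre de dépassements horaires de 200, regroupés par année :
>>> nombre_depassement(df, valeur=200, freq='A')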
"""
dep = depassement(df, valeur)
if freq is not None:
dep = dep.resample(freq, how='sum')
return dep.count()
def aot40_vegetation(df, nb_an):
"""
Calcul de l'AOT40 du 1er mai au 31 juillet
*AOT40 : AOT 40 ( exprimé en micro g/m³ par heure ) signifie la somme des
différences entre les concentrations horaires supérieures à 40 parties par
milliard ( 40 ppb soit 80 micro g/m³ ), durant une période donnée en
utilisant uniquement les valeurs sur 1 heure mesurées quotidiennement
entre 8 heures (début de la mesure) et 20 heures (pile, fin de la mesure) CET,
ce qui correspond à de 8h à 19h TU (donnant bien 12h de mesures, 8h donnant
la moyenne horaire de 7h01 à 8h00)
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
nb_an: (int) Nombre d'années contenu dans le df, et servant à diviser le
résultat retourné
Retourne:
Un DataFrame de résultat de calcul
"""
return _aot(df.tshift(1), nb_an=nb_an, limite=80, mois_debut=5, mois_fin=7,
heure_debut=8, heure_fin=19)
def aot40_foret(df, nb_an):
"""
Calcul de l'AOT40 du 1er avril au 30 septembre
*AOT40 : AOT 40 ( exprimé en micro g/m³ par heure ) signifie la somme des
différences entre les concentrations horaires supérieures à 40 parties par
milliard ( 40 ppb soit 80 micro g/m³ ), durant une période donnée en
utilisant uniquement les valeurs sur 1 heure mesurées quotidiennement
entre 8 heures et 20 heures (CET) <==> 7h à 19h TU
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
nb_an: (int) Nombre d'années contenu dans le df, et servant à diviser le
résultat retourné
Retourne:
Un DataFrame de résultat de calcul
"""
return _aot(df.tshift(1), nb_an=nb_an, limite=80, mois_debut=4, mois_fin=9,
heure_debut=8, heure_fin=19)
def _aot(df, nb_an=1, limite=80, mois_debut=5, mois_fin=7,
heure_debut=7, heure_fin=19):
"""
Calcul de l'AOT de manière paramètrable. Voir AOT40_vegetation ou
AOT40_foret pour des paramètres préalablement fixés.
Paramètres:
df: DataFrame de mesures sur lequel appliqué le calcul
nb_an: (int) Nombre d'années contenu dans le df, et servant à diviser le
résultat retourné
limite: (float) valeur limite au delà de laquelle les différences seront
additionnées pour calculer l'AOT
mois_debut: (int) mois de début de calcul
mois_fin: (int) mois de fin de calcul
heure_debut: (int) première heure de chaque jour après laquelle les valeurs
sont retenues
heure_fin: (int) dernière heure de chaque jour avant laquelle les valeurs
sont retenues
Retourne:
Un DataFrame de résultat de calcul
"""
res = df[(df.index.month >= mois_debut) & (df.index.month <= mois_fin) &
(df.index.hour >= heure_debut) & (df.index.hour <= heure_fin)]
nb_valid = res.count()
nb_total = res.shape[0]
pcent = nb_valid.astype(pd.np.float) / nb_total * 100
brut = (res[res > limite] - limite) / nb_an
brut = brut.sum()
net = brut / nb_valid * nb_total
print("""{total} mesures au totales
du {m_d} au {m_f}
entre {h_d} et {h_f}""".format(total=nb_total,
m_d=mois_debut, m_f=mois_fin,
h_d=heure_debut, h_f=heure_fin
)
)
aot = pd.DataFrame([brut.round(), nb_valid.round(), pcent.round(), net.round()],
index=['brutes', 'mesures valides', '% de rep.', 'nettes'])
return aot
def compresse(df):
"""
Compresse un DataFrame en supprimant les lignes dont toutes les Valeurs
(colonnes) sont vides. Si au moins une valeur est présente sur la ligne, alors
elle est conservée.
Paramètres:
df: DataFrame a présenter
Retourne:
Un DataFrame réduit à son strict minimum"""
return df.dropna(how='all')
def no2(df):
"""
Calculs réglementaires pour le dioxyde d'azote
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 200u
Seuil d'Alerte sur 3H consécutives: 400u
Seuil d'Alerte sur 3J consécutifs: 200u
Valeur limite pour la santé humaine 18H/A: 200u
Valeur limite pour la santé humaine en moyenne A: 40u
Objectif de qualité en moyenne A: 40u
Protection de la végétation en moyenne A: 30u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = "NO2"
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI en moyenne H: 200u": depassement(df, valeur=200),
"Seuil d'Alerte sur 3H consécutives: 400u": consecutive(df, valeur=400, sur=3),
"Seuil d'Alerte sur 3J consécutifs: 200u": consecutive(df.resample('D', how='max'), valeur=200, sur=3),
"Valeur limite pour la santé humaine 18H/A: 200u": depassement(df, valeur=200),
"Valeur limite pour la santé humaine en moyenne A: 40u": depassement(df.resample('A', how='mean'),
valeur=40),
"Objectif de qualité en moyenne A: 40u": depassement(df.resample('A', how='mean'), valeur=40),
"Protection de la végétation en moyenne A: 30u": depassement(df.resample('A', how='mean'), valeur=30),
}
return polluant, res
def pm10(df):
"""
Calculs réglementaires pour les particules PM10
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne J: 50u
Seuil d'Alerte en moyenne J: 80u
Valeur limite pour la santé humaine 35J/A: 50u
Valeur limite pour la santé humaine en moyenne A: 40u
Objectif de qualité en moyenne A: 30u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'PM10'
# Le DataFrame doit être en jour
if not isinstance(df.index.freq, pdoffset.Day):
raise FreqException("df doit être en jour.")
res = {"Seuil de RI en moyenne J: 50u": depassement(df, valeur=50),
"Seuil d'Alerte en moyenne J: 80u": depassement(df, valeur=80),
"Valeur limite pour la santé humaine 35J/A: 50u": depassement(df, valeur=50),
"Valeur limite pour la santé humaine en moyenne A: 40u": depassement(df.resample('A', how='mean'),
valeur=40),
"Objectif de qualité en moyenne A: 30u": depassement(df.resample('A', how='mean'), valeur=30),
}
return polluant, res
def so2(df):
"""
Calculs réglementaires pour le dioxyde de soufre
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 300u
Seuil d'Alerte sur 3H consécutives: 500u
Valeur limite pour la santé humaine 24H/A: 350u
Valeur limite pour la santé humaine 3J/A: 125u
Objectif de qualité en moyenne A: 50u
Protection de la végétation en moyenne A: 20u
Protection de la végétation du 01/10 au 31/03: 20u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'SO2'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI en moyenne H: 300u": depassement(df, valeur=300),
"Seuil d'Alerte sur 3H consécutives: 500u": depassement(df, valeur=500),
"Valeur limite pour la santé humaine 24H/A: 350u": depassement(df, valeur=350),
"Valeur limite pour la santé humaine 3J/A: 125u": depassement(df.resample('D', how='mean'), valeur=125),
"Objectif de qualité en moyenne A: 50u": depassement(df.resample('A', how='mean'), valeur=50),
"Protection de la végétation en moyenne A: 20u": depassement(df.resample('A', how='mean'), valeur=20),
"Protection de la végétation du 01/10 au 31/03: 20u": depassement(
df[(df.index.month <= 3) | (df.index.month >= 10)], valeur=20),
}
return polluant, res
def co(df):
"""
Calculs réglementaires pour le monoxyde de carbone
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Valeur limite pour la santé humaine max J 8H glissantes: 10000u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'CO'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Valeur limite pour la santé humaine sur 8H glissantes: 10000u": depassement(moyennes_glissantes(df, sur=8),
valeur=10),
}
return polluant, res
def o3(df):
"""
Calculs réglementaires pour l'ozone
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI sur 1H: 180u
Seuil d'Alerte sur 1H: 240u
Seuil d'Alerte sur 3H consécutives: 240u
Seuil d'Alerte sur 3H consécutives: 300u
Seuil d'Alerte sur 1H: 360u
Objectif de qualité pour la santé humaine sur 8H glissantes: 120u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'O3'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI sur 1H: 180u": depassement(df, valeur=180),
"Seuil d'Alerte sur 1H: 240u": depassement(df, valeur=240),
"Seuil d'Alerte sur 1H: 360u": depassement(df, valeur=360),
"Seuil d'Alerte sur 3H consécutives: 240u": consecutive(df, valeur=240, sur=3),
"Seuil d'Alerte sur 3H consécutives: 300u": consecutive(df, valeur=300, sur=3),
"Objectif de qualité pour la santé humaine sur 8H glissantes: 120u": depassement(
moyennes_glissantes(df, sur=8), valeur=120),
}
return polluant, res
def c6h6(df):
"""
Calculs réglementaires pour le benzène
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Objectif de qualité en moyenne A: 2u
Valeur limite pour la santé humaine en moyenne A: 5u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'C6H6'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Objectif de qualité en moyenne A: 2u": depassement(df.resample('A', how='mean'), valeur=2),
"Valeur limite pour la santé humaine en moyenne A: 5u": depassement(df.resample('A', how='mean'), valeur=5),
}
return polluant, res
def pb(df):
"""
Calculs réglementaires pour le plomb
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Objectif de qualité en moyenne A: 0.25u
Valeur limite pour la santé humaine en moyenne A: 0.5u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'Pb'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Objectif de qualité en moyenne A: 0.25u": depassement(df.resample('A', how='mean'), valeur=0.25),
"Valeur limite pour la santé humaine en moyenne A: 0.5u": depassement(df.resample('A', how='mean'),
valeur=0.5),
}
return polluant, res
def arsenic(df):
"""
Calculs réglementaires pour l'arsenic
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): ng/m3 (nanogramme par mètre cube)
Valeur cible en moyenne A: 6u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'As'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Valeur cible en moyenne A: 6u": depassement(df.resample('A', how='mean'), valeur=6),
}
return polluant, res
def cadmium(df):
"""
Calculs réglementaires pour le cadmium
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): ng/m3 (nanogramme par mètre cube)
Valeur cible en moyenne A: 5u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'Cd'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Valeur cible en moyenne A: 5u": depassement(df.resample('A', how='mean'), valeur=5),
}
return polluant, res
def nickel(df):
"""
Calculs réglementaires pour le nickel
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): ng/m3 (nanogramme par mètre cube)
Valeur cible en moyenne A: 20u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'Ni'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Valeur cible en moyenne A: 20u": depassement(df.resample('A', how='mean'), valeur=20),
}
return polluant, res
def bap(df):
"""
Calculs réglementaires pour le benzo(a)pyrène
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): ng/m3 (nanogramme par mètre cube)
Valeur cible en moyenne A: 1u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'BaP'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Valeur cible en moyenne A: 1u": depassement(df.resample('A', how='mean'), valeur=1),
}
return polluant, res
def print_synthese(fct, df):
"""
Présente une synthèse des calculs réglementaires en fournissant les valeurs
calculées suivant les réglementations définies dans chaque fonction de calcul
et un tableau de nombre de dépassement.
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
Retourne:
Imprime sur l'écran les valeurs synthétisées
"""
res_count = dict()
polluant, res = fct(df)
print("\nPour le polluant: %s" % polluant)
print("\nValeurs mesurées suivant critères:")
for k, v in res.items():
comp = compresse(v)
if not comp.empty:
comp.index.name = k
print(comp.to_string(na_rep='', float_format=lambda x: "%.0f" % x))
else:
print("\n%s: aucune valeur en dépassement" % k)
res_count[k] = v.count()
res_count = pd.DataFrame(res_count).T
print("Nombre de dépassements des critères:\n")
print(res_count)
def excel_synthese(fct, df, excel_file):
"""
Enregistre dans un fichier Excel une synthèse des calculs réglementaires en
fournissant les valeurs calculées suivant les réglementations définies dans
chaque fonction de calcul et un tableau de nombre de dépassement.
Les résultats sont enregistrés
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
excel_file: Chemin du fichier excel où écrire les valeurs
Retourne:
Rien
"""
def sheet_name(name):
# format sheet names: strip accents, quotes, colons and spaces (Excel caps names at 31 chars)
name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore').decode('ascii')
name = name.replace("'", "").replace(":", "").replace(" ", "_")
name = "%i-%s" % (i, name)
name = name[:31]
return name
res_count = dict()
polluant, res = fct(df)
print("\nTraitement du polluant: %s" % polluant)
writer = pd.ExcelWriter(excel_file)
# Valeurs mesurées suivant critères
for i, (k, v) in enumerate(res.items()):
comp = compresse(v)
comp.index.name = k
comp = comp.apply(pd.np.round)
comp.to_excel(writer, sheet_name=sheet_name(k))
res_count[k] = v.count()
# Nombre de dépassements des critères
name = "Nombre_de_depassements"
res_count = pd.DataFrame(res_count).T
res_count.index.name = name
res_count.to_excel(writer, sheet_name=name)
writer.save()
def html_synthese(fct, df):
"""
Retourne au format html une synthèse des calculs réglementaires en
fournissant les valeurs calculées suivant les réglementations définies dans
chaque fonction de calcul et un tableau de nombre de dépassement.
Paramètres:
fct: fonction renvoyant les éléments calculées
df: DataFrame de valeurs d'entrée à fournir à la fonction
Retourne:
Une chaine de caractère prête à être utilisé dans une page html
"""
html = str()
res_count = dict()
buf = StringIO()
polluant, res = fct(df)
html += '<p style="text-align:center"><h2>Pour le polluant: {}</h2></p>'.format(polluant)
# On enregistre tous les résultats dans le buffer et on calcule la somme de chaque
for k, v in res.items():
buf.write("<p>")
comp = compresse(v)
if not comp.empty:
comp.index.name = k
comp.to_html(buf=buf,
sparsify=True,
na_rep="")
else:
buf.write(
'<table border="1" class="dataframe"><thead><tr style="text-align: right;"><th>{}</th><th>Aucun dépassement</th></tr></table>'.format(
k))
buf.write("</p>")
res_count[k] = v.count()
res_count = pd.DataFrame(res_count).T
res_count.index.name = "Nombre de dépassements des critères"
html += "<p>"
html += res_count.to_html(sparsify=True)
html += "</p>"
html += buf.getvalue()
return html
def show_max(df):
"""Pour chaque serie (colonne) d'un DataFrame, va rechercher la (les) valeur(s)
et la (les) date(s) du (des) max.
Paramètres:
df: DataFrame de valeurs à calculer
Retourne:
Un DataFrame montrant pour chaque serie (colonne), les valeurs maxs aux dates
d'apparition.
"""
df = df.astype(pd.np.float)
res = list()
for c in df.columns:
serie = df[c]
res.append(serie.where(cond=serie == serie.max(), other=pd.np.nan).dropna())
return pd.DataFrame(res).T
def taux_de_representativite(df):
"""Compute the percentage of valid (non-NaN) values per column of a DataFrame."""
return (df.count().astype(pd.np.float) / df.shape[0] * 100).round(1)
| mit |
syl20bnr/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/ticker.py | 69 | 37420 | """
Tick locating and formatting
============================
This module contains classes to support completely configurable tick
locating and formatting. Although the locators know nothing about
major or minor ticks, they are used by the Axis class to support major
and minor tick locating and formatting. Generic tick locators and
formatters are provided, as well as domain specific custom ones.
Tick locating
-------------
The Locator class is the base class for all tick locators. The
locators handle autoscaling of the view limits based on the data
limits, and the choosing of tick locations. A useful semi-automatic
tick locator is MultipleLocator. You initialize this with a base, eg
10, and it picks axis limits and ticks that are multiples of your
base.
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (eg. where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
logarithmic ticks from min to max
:class:`MultipleLocator`
ticks and range are a multiple of base;
either integer or float
:class:`OldAutoLocator`
choose a MultipleLocator and dynamically reassign it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
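For illustration only (a sketch added here, not part of the original
module text), a custom locator that ticks every integer inside the
current view could look like::

    class EveryIntegerLocator(Locator):
        def __call__(self):
            vmin, vmax = self.axis.get_view_interval()
            if vmax < vmin:
                vmin, vmax = vmax, vmin
            return np.arange(math.ceil(vmin), math.floor(vmax) + 1)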
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, eg no minor ticks on by
default.
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
no labels on the ticks
:class:`FixedFormatter`
set the strings manually for the labels
:class:`FuncFormatter`
user defined function sets the labels
:class:`FormatStrFormatter`
use a sprintf format string
:class:`ScalarFormatter`
default formatter for scalars; autopick the fmt string
:class:`LogFormatter`
formatter for log axes
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has access
to the axis view and data limits.
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
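For illustration only (a sketch added here, not part of the original
module text; ``ax`` is assumed to be an existing Axes instance), a
percentage formatter could be attached with :class:`FuncFormatter`::

    def to_percent(x, pos):
        return '%1.0f%%' % (100.0 * x)

    ax.yaxis.set_major_formatter( FuncFormatter(to_percent) )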
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
class TickHelper:
axis = None
class DummyAxis:
def __init__(self):
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
def get_view_interval(self):
return self.viewLim.intervalx
def set_view_interval(self, vmin, vmax):
self.viewLim.intervalx = vmin, vmax
def get_data_interval(self):
return self.dataLim.intervalx
def set_data_interval(self, vmin, vmax):
self.dataLim.intervalx = vmin, vmax
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self):
if self.axis is None:
self.axis = self.DummyAxis()
def set_view_interval(self, vmin, vmax):
self.axis.set_view_interval(vmin, vmax)
def set_data_interval(self, vmin, vmax):
self.axis.set_data_interval(vmin, vmax)
def set_bounds(self, vmin, vmax):
self.set_view_interval(vmin, vmax)
self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
"""
Convert the tick location to a string
"""
# some classes want to see all the locs to help format
# individual ones
locs = []
def __call__(self, x, pos=None):
'Return the format for tick val x at position pos; pos=None indicates unspecified'
raise NotImplementedError('Derived must override')
def format_data(self,value):
return self.__call__(value)
def format_data_short(self,value):
'return a short string version'
return self.format_data(value)
def get_offset(self):
return ''
def set_locs(self, locs):
self.locs = locs
def fix_minus(self, s):
"""
some classes may want to replace a hyphen for minus with the
proper unicode symbol as described `here
<http://sourceforge.net/tracker/index.php?func=detail&aid=1962574&group_id=80706&atid=560720>`_.
The default is to do nothing
Note, if you use this method, e.g. in :meth:`format_data` or
call, you probably don't want to use it for
:meth:`format_data_short` since the toolbar uses this for
interactive coord reporting and I doubt we can expect GUIs
across platforms will handle the unicode correctly. So for
now the classes that override :meth:`fix_minus` should have an
explicit :meth:`format_data_short` method
"""
return s
class NullFormatter(Formatter):
'Always return the empty string'
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return ''
class FixedFormatter(Formatter):
'Return fixed strings for tick labels'
def __init__(self, seq):
"""
seq is a sequence of strings. For positions `i<len(seq)` return
*seq[i]* regardless of *x*. Otherwise return ''
"""
self.seq = seq
self.offset_string = ''
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if pos is None or pos>=len(self.seq): return ''
else: return self.seq[pos]
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
class FuncFormatter(Formatter):
"""
User defined function for formatting
"""
def __init__(self, func):
self.func = func
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.func(x, pos)
class FormatStrFormatter(Formatter):
"""
Use a format string to format the tick
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt % x
class OldScalarFormatter(Formatter):
"""
Tick location is a plain old number.
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
xmin, xmax = self.axis.get_view_interval()
d = abs(xmax - xmin)
return self.pprint_val(x,d)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x)<1e4 and x==int(x): return '%d' % x
if d < 1e-2: fmt = '%1.3e'
elif d < 1e-1: fmt = '%1.3f'
elif d > 1e5: fmt = '%1.1e'
elif d > 10 : fmt = '%1.1f'
elif d > 1 : fmt = '%1.2f'
else: fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup)==2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' %(mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class ScalarFormatter(Formatter):
"""
Tick location is a plain old number. If useOffset==True and the data range
is much smaller than the data average, then an offset will be determined
such that the tick labels are meaningful. Scientific notation is used for
data < 1e-3 or data >= 1e4.
"""
def __init__(self, useOffset=True, useMathText=False):
# useOffset allows plotting small data ranges with large offsets:
# for example: [1+1e-9,1+2e-9,1+3e-9]
# useMathText will render the offset and scientific notation in mathtext
self._useOffset = useOffset
self._usetex = rcParams['text.usetex']
self._useMathText = useMathText
self.offset = 0
self.orderOfMagnitude = 0
self.format = ''
self._scientific = True
self._powerlimits = rcParams['axes.formatter.limits']
def fix_minus(self, s):
'use a unicode minus rather than hyphen'
if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']: return s
else: return s.replace('-', u'\u2212')
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if len(self.locs)==0:
return ''
else:
s = self.pprint_val(x)
return self.fix_minus(s)
def set_scientific(self, b):
'''True or False to turn scientific notation on or off
see also :meth:`set_powerlimits`
'''
self._scientific = bool(b)
def set_powerlimits(self, lims):
'''
Sets size thresholds for scientific notation.
e.g. ``xaxis.set_powerlimits((-3, 4))`` sets the pre-2007 default in
which scientific notation is used for numbers less than
1e-3 or greater than 1e4.
See also :meth:`set_scientific`.
'''
assert len(lims) == 2, "argument must be a sequence of length 2"
self._powerlimits = lims
def format_data_short(self,value):
'return a short formatted string representation of a number'
return '%1.3g'%value
def format_data(self,value):
'return a formatted string representation of a number'
s = self._formatSciNotation('%1.10e'% value)
return self.fix_minus(s)
def get_offset(self):
"""Return scientific notation, plus offset"""
if len(self.locs)==0: return ''
s = ''
if self.orderOfMagnitude or self.offset:
offsetStr = ''
sciNotStr = ''
if self.offset:
offsetStr = self.format_data(self.offset)
if self.offset > 0: offsetStr = '+' + offsetStr
if self.orderOfMagnitude:
if self._usetex or self._useMathText:
sciNotStr = self.format_data(10**self.orderOfMagnitude)
else:
sciNotStr = '1e%d'% self.orderOfMagnitude
if self._useMathText:
if sciNotStr != '':
sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
s = ''.join(('$',sciNotStr,r'\mathdefault{',offsetStr,'}$'))
elif self._usetex:
if sciNotStr != '':
sciNotStr = r'\times%s' % sciNotStr
s = ''.join(('$',sciNotStr,offsetStr,'$'))
else:
s = ''.join((sciNotStr,offsetStr))
return self.fix_minus(s)
def set_locs(self, locs):
'set the locations of the ticks'
self.locs = locs
if len(self.locs) > 0:
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax-vmin)
if self._useOffset: self._set_offset(d)
self._set_orderOfMagnitude(d)
self._set_format()
def _set_offset(self, range):
# offset of 20,001 is 20,000, for example
locs = self.locs
if locs is None or not len(locs) or range == 0:
self.offset = 0
return
ave_loc = np.mean(locs)
if ave_loc: # dont want to take log10(0)
ave_oom = math.floor(math.log10(np.mean(np.absolute(locs))))
range_oom = math.floor(math.log10(range))
if np.absolute(ave_oom-range_oom) >= 3: # four sig-figs
if ave_loc < 0:
self.offset = math.ceil(np.max(locs)/10**range_oom)*10**range_oom
else:
self.offset = math.floor(np.min(locs)/10**(range_oom))*10**(range_oom)
else: self.offset = 0
def _set_orderOfMagnitude(self,range):
# if scientific notation is to be used, find the appropriate exponent
# if using an numerical offset, find the exponent after applying the offset
if not self._scientific:
self.orderOfMagnitude = 0
return
locs = np.absolute(self.locs)
if self.offset: oom = math.floor(math.log10(range))
else:
if locs[0] > locs[-1]: val = locs[0]
else: val = locs[-1]
if val == 0: oom = 0
else: oom = math.floor(math.log10(val))
if oom <= self._powerlimits[0]:
self.orderOfMagnitude = oom
elif oom >= self._powerlimits[1]:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def _set_format(self):
# set the format string to format all the ticklabels
# The floating point black magic (adding 1e-15 and formatting
# to 8 digits) may warrant review and cleanup.
locs = (np.asarray(self.locs)-self.offset) / 10**self.orderOfMagnitude+1e-15
sigfigs = [len(str('%1.8f'% loc).split('.')[1].rstrip('0')) \
for loc in locs]
sigfigs.sort()
self.format = '%1.' + str(sigfigs[-1]) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
self.format = '$\mathdefault{%s}$' % self.format
def pprint_val(self, x):
xp = (x-self.offset)/10**self.orderOfMagnitude
if np.absolute(xp) < 1e-8: xp = 0
return self.format % xp
def _formatSciNotation(self, s):
# transform 1e+004 into 1e4, for example
tup = s.split('e')
try:
significand = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
if self._useMathText or self._usetex:
if significand == '1':
# reformat 1x10^y as 10^y
significand = ''
if exponent:
exponent = '10^{%s%s}'%(sign, exponent)
if significand and exponent:
return r'%s{\times}%s'%(significand, exponent)
else:
return r'%s%s'%(significand, exponent)
else:
s = ('%se%s%s' %(significand, sign, exponent)).rstrip('e')
return s
except IndexError, msg:
return s
class LogFormatter(Formatter):
"""
Format values for log axis;
if attribute *decadeOnly* is True, only the decades will be labelled.
"""
def __init__(self, base=10.0, labelOnlyBase = True):
"""
*base* is used to locate the decade tick,
which will be the only one to be labeled if *labelOnlyBase*
is ``True``
"""
self._base = base+0.0
self.labelOnlyBase=labelOnlyBase
self.decadeOnly = True
def base(self,base):
'change the *base* for labeling - warning: should always match the base used for :class:`LogLocator`'
self._base=base
def label_minor(self,labelOnlyBase):
'switch on/off minor ticks labeling'
self.labelOnlyBase=labelOnlyBase
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
b=self._base
if x == 0.0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
if not isDecade and self.labelOnlyBase: s = ''
elif x>10000: s= '%1.0e'%x
elif x<1: s = '%1.0e'%x
else : s = self.pprint_val(x,d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
def format_data(self,value):
self.labelOnlyBase = False
value = cbook.strip_math(self.__call__(value))
self.labelOnlyBase = True
return value
def format_data_short(self,value):
'return a short formatted string representation of a number'
return '%1.3g'%value
def is_decade(self, x):
n = self.nearest_long(x)
return abs(x-n)<1e-10
def nearest_long(self, x):
if x==0: return 0L
elif x>0: return long(x+0.5)
else: return long(x-0.5)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x)<1e4 and x==int(x): return '%d' % x
if d < 1e-2: fmt = '%1.3e'
elif d < 1e-1: fmt = '%1.3f'
elif d > 1e5: fmt = '%1.1e'
elif d > 10 : fmt = '%1.1f'
elif d > 1 : fmt = '%1.2f'
else: fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup)==2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' %(mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class LogFormatterExponent(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
d = abs(vmax-vmin)
b=self._base
if x == 0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
if not isDecade and self.labelOnlyBase: s = ''
#if 0: pass
elif fx>10000: s= '%1.0e'%fx
#elif x<1: s = '$10^{%d}$'%fx
#elif x<1: s = '10^%d'%fx
elif fx<1: s = '%1.0e'%fx
else : s = self.pprint_val(fx,d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
class LogFormatterMathtext(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
b = self._base
# only label the decades
if x == 0:
return '$0$'
sign = np.sign(x)
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
usetex = rcParams['text.usetex']
if sign == -1:
sign_string = '-'
else:
sign_string = ''
if not isDecade and self.labelOnlyBase: s = ''
elif not isDecade:
if usetex:
s = r'$%s%d^{%.2f}$'% (sign_string, b, fx)
else:
s = '$\mathdefault{%s%d^{%.2f}}$'% (sign_string, b, fx)
else:
if usetex:
s = r'$%s%d^{%d}$'% (sign_string, b, self.nearest_long(fx))
else:
s = r'$\mathdefault{%s%d^{%d}}$'% (sign_string, b, self.nearest_long(fx))
return s
class Locator(TickHelper):
"""
Determine the tick locations;
Note, you should not use the same locator between different :class:`~matplotlib.axis.Axis`
because the locator stores references to the Axis data and view
limits
"""
def __call__(self):
'Return the locations of the ticks'
raise NotImplementedError('Derived must override')
def view_limits(self, vmin, vmax):
"""
select a scale for the range from vmin to vmax
Normally this will be overridden.
"""
return mtransforms.nonsingular(vmin, vmax)
def autoscale(self):
'autoscale the view limits'
return self.view_limits(*self.axis.get_view_interval())
def pan(self, numsteps):
'Pan numticks (can be positive or negative)'
ticks = self()
numticks = len(ticks)
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
if numticks>2:
step = numsteps*abs(ticks[0]-ticks[1])
else:
d = abs(vmax-vmin)
step = numsteps*d/6.
vmin += step
vmax += step
self.axis.set_view_interval(vmin, vmax, ignore=True)
def zoom(self, direction):
"Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
interval = abs(vmax-vmin)
step = 0.1*interval*direction
self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
def refresh(self):
'refresh internal information based on current lim'
pass
class IndexLocator(Locator):
"""
Place a tick on every multiple of some base number of points
plotted, eg on every 5th point. It is assumed that you are doing
index plotting; ie the axis is 0, len(data). This is mainly
useful for x ticks.
"""
def __init__(self, base, offset):
'place ticks on the i-th data points where (i-offset)%base==0'
self._base = base
self.offset = offset
def __call__(self):
'Return the locations of the ticks'
dmin, dmax = self.axis.get_data_interval()
return np.arange(dmin + self.offset, dmax+1, self._base)
class FixedLocator(Locator):
"""
Tick locations are fixed. If nbins is not None,
the array of possible positions will be subsampled to
keep the number of ticks <= nbins +1.
"""
def __init__(self, locs, nbins=None):
self.locs = locs
self.nbins = nbins
if self.nbins is not None:
self.nbins = max(self.nbins, 2)
def __call__(self):
'Return the locations of the ticks'
if self.nbins is None:
return self.locs
step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
return self.locs[::step]
class NullLocator(Locator):
"""
No ticks
"""
def __call__(self):
'Return the locations of the ticks'
return []
class LinearLocator(Locator):
"""
Determine the tick locations
The first time this function is called it will try to set the
number of ticks to make a nice tick partitioning. Thereafter the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks = None, presets=None):
"""
Use presets to set locs based on the view limits: a dict mapping (vmin, vmax) -> locs
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
if vmax<vmin:
vmin, vmax = vmax, vmin
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks is None:
self._set_numticks()
if self.numticks==0: return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return ticklocs
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax<vmin:
vmin, vmax = vmax, vmin
if vmin==vmax:
vmin-=1
vmax+=1
exponent, remainder = divmod(math.log10(vmax - vmin), 1)
if remainder < 0.5:
exponent -= 1
scale = 10**(-exponent)
vmin = math.floor(scale*vmin)/scale
vmax = math.ceil(scale*vmax)/scale
return mtransforms.nonsingular(vmin, vmax)
def closeto(x,y):
if abs(x-y)<1e-10: return True
else: return False
class Base:
'this solution has some hacks to deal with floating point inaccuracies'
def __init__(self, base):
assert(base>0)
self._base = base
def lt(self, x):
'return the largest multiple of base < x'
d,m = divmod(x, self._base)
if closeto(m,0) and not closeto(m/self._base,1):
return (d-1)*self._base
return d*self._base
def le(self, x):
'return the largest multiple of base <= x'
d,m = divmod(x, self._base)
if closeto(m/self._base,1): # was closeto(m, self._base)
#looks like floating point error
return (d+1)*self._base
return d*self._base
def gt(self, x):
'return the smallest multiple of base > x'
d,m = divmod(x, self._base)
if closeto(m/self._base,1):
#looks like floating point error
return (d+2)*self._base
return (d+1)*self._base
def ge(self, x):
'return the smallest multiple of base >= x'
d,m = divmod(x, self._base)
if closeto(m,0) and not closeto(m/self._base,1):
return d*self._base
return (d+1)*self._base
def get_base(self):
return self._base
class MultipleLocator(Locator):
"""
Set a tick on every integer that is a multiple of base in the
view interval
"""
def __init__(self, base=1.0):
self._base = Base(base)
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
if vmax<vmin:
vmin, vmax = vmax, vmin
vmin = self._base.ge(vmin)
base = self._base.get_base()
n = (vmax - vmin + 0.001*base)//base
locs = vmin + np.arange(n+1) * base
return locs
def view_limits(self, dmin, dmax):
"""
Set the view limits to the nearest multiples of base that
contain the data
"""
vmin = self._base.le(dmin)
vmax = self._base.ge(dmax)
if vmin==vmax:
vmin -=1
vmax +=1
return mtransforms.nonsingular(vmin, vmax)
def scale_range(vmin, vmax, n = 1, threshold=100):
dv = abs(vmax - vmin)
maxabsv = max(abs(vmin), abs(vmax))
if maxabsv == 0 or dv/maxabsv < 1e-12:
return 1.0, 0.0
meanv = 0.5*(vmax+vmin)
if abs(meanv)/dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(math.log10(meanv), 1)[0]
offset = 10**ex
else:
ex = divmod(math.log10(-meanv), 1)[0]
offset = -10**ex
ex = divmod(math.log10(dv/n), 1)[0]
scale = 10**ex
return scale, offset
class MaxNLocator(Locator):
"""
Select no more than N intervals at nice locations.
"""
def __init__(self, nbins = 10, steps = None,
trim = True,
integer=False,
symmetric=False):
self._nbins = int(nbins)
self._trim = trim
self._integer = integer
self._symmetric = symmetric
if steps is None:
self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
if integer:
self._steps = [n for n in self._steps if divmod(n,1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin -= offset
vmax -= offset
raw_step = (vmax-vmin)/nbins
scaled_raw_step = raw_step/scale
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step*divmod(vmin, step)[0]
best_vmax = best_vmin + step*nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins+1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
return self.bin_boundaries(vmin, vmax)
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander = 0.05)
return np.take(self.bin_boundaries(dmin, dmax), [0,-1])
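# Illustrative sketch for the MaxNLocator class above (added; not part of the
# original source). Assuming an existing Axes instance `ax`, limiting an axis
# to at most 5 nicely placed intervals could look like:
#     ax.xaxis.set_major_locator(MaxNLocator(nbins=5, steps=[1, 2, 5, 10]))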
def decade_down(x, base=10):
'floor x to the nearest lower decade'
lx = math.floor(math.log(x)/math.log(base))
return base**lx
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
lx = math.ceil(math.log(x)/math.log(base))
return base**lx
def is_decade(x,base=10):
lx = math.log(x)/math.log(base)
return lx==int(lx)
class LogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=[1.0]):
"""
place ticks on the location= base**i*subs[j]
"""
self.base(base)
self.subs(subs)
self.numticks = 15
def base(self,base):
"""
set the base of the log scaling (major tick every base**i, i integer)
"""
self._base=base+0.0
def subs(self,subs):
"""
set the minor ticks of the log scaling: every base**i*subs[j]
"""
if subs is None:
self._subs = None # autosub
else:
self._subs = np.asarray(subs)+0.0
def _set_numticks(self):
self.numticks = 15 # todo; be smart here; this is just for dev
def __call__(self):
'Return the locations of the ticks'
b=self._base
vmin, vmax = self.axis.get_view_interval()
if vmin <= 0.0:
vmin = self.axis.get_minpos()
if vmin <= 0.0:
raise ValueError(
"Data has no positive values, and therefore can not be log-scaled.")
vmin = math.log(vmin)/math.log(b)
vmax = math.log(vmax)/math.log(b)
if vmax<vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax)-math.ceil(vmin)
if self._subs is None: # autosub
if numdec>10: subs = np.array([1.0])
elif numdec>6: subs = np.arange(2.0, b, 2.0)
else: subs = np.arange(2.0, b)
else:
subs = self._subs
stride = 1
while numdec/stride+1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin),
math.ceil(vmax)+stride, stride)
if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = []
for decadeStart in b**decades:
ticklocs.extend( subs*decadeStart )
else:
ticklocs = b**decades
return np.array(ticklocs)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax<vmin:
vmin, vmax = vmax, vmin
minpos = self.axis.get_minpos()
if minpos<=0:
raise ValueError(
"Data has no positive values, and therefore can not be log-scaled.")
if vmin <= minpos:
vmin = minpos
if not is_decade(vmin,self._base): vmin = decade_down(vmin,self._base)
if not is_decade(vmax,self._base): vmax = decade_up(vmax,self._base)
if vmin==vmax:
vmin = decade_down(vmin,self._base)
vmax = decade_up(vmax,self._base)
result = mtransforms.nonsingular(vmin, vmax)
return result
class SymmetricalLogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, transform, subs=[1.0]):
"""
place ticks on the location= base**i*subs[j]
"""
self._transform = transform
self._subs = subs
self.numticks = 15
def _set_numticks(self):
self.numticks = 15 # todo; be smart here; this is just for dev
def __call__(self):
'Return the locations of the ticks'
b = self._transform.base
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = self._transform.transform((vmin, vmax))
if vmax<vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax)-math.ceil(vmin)
if self._subs is None:
if numdec>10: subs = np.array([1.0])
elif numdec>6: subs = np.arange(2.0, b, 2.0)
else: subs = np.arange(2.0, b)
else:
subs = np.asarray(self._subs)
stride = 1
while numdec/stride+1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin), math.ceil(vmax)+stride, stride)
if len(subs) > 1 or subs[0] != 1.0:
ticklocs = []
for decade in decades:
ticklocs.extend(subs * (np.sign(decade) * b ** np.abs(decade)))
else:
ticklocs = np.sign(decades) * b ** np.abs(decades)
return np.array(ticklocs)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._transform.base
if vmax<vmin:
vmin, vmax = vmax, vmin
if not is_decade(abs(vmin), b):
if vmin < 0:
vmin = -decade_up(-vmin, b)
else:
vmin = decade_down(vmin, b)
if not is_decade(abs(vmax), b):
if vmax < 0:
vmax = -decade_down(-vmax, b)
else:
vmax = decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -decade_up(-vmin, b)
vmax = -decade_down(-vmax, b)
else:
vmin = decade_down(vmin, b)
vmax = decade_up(vmax, b)
result = mtransforms.nonsingular(vmin, vmax)
return result
class AutoLocator(MaxNLocator):
def __init__(self):
MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10])
class OldAutoLocator(Locator):
"""
On autoscale this class picks the best MultipleLocator to set the
view limits and the tick locs.
"""
def __init__(self):
self._locator = LinearLocator()
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def refresh(self):
'refresh internal information based on current lim'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
d = abs(vmax-vmin)
self._locator = self.get_locator(d)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
d = abs(vmax-vmin)
self._locator = self.get_locator(d)
return self._locator.view_limits(vmin, vmax)
def get_locator(self, d):
'pick the best locator based on a distance'
d = abs(d)
if d<=0:
locator = MultipleLocator(0.2)
else:
try: ld = math.log10(d)
except OverflowError:
raise RuntimeError('AutoLocator illegal data interval range')
fld = math.floor(ld)
base = 10**fld
#if ld==fld: base = 10**(fld-1)
#else: base = 10**fld
if d >= 5*base : ticksize = base
elif d >= 2*base : ticksize = base/2.0
else : ticksize = base/5.0
locator = MultipleLocator(ticksize)
return locator
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
'ScalarFormatter', 'LogFormatter', 'LogFormatterExponent',
'LogFormatterMathtext', 'Locator', 'IndexLocator',
'FixedLocator', 'NullLocator', 'LinearLocator',
'LogLocator', 'AutoLocator', 'MultipleLocator',
'MaxNLocator', )
| gpl-3.0 |
meduz/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 49 | 3080 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import clean_warning_registry
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_array_equal(chi2.get_support(indices=True), [0])
assert_array_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_array_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chi2_unused_feature():
# Unused feature should evaluate to NaN
# and should issue no runtime warning
clean_warning_registry()
with warnings.catch_warnings(record=True) as warned:
warnings.simplefilter('always')
chi, p = chi2([[1, 0], [0, 0]], [1, 0])
for w in warned:
if 'divide by zero' in repr(w):
raise AssertionError('Found unexpected warning %s' % w)
assert_array_equal(chi, [1, np.nan])
assert_array_equal(p[1], np.nan)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
mjescobar/RF_Estimation | STA/helpers/molido/STA_FAST_v73.py | 4 | 22491 | #!/usr/bin/env python
#============================================================
# STA FAST CHAIN
# SPIKE TRIGGERED AVERAGE (STA) ALGORITHM, FAST VERSION
# Performs STA for a list of unit cells.
# AASTUDILLO, APRIL 2014
# 29 APRIL 2014
#
# This script uses as stimulus ensemble a mat file containing
# the stimuli at their true dimensions, 20x20 px.
#============================================================
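#------------------------------------------------------------
# Added note (a sketch, not part of the original script): the
# spike-triggered average over N spikes at frame indices t_i is
#     STA[tau] = (1/N) * sum_i ( stimulus[:, :, t_i - tau] - mean_stimulus )
# for tau = 0 .. numberframes-1, which is what sta_4() below computes from
# the stimulus matrix. A minimal NumPy sketch of the same idea, assuming
# `estim`, `stimei`, `meanimagearray`, `sizex`, `sizey` and `numberframes`
# as defined later in this script:
#
#     sta = np.zeros((sizex, sizey, numberframes))
#     for tau in range(numberframes):
#         frames = [estim[:, :, t - tau] for t in stimei if t - tau >= 0]
#         sta[:, :, numberframes - 1 - tau] = np.mean(frames, axis=0) - meanimagearray
#------------------------------------------------------------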
#============================================================
# Package import section:
#============================================================
import matplotlib # Graph and plot library
matplotlib.use("Agg") # for save images without show it in windows (for server use)
import pylab as pl
import matplotlib.cm as cm # plot lib
import matplotlib.pyplot as plt # plot lib (for figures)
import mpl_toolkits.mplot3d.axes3d as p3d # 3D Plot lib
from matplotlib.pyplot import * # plot lib python
from matplotlib.ticker import NullFormatter # ticker formatter
#-----------------------------------------------------------
# Methods, Signal processing, etc:
#-----------------------------------------------------------
import scipy # numerical methods lib (like Matlab functions)
import scipy.io # input output lib (for save matlab matrix)
import scipy.signal as signal # signal processing lib
import numpy as np # numerical methods lib
import sys # system lib
import os # operative system lib
import random # Random number methods
import time # System timer options
import scipy.misc as scim # scientific python package for image basic process
import glob # package for get file names from files in a folder
import matplotlib.pyplot as plt
from pylab import * # laboratory and plot methods lib
from scipy import misc
from PIL import Image, ImageChops
import argparse #argument parsing
# import gc # garbage collector
#=============================================
# Inputs
#=============================================
parser = argparse.ArgumentParser(prog='STA_FAST.py',
description='Performs STA from a stimuli ensemble',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--getimagenames', default=0,
help='0,1 load image name list with the stimulus ensemble',
type=int, choices=[0, 1], required=False)
parser.add_argument('--openimagesandwrite', default=0,
help='0,1 DO TESTS FOR READ AND WRITE IMAGES',
type=int, choices=[0, 1], required=False)
parser.add_argument('--calculatemeanrf', default=0,
help='0,1 LOAD ALL THE IMAGES FROM THE STIMULOS ENSEMBLE '+
'AND CALCULATE THE MEAN STIMULUS',
type=int, choices=[0, 1], required=False)
parser.add_argument('--algorithm', default=4,
help='1,2,3,4 how to perform STA: '+
'1 loads all spike-triggered stimuli at once, '+
'2 loads them sequentially, '+
'3 loads chunks of frames, '+
'4 builds the STA from the stimulus matrix',
type=int, choices=[1, 2, 3, 4], required=False)
parser.add_argument('--start',
help='From which units should be processed',
type=int, default='0', required=False)
parser.add_argument('--end',
help='Up to which units should be processed',
type=int, default='2', required=False)
parser.add_argument('--stafolder',
help='Output folder',
type=str, default='.', required=False)
parser.add_argument('--path',
help='Path to files',
type=str, default='.', required=False)
parser.add_argument('--folder',
help='Folder with the stimuli files',
type=str, default='.', required=True)
parser.add_argument('--filter',
help='Filter for the stimuli images',
type=str, default='*.txt')
parser.add_argument('--timefolder',
help='SPIKE TIME STAMPS FOLDER FOR LOAD SPIKE TRAINS',
type=str, default='.', required=False)
parser.add_argument('--syncfile',
help='SET THE NAME OF THE STIMULUS SYNCHRONY ANALYSIS FILE'+
'IT CONTAINS THE INITIAL AND FINAL TIME STAMPS OF EACH FRAME',
type=str, default='.', required=True)
parser.add_argument('--samplingRate',
help='ADQUISITION SAMPLING RATE FOR THE RECORDS',
type=int, default=20000, required=False)
parser.add_argument('--numberframes',
help='NUMBER OF FRAMES BEFORE AND AFTER A SPIKE TO ANALISE',
type=int, default=18, required=False)
parser.add_argument('--numberframespost',
help='number of frames posterior to each spike for STA windows',
type=int, default=2, required=False)
parser.add_argument('--sizex',
help='SIZE OF EACH FRAME IN PIXELS X',
type=int, default=31, required=False)
parser.add_argument('--sizey',
help='SIZE OF EACH FRAME IN PIXELS Y',
type=int, default=31, required=False)
parser.add_argument('--dolog',
help='0,1 logarithm analysis for plot',
type=int, default=0, choices=[0, 1], required=False)
parser.add_argument('--stim_mini',
help='Stimuli matrix, check convertStim.py',
type=str, default='stim_mini.mat', required=True)
parser.add_argument('--characterisation',
help='Characterisation',
type=str, required=False)
parser.add_argument('--unit_files',
help='File with Units to process',
type=str, default='units_0003.txt', required=True)
args = parser.parse_args()
#=============================================
# GET SPIKE TIME FILE NAMES
#=============================================
archivosruta = args.path
archivosfolder = args.folder
archivofiltro = args.filter
# FOLDER NAME TO SAVE EACH FOLDER RESULTS
stafolder = args.stafolder
# SET THE NAME OF THE STIMULUS SYNCHRONY ANALYSIS FILE
# IT CONTAINS THE INITIAL AND FINAL TIME STAMPS OF EACH FRAME
synchronyfile = args.syncfile
getimagenames = args.getimagenames #must be 0
openimagesandwrite = args.openimagesandwrite #must be 0
calculatemeanrf = args.calculatemeanrf #must be 0
tipoalgoritmo = args.algorithm
# FOLDER NAME TO LOAD STIMULUS ENSEMBLE: IMAGE STIMULUS FOLDER
# imageruta = 'C:/Users/ALIEN3/Desktop/'
# imagefolder = 'checkImages'
# imagefiltro = '*.png'
# SPIKE TIME STAMPS FOLDER FOR LOAD SPIKE TRAINS
timefolder = archivosfolder
# SET THE ADQUISITION SAMPLING RATE OF THE RECORDS
samplingRate = args.samplingRate # Hz
# SET THE NUMBER OF FRAMES BEFORE AND AFTER A SPIKE TO ANALIZE:
# number of frames previous to each spike for STA windows
numberframes = args.numberframes
# number of frames posterior to each spike for STA windows
numberframespost = args.numberframespost
# SET THE SIZE OF EACH FRAME IN PIXELS
sizex = args.sizex #31
sizey = args.sizey #31
# set if do logarithm analysis for plot:
dolog = args.dolog
# SET THE NAME OF THE STIMULUS SYNCHRONY ANALYSIS FILE
# IT CONTAINS THE INITIAL AND FINAL TIME STAMPS OF EACH FRAME
if not os.path.isfile(synchronyfile):
print ''
print 'File [' + synchronyfile + '] not found'
sys.exit()
inicio_fin_frame = np.loadtxt(synchronyfile)
vector_fin_frame = inicio_fin_frame[:,1]
vector_inicio_frame = inicio_fin_frame[:,0]
# load image mat file stim_mini
stimMini = args.stim_mini
if not os.path.isfile(stimMini):
print 'File [' + stimMini + '] not found'
sys.exit()
# stimMini must be prepared using convertStim.py
estim = np.load(stimMini)
canal = 2 # same as choose channel 3 of RGB images
lenSyncFile = len(vector_fin_frame)
meanimagearray = np.add.reduce(estim,axis=2) // (1.0* 100000)
c = 1
inicio = args.start
final = args.end
if inicio < 0:
print ''
print 'start can not be lesser than 0'
sys.exit()
if inicio > final:
print ''
print 'start can not be lesser than end'
sys.exit()
# The units and characterization vectors come from the Excel table,
# but since we do not have it, they must be ignored the first time.
unit_files = args.unit_files
if not os.path.isfile(unit_files):
print ''
print 'File [' + unit_files + '] not found'
sys.exit()
f = open( unit_files ,'r')
per_row = []
for line in f:
per_row.append(line.split('\t'))
f.close()
#If the characterisationFile is not provided an array of length of the
# units must be provided.
characterisationFile = args.characterisation
if not characterisationFile:
characterization = np.ones((len(per_row[0]),), dtype=np.int)
else:
if not os.path.isfile(characterisationFile):
print ''
print 'File [' + characterisationFile + '] not found'
sys.exit()
else:
characterization = np.loadtxt(characterisationFile)
#Final lesser than Start
if final > len(characterization):
final=len(characterization)-inicio
#--------------------------------------------------------
# load image file names list: (the file should exist before)
#--------------------------------------------------------
# cadena_texto = "image_filenames"
# contenedor = scipy.io.loadmat(stafolder+'/'+cadena_texto+'.mat')
# ifn2 = contenedor['ifn']
# del contenedor
try:
os.mkdir( stafolder )
except OSError:
pass
def sta_1():
# LOAD ALL THE FRAMES ACCORDING TO THE TIME STAMPS OF THE CELL
# NOT FUNCTIONAL ANYMORE
limite3 = len(stimei)
kframe = 0
spk = np.zeros((500,500,numberframes,limite3))
for kiter in range(limite3):
kframe = stimei[kiter]
for b in range(numberframes):
print ' kiter: ',kiter, ' kframe: ',kframe, ' b: ',b
line = ifn2[kframe-(numberframes-1)+ b ]
imagen = scim.imread(line, flatten=True)
spk[:,:,b,kiter] = imagen - meanimagearray
N = len(stimei)
STA = ( np.add.reduce(spk,axis=3) / (1.0 * N) )
MEANSTA = ( np.add.reduce(STA,axis=2) / (1.0 * numberframes) )
def sta_2():
# LOAD EACH FRAME AND CALCULATES THE STA SEQUENTIALLY
timeAlgorithm2Ini = time.time()
kframe = 0
cadena_texto = "mean_image"
contenedor = scipy.io.loadmat(stafolder+'/'+cadena_texto+'.mat')
meanimagearray = contenedor['meanimagearray']
del contenedor
sizex = 380
sizey = 380
acumula = np.zeros((sizex,sizey,numberframes+numberframespost))
print 'Get the spike triggered stimuli: \n '
for kiter in range(limite3):
timeProcessIni = time.time()
kframe = stimei[kiter]
for b in range(numberframes+numberframespost):
line = ifn2[kframe-(numberframes-1)+ b]
imagen = scim.imread( line, flatten=True )
acumula[:,:,b] = acumula[:,:,b] + (imagen - meanimagearray)
if kiter > len(stimei):
break
timeProcessFin = time.time()
tiempoDiferencia = timeProcessFin - timeProcessIni
sys.stdout.write("\r%d%%" %((kiter+1)*100.0/limite3, ) )
sys.stdout.flush()
N = limite3 # len(stimei)
STA = acumula // N
print ' \n '
minimosta = np.min(np.min(np.min(STA)))
maximosta = np.max(np.max(np.max(STA)))
print '\nmin sta ', minimosta, ' max sta ', maximosta
if minimosta < 0:
STA_desp = STA + np.abs(minimosta) # lineal shift
if minimosta >= 0:
STA_desp = STA - np.abs(minimosta) # lineal shift
minimosta_desp = np.min(np.min(np.min(STA_desp)))
maximosta_desp = np.max(np.max(np.max(STA_desp)))
print 'min sta with bias', minimosta_desp
print 'max sta with bias', maximosta_desp
stavisual_lin = STA_desp*255 # it is visualized with lineal scale
stavisual_lin = stavisual_lin // (maximosta_desp *1.0) # it is normalized with lineal scale
print 'min sta visual lineal', np.min(np.min(np.min(stavisual_lin)))
print 'max sta visual lineal', np.min(np.max(np.max(stavisual_lin)))
# FINAL NORMALIZATION FOR THE MEAN STA
MEANSTA_lin = np.add.reduce(stavisual_lin,axis=2)
timeAlgorithm2End = time.time()
timeAlgorithm2Total = timeAlgorithm2End - timeAlgorithm2Ini
print " Time process ", timeAlgorithm2Total, ' seg (', timeAlgorithm2Total/60, ' min)'
def sta_3():
timeAlgorithm3Ini = time.time()
print 'Get the spike triggered stimuli: \n '
sizechunk = 40
sizesmall = 20
acumula = np.zeros((sizex,sizey,numberframes+numberframespost))
if dosmall:
acumulaSmall = np.zeros((sizesmall,sizesmall,numberframes+numberframespost))
for kblock in range(np.round(limite3/sizechunk)):
spk = np.zeros((sizex,sizey,numberframes+numberframespost,sizechunk))
if dosmall:
spkSmall = np.zeros((sizesmall,sizesmall,numberframes+numberframespost,sizechunk))
for kiter in range(sizechunk):
kframe = stimei[kiter+kblock*sizechunk]
for b in range(numberframes+numberframespost):
line = ifn2[kframe-(numberframes-1)+ b ]
imagen = scim.imread(line, flatten = True )
if dosmall:
imagenSmall = scipy.misc.imresize(imagen, [sizesmall,sizesmall] , interp = 'bilinear' , mode = None )
spk[:,:,b,kiter] = imagen
if dosmall:
spkSmall[:,:,b,kiter] = imagenSmall
del imagen
del line
if dosmall:
del imagenSmall
acuchunk = ( np.add.reduce(spk,axis=3) )
acumula[:,:,:] = acumula[:,:,:] + acuchunk
if dosmall:
acuchunkSmall = ( np.add.reduce(spkSmall,axis=3) )
acumulaSmall[:,:,:] = acumulaSmall[:,:,:] + acuchunkSmall
if kblock > np.round(limite3/sizechunk):
break
sys.stdout.write("\r%d%%" % ((kblock+1)*100.0 /(np.round(limite3/sizechunk)), ) )
sys.stdout.flush()
N = limite3
STA = acumula // N
for b in range(numberframes+numberframespost):
STA[:,:,b] = STA[:,:,b] - meanimagearray
if dosmall:
meansmall = scipy.misc.imresize(meanimagearray,[sizesmall,sizesmall], interp='bilinear', mode=None)
STASmall = acumulaSmall // N
for b in range(numberframes+numberframespost):
STASmall[:,:,b] = STASmall[:,:,b] - meansmall
print ' \n '
minimosta = np.min(np.min(np.min(STA)))
maximosta = np.max(np.max(np.max(STA)))
if minimosta < 0:
STA_desp = STA + np.abs(minimosta) # lineal shift
if minimosta >= 0:
STA_desp = STA - np.abs(minimosta) # lineal shift
minimosta_desp = np.min(np.min(np.min(STA_desp)))
maximosta_desp = np.max(np.max(np.max(STA_desp)))
stavisual_lin = STA_desp*255 # it is visualized with lineal scale
stavisual_lin = stavisual_lin // (maximosta_desp *1.0) # it is normalized with lineal scale
# FINAL NORMALIZATION FOR THE MEAN STA
MEANSTA_lin = ( np.add.reduce(stavisual_lin,axis=2) / (1.0 * (numberframes+numberframespost) ) )
if dosmall:
minstasmall = np.min(np.min(np.min(STASmall)))
maxstasmall = np.max(np.max(np.max(STASmall)))
if minstasmall < 0:
STA_Small_desp = STASmall + np.abs(minstasmall) # lineal shift
if minstasmall >= 0:
STA_Small_desp = STASmall - np.abs(minstasmall) # lineal shift
minstasmall_desp = np.min(np.min(np.min(STA_Small_desp)))
maxstasmall_desp = np.max(np.max(np.max(STA_Small_desp)))
sta_small_visual_lin = STA_Small_desp * 255 # it is visualized with lineal scale
sta_small_visual_lin = sta_small_visual_lin // (maxstasmall_desp *1.0) # it is normalized with lineal scale
# FINAL NORMALIZATION FOR THE MEAN STA
MEAN_STA_small_lin = ( np.add.reduce(sta_small_visual_lin,axis=2) / (1.0 * (numberframes+numberframespost) ) )
timeAlgorithm3End = time.time()
timeAlgorithm3Total = timeAlgorithm3End - timeAlgorithm3Ini
print " Time process ", timeAlgorithm3Total, ' seg (', timeAlgorithm3Total/60, ' min)'
def sta_4():
timeAlgorithm4Ini = time.time()
stac = np.zeros( ( sizex,sizey, numberframes+numberframespost ) ) # complete sta matrix
for numeroframe in range(numberframes): #for 18 frames
bigsta18 = np.zeros( ( sizex,sizey ) )
for kiter in range(len(stimei)):
bigsta18[:,:] = bigsta18[:,:] + estim[ :,:,stimei[kiter]-numeroframe ] - meanimagearray
sta18 = bigsta18 / (1.0 * len(stimei) ) # one part of the sta matrix
stac[:,:,numberframes-1 - numeroframe] = sta18
acumula = np.zeros((sizex,sizey,numberframes+numberframespost))
STA = stac
print ' \n '
minimosta = np.min(np.min(np.min(STA)))
maximosta = np.max(np.max(np.max(STA)))
STA_desp = STA - minimosta
minimosta_desp = np.min(np.min(np.min(STA_desp)))
maximosta_desp = np.max(np.max(np.max(STA_desp)))
stavisual_lin = STA_desp * 255 # it is visualized with lineal scale
stavisual_lin = stavisual_lin // (maximosta_desp *1.0) # it is normalized with lineal scale
# FINAL NORMALIZATION FOR THE MEAN STA
MEANSTA_lin = ( np.add.reduce(stavisual_lin,axis=2) / (1.0 * (numberframes+numberframespost) ) )
timeAlgorithm4End = time.time()
timeAlgorithm4Total = timeAlgorithm4End - timeAlgorithm4Ini
print " Time process ", timeAlgorithm4Total, ' seg (', timeAlgorithm4Total/60, ' min)'
print '\nsize STA: ',len(STA),'x',len(STA[0]),'x',len(STA[0][0])
return (STA , stavisual_lin, MEANSTA_lin, STA_desp, acumula)
for kunit in range(inicio,final):
timestampName = per_row[0][kunit]
print timestampName,' ',characterization[kunit]
if characterization[kunit] > 0:
print 'Analysing Unit ',timestampName, ' loop :', c ,' unit n ', c + inicio
print '---------------------BEGIN---------------------------'
#--------------------------------------------------------
# get spike time stamps from file
#--------------------------------------------------------
neurontag = timestampName # tag or number of cell
rastercelulatxtfile = timefolder + timestampName +'.txt'
timestamps = np.loadtxt(rastercelulatxtfile) # text file containing time spikes in datapoints
neuronresultfolder_lin = str(neurontag)+'_lineal'
try:
os.mkdir( stafolder+'/'+neuronresultfolder_lin ) # create the folder
except OSError:
pass
finalfolder_lin = stafolder+'/'+neuronresultfolder_lin
print 'size time stamps vector: ', len(timestamps) #, 'x',len(timestamps[0])
#--------------------------------------------------------
# get spike times relative to the stimulus start (frames do not start at time=0)
#--------------------------------------------------------
#--------------------------------------------------------
# Conversion of spike times from seconds to POINTS:
#--------------------------------------------------------
vector_spikes = timestamps[:]*samplingRate # without the first id-zero column (1 COLUMN)
stimei = [] # initialize time spike index depending of image time
spikeframe_matrix = np.zeros( (len(vector_spikes), 4) ) # [spike time, frame id, ini time frame, end time frame]
#--------------------------------------------------------
# convert stimes (SPIKE TIMES) to frame indexes (image index):
#--------------------------------------------------------
primer_frame = 0
frame_ant = 0
print 'Get the spike triggered stimuli indices: \n'
contator = 0
contator2 = 0
totalcont = len(vector_spikes) * len(range(primer_frame, len(vector_fin_frame)))
for punto_spike in vector_spikes:
condicion = 1
for i in range(primer_frame, len(vector_fin_frame)):
if (vector_inicio_frame[i] < punto_spike) & (punto_spike <= vector_fin_frame[i]):
# if the spike time is into a frame time points (start and ends)
spikeframe_matrix[contator,0] = punto_spike
spikeframe_matrix[contator,1] = vector_fin_frame[i]
spikeframe_matrix[contator,2] = inicio_fin_frame[i,0]
spikeframe_matrix[contator,3] = inicio_fin_frame[i,1]
stimei.append(i)
frame_ant = i
break
sys.stdout.write("\r%d%%" %contator2)
sys.stdout.flush()
contator = contator + 1 #
contator2 = contator * 100 // ( 1.0 * len(vector_spikes) )
primer_frame = frame_ant
print '\n'
limite3 = len(stimei)
print 'length frames times vector', len(vector_fin_frame)
print "length time stamps vector: ", len(timestamps)
print "length spike triggered stimuli time i vector: ", len(stimei)
#--------------------------------------------------------
# STA Algorithm
#--------------------------------------------------------
#------------------- ALGORITHM TYPE 1----------------------
if(tipoalgoritmo == 1):
sta_1()
#------------------- ALGORITHM TYPE 2----------------------
if(tipoalgoritmo == 2): # sequentially algorithm
sta_2()
dosmall = 0
#------------------- ALGORITHM TYPE 3----------------------
if(tipoalgoritmo == 3): # LOAD CHUNKS OF FRAMES AND CALCULATES THE STA SEQUENTIALLY
sta_3()
#===============================================================================
#------------------- ALGORITHM TYPE 4----------------------
if(tipoalgoritmo == 4): # LOAD entire matrix stimuli AND CALCULATES THE STA SEQUENTIALLY
STA , stavisual_lin , MEANSTA_lin, STA_desp, acumula = sta_4()
#----------------------------------------------------
# save spike time stamp and frame index
#----------------------------------------------------
spikeframe_matrix_array = np.array(spikeframe_matrix)
spikeframe_filename = "spikeframe_matrix"+str(neurontag)
print "Save spike frame matrix as mat file: ",spikeframe_filename
scipy.io.savemat(finalfolder_lin+'/'+spikeframe_filename+'.mat',mdict={'spikeframe_matrix':spikeframe_matrix_array},oned_as='column')
#----------------------------------------------------
# save true STA matrix (NON SCALED for visual plot)
#----------------------------------------------------
STA_array = np.array(STA)
cadena_texto = "sta_array_"+str(neurontag)
print "Saving NON rescaled STA as mat file: ",cadena_texto
scipy.io.savemat(finalfolder_lin+'/'+cadena_texto+'.mat',mdict={'STA_array':STA_array},oned_as='column')
#----------------------------------------------------
# save visual STA matrix ( RE SCALED for visual plot)
#----------------------------------------------------
stavisual_lin_array = np.array(stavisual_lin)
cadena_texto = "stavisual_lin_array_"+str(neurontag)
print "Saving visual STA (lineal) as mat file: ",cadena_texto
scipy.io.savemat(finalfolder_lin+'/'+cadena_texto+'.mat',mdict={'STAarray_lin':stavisual_lin_array},oned_as='column')
print 'Saving images in lineal scale...'
plt.clf()
fig = plt.figure(1, figsize=(12,10))
ax = fig.add_subplot(3,6,1)
component = stavisual_lin[:,:,0]
ax.pcolormesh( component,vmin = 0,vmax = 255, cmap=cm.jet )
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_aspect(1)
kcontador = 2
for ksubplot in range(17):
ax = fig.add_subplot(3,6,kcontador)
component = stavisual_lin[:,:,kcontador-1]
ax.pcolormesh( component,vmin = 0,vmax = 255, cmap=cm.jet )
ax.set_aspect(1)
ax.set_yticklabels([])
ax.set_xticklabels([])
kcontador = kcontador + 1
plt.savefig(finalfolder_lin+"/STA-"+str(neurontag)+"_.png",format='png', bbox_inches='tight')
plt.savefig(stafolder+"/STA-"+str(neurontag)+"_.png",format='png', bbox_inches='tight')
plt.show()
plt.clf()
#------------------------------------------------------
print 'Saving mean image in lineal scale...'
pl.figure()
im = pl.pcolormesh(MEANSTA_lin,vmin = 0,vmax = 255, cmap=cm.jet)
pl.jet()
pl.colorbar(im)
ax = pl.axes()
ax.set_yticklabels([])
ax.set_xticklabels([])
pl.savefig(finalfolder_lin+"/MEANSTA-g_"+str(neurontag)+".png",format='png', bbox_inches='tight')
print 'CELL ' + timestampName + ' FINISHED!!!'
print '-----------------------END---------------------------'
del STA_desp
del STA
del stavisual_lin
del spikeframe_matrix
del acumula
c+=1
| gpl-2.0 |
adalke/rdkit | rdkit/Chem/PandasTools.py | 1 | 22208 | '''
Importing pandasTools enables several features that allow for using RDKit molecules as columns of a Pandas dataframe.
If the dataframe contains a molecule representation in a column (e.g. SMILES), as in this example:
>>> from rdkit.Chem import PandasTools
>>> import pandas as pd
>>> import os
>>> from rdkit import RDConfig
>>> antibiotics = pd.DataFrame(columns=['Name','Smiles'])
>>> antibiotics = antibiotics.append({'Smiles':'CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C','Name':'Penicilline G'}, ignore_index=True)#Penicilline G
>>> antibiotics = antibiotics.append({'Smiles':'CC1(C2CC3C(C(=O)C(=C(C3(C(=O)C2=C(C4=C1C=CC=C4O)O)O)O)C(=O)N)N(C)C)O','Name':'Tetracycline'}, ignore_index=True)#Tetracycline
>>> antibiotics = antibiotics.append({'Smiles':'CC1(C(N2C(S1)C(C2=O)NC(=O)C(C3=CC=CC=C3)N)C(=O)O)C','Name':'Ampicilline'}, ignore_index=True)#Ampicilline
>>> print([str(x) for x in antibiotics.columns])
['Name', 'Smiles']
>>> print(antibiotics)
Name Smiles
0 Penicilline G CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C
1 Tetracycline CC1(C2CC3C(C(=O)C(=C(C3(C(=O)C2=C(C4=C1C=CC=C4...
2 Ampicilline CC1(C(N2C(S1)C(C2=O)NC(=O)C(C3=CC=CC=C3)N)C(=O...
a new column can be created holding the respective RDKit molecule objects. The fingerprint can be included to accelerate substructure searches on the dataframe.
>>> PandasTools.AddMoleculeColumnToFrame(antibiotics,'Smiles','Molecule',includeFingerprints=True)
>>> print([str(x) for x in antibiotics.columns])
['Name', 'Smiles', 'Molecule']
A substructure filter can be applied on the dataframe using the RDKit molecule column, because the ">=" operator has been modified to work as a substructure check.
Thus the antibiotics containing the beta-lactam ring "C1C(=O)NC1" can be obtained by
>>> beta_lactam = Chem.MolFromSmiles('C1C(=O)NC1')
>>> beta_lactam_antibiotics = antibiotics[antibiotics['Molecule'] >= beta_lactam]
>>> print(beta_lactam_antibiotics[['Name','Smiles']])
Name Smiles
0 Penicilline G CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C
2 Ampicilline CC1(C(N2C(S1)C(C2=O)NC(=O)C(C3=CC=CC=C3)N)C(=O...
An SDF file can also be loaded into a dataframe.
>>> sdfFile = os.path.join(RDConfig.RDDataDir,'NCI/first_200.props.sdf')
>>> frame = PandasTools.LoadSDF(sdfFile,smilesName='SMILES',molColName='Molecule',includeFingerprints=True)
>>> frame.info # doctest: +SKIP
<bound method DataFrame.info of <class 'pandas.core.frame.DataFrame'>
Int64Index: 200 entries, 0 to 199
Data columns:
AMW 200 non-null values
CLOGP 200 non-null values
CP 200 non-null values
CR 200 non-null values
DAYLIGHT.FPG 200 non-null values
DAYLIGHT_CLOGP 200 non-null values
FP 200 non-null values
ID 200 non-null values
ISM 200 non-null values
LIPINSKI_VIOLATIONS 200 non-null values
NUM_HACCEPTORS 200 non-null values
NUM_HDONORS 200 non-null values
NUM_HETEROATOMS 200 non-null values
NUM_LIPINSKIHACCEPTORS 200 non-null values
NUM_LIPINSKIHDONORS 200 non-null values
NUM_RINGS 200 non-null values
NUM_ROTATABLEBONDS 200 non-null values
P1 30 non-null values
SMILES 200 non-null values
Molecule 200 non-null values
dtypes: object(20)>
Conversion to html is quite easy:
>>> htm = frame.to_html()
>>> str(htm[:36])
'<table border="1" class="dataframe">'
In order to support rendering the molecules as images in the HTML export of the dataframe, the __str__ method is monkey-patched to return a base64 encoded PNG:
>>> molX = Chem.MolFromSmiles('Fc1cNc2ccccc12')
>>> print(molX) # doctest: +SKIP
<img src="data:image/png;base64,..." alt="Mol"/>
This can be reverted using the ChangeMoleculeRendering method
>>> ChangeMoleculeRendering(renderer='String')
>>> print(molX) # doctest: +SKIP
<rdkit.Chem.rdchem.Mol object at 0x10d179440>
>>> ChangeMoleculeRendering(renderer='PNG')
>>> print(molX) # doctest: +SKIP
<img src="data:image/png;base64,..." alt="Mol"/>
'''
from __future__ import print_function
from base64 import b64encode
import types,copy
from rdkit.six import BytesIO, string_types
from rdkit import Chem
from rdkit.Chem import Draw
try:
import pandas as pd
try:
v = pd.__version__.split('.')
except AttributeError:
# support for older versions of pandas
v = pd.version.version.split('.')
if v[0]=='0' and int(v[1])<10:
print("Pandas version %s not compatible with tests"%v, file=sys.stderr)
pd = None
else:
if 'display.width' in pd.core.config._registered_options:
pd.set_option('display.width',1000000000)
if 'display.max_rows' in pd.core.config._registered_options:
pd.set_option('display.max_rows',1000000000)
elif 'display.height' in pd.core.config._registered_options:
pd.set_option('display.height',1000000000)
if 'display.max_colwidth' in pd.core.config._registered_options:
pd.set_option('display.max_colwidth',1000000000)
#saves the default pandas rendering to allow restauration
defPandasRendering = pd.core.frame.DataFrame.to_html
except ImportError:
import traceback
traceback.print_exc()
pd = None
except Exception as e:
import sys
import traceback
traceback.print_exc()
pd = None
if pd:
try:
from pandas.formats import format as fmt
except ImportError:
from pandas.core import format as fmt # older versions
highlightSubstructures=True
molRepresentation = 'png' # supports also SVG
molSize = (200,200)
def patchPandasHTMLrepr(self,**kwargs):
'''
  Patched default escaping of HTML control characters to allow molecule image rendering in dataframes
'''
formatter = fmt.DataFrameFormatter(self,buf=None,columns=None,col_space=None,colSpace=None,header=True,index=True,
na_rep='NaN',formatters=None,float_format=None,sparsify=None,index_names=True,
justify = None, force_unicode=None,bold_rows=True,classes=None,escape=False)
formatter.to_html()
html = formatter.buf.getvalue()
return html
def patchPandasHeadMethod(self,n=5):
'''Ensure inheritance of patched to_html in "head" subframe
'''
df = self[:n]
df.to_html = types.MethodType(patchPandasHTMLrepr,df)
df.head = types.MethodType(patchPandasHeadMethod,df)
return df
def _get_image(x):
"""displayhook function for PIL Images, rendered as PNG"""
import pandas as pd
bio = BytesIO()
x.save(bio,format='PNG')
s = b64encode(bio.getvalue()).decode('ascii')
pd.set_option('display.max_columns',len(s)+1000)
pd.set_option('display.max_rows',len(s)+1000)
if len(s)+100 > pd.get_option("display.max_colwidth"):
pd.set_option("display.max_colwidth",len(s)+1000)
return s
def _get_svg_image(mol, size=(200,200), highlightAtoms=[]):
""" mol rendered as SVG """
from IPython.display import SVG
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
try:
# If no coordinates, calculate 2D
mol.GetConformer(-1)
except ValueError:
rdDepictor.Compute2DCoords(mol)
drawer = rdMolDraw2D.MolDraw2DSVG(*size)
drawer.DrawMolecule(mol,highlightAtoms=highlightAtoms)
drawer.FinishDrawing()
svg = drawer.GetDrawingText().replace('svg:','')
return SVG(svg).data # IPython's SVG clears the svg text
from rdkit import DataStructs
try:
from rdkit.Avalon import pyAvalonTools as pyAvalonTools
_fingerprinter=lambda x,y:pyAvalonTools.GetAvalonFP(x,isQuery=y,bitFlags=pyAvalonTools.avalonSSSBits)
except ImportError:
_fingerprinter=lambda x,y:Chem.PatternFingerprint(x,fpSize=2048)
def _molge(x,y):
"""Allows for substructure check using the >= operator (X has substructure Y -> X >= Y) by
monkey-patching the __ge__ function
This has the effect that the pandas/numpy rowfilter can be used for substructure filtering (filtered = dframe[dframe['RDKitColumn'] >= SubstructureMolecule])
"""
if x is None or y is None: return False
if hasattr(x,'_substructfp'):
if not hasattr(y,'_substructfp'):
y._substructfp=_fingerprinter(y,True)
if not DataStructs.AllProbeBitsMatch(y._substructfp,x._substructfp):
return False
match = x.GetSubstructMatch(y)
if match:
if highlightSubstructures:
x.__sssAtoms=list(match)
else:
x.__sssAtoms=[]
return True
else:
return False
Chem.Mol.__ge__ = _molge # lambda x,y: x.HasSubstructMatch(y)
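# Illustrative usage of the patched operator (a sketch mirroring the module docstring,
# not part of the original module): after AddMoleculeColumnToFrame has populated a
# molecule column, rows containing a query substructure can be selected directly:
#   beta_lactam = Chem.MolFromSmiles('C1C(=O)NC1')
#   hits = antibiotics[antibiotics['Molecule'] >= beta_lactam]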
def PrintAsBase64PNGString(x,renderer = None):
'''returns the molecules as base64 encoded PNG image
'''
if highlightSubstructures and hasattr(x,'__sssAtoms'):
highlightAtoms=x.__sssAtoms
else:
highlightAtoms=[]
if molRepresentation.lower() == 'svg':
return _get_svg_image(x, highlightAtoms=highlightAtoms, size=molSize)
else:
return '<img src="data:image/png;base64,%s" alt="Mol"/>'%_get_image(Draw.MolToImage(x,highlightAtoms=highlightAtoms, size=molSize))
def PrintDefaultMolRep(x):
return str(x.__repr__())
#Chem.Mol.__str__ = lambda x: '<img src="data:image/png;base64,%s" alt="Mol"/>'%get_image(Draw.MolToImage(x))
Chem.Mol.__str__ = PrintAsBase64PNGString
def _MolPlusFingerprint(m):
'''Precomputes fingerprints and stores results in molecule objects to accelerate substructure matching
'''
#m = Chem.MolFromSmiles(smi)
if m is not None:
m._substructfp=_fingerprinter(m,False)
return m
def RenderImagesInAllDataFrames(images=True):
'''Changes the default dataframe rendering to not escape HTML characters, thus allowing rendered images in all dataframes.
  IMPORTANT: THIS IS A GLOBAL CHANGE THAT WILL AFFECT THE COMPLETE PYTHON SESSION. If you want to change the rendering only
for a single dataframe use the "ChangeMoleculeRendering" method instead.
'''
if images:
pd.core.frame.DataFrame.to_html = patchPandasHTMLrepr
else:
pd.core.frame.DataFrame.to_html = defPandasRendering
def AddMoleculeColumnToFrame(frame, smilesCol='Smiles', molCol = 'ROMol',includeFingerprints=False):
  '''Converts the molecules contained in "smilesCol" to RDKit molecules and appends them to the dataframe "frame" using the specified column name.
If desired, a fingerprint can be computed and stored with the molecule objects to accelerate substructure matching
'''
if not includeFingerprints:
frame[molCol]=frame[smilesCol].map(Chem.MolFromSmiles)
else:
frame[molCol]=frame[smilesCol].map(lambda smiles: _MolPlusFingerprint(Chem.MolFromSmiles(smiles)))
RenderImagesInAllDataFrames(images=True)
#frame.to_html = types.MethodType(patchPandasHTMLrepr,frame)
#frame.head = types.MethodType(patchPandasHeadMethod,frame)
def ChangeMoleculeRendering(frame=None, renderer='PNG'):
  '''Allows changing the rendering of the molecules between base64 PNG images and string representations.
  This serves two purposes: first, it avoids generating images when they are not desired and, second, it allows image rendering to be enabled for a
  newly created dataframe that already contains molecules, without having to rerun the time-consuming AddMoleculeColumnToFrame. Note: this is necessary because some pandas methods, e.g. head(),
  return a new dataframe instance that uses the default pandas rendering (thus not drawing images for molecules) instead of the monkey-patched one.
'''
if renderer == 'String':
Chem.Mol.__str__ = PrintDefaultMolRep
else:
Chem.Mol.__str__ = PrintAsBase64PNGString
if frame is not None:
frame.to_html = types.MethodType(patchPandasHTMLrepr,frame)
def LoadSDF(filename, idName='ID',molColName = 'ROMol',includeFingerprints=False, isomericSmiles=False, smilesName=None, embedProps=False):
'''Read file in SDF format and return as Pandas data frame.
If embedProps=True all properties also get embedded in Mol objects in the molecule column.
  If molColName=None molecules will not be present in the resulting DataFrame (only properties will be read).
'''
df = None
if isinstance(filename, string_types):
if filename.lower()[-3:] == ".gz":
import gzip
f = gzip.open(filename, "rb")
else:
f = open(filename, 'rb')
close = f.close
else:
f = filename
close = None # don't close an open file that was passed in
records = []
indices = []
for i, mol in enumerate(Chem.ForwardSDMolSupplier(f,sanitize=(molColName is not None))):
if mol is None: continue
row = dict((k, mol.GetProp(k)) for k in mol.GetPropNames())
if molColName is not None and not embedProps:
for prop in mol.GetPropNames():
mol.ClearProp(prop)
if mol.HasProp('_Name'): row[idName] = mol.GetProp('_Name')
if smilesName is not None:
row[smilesName] = Chem.MolToSmiles(mol, isomericSmiles=isomericSmiles)
if molColName is not None and not includeFingerprints:
row[molColName] = mol
elif molColName is not None:
row[molColName] = _MolPlusFingerprint(mol)
records.append(row)
indices.append(i)
if close is not None: close()
RenderImagesInAllDataFrames(images=True)
return pd.DataFrame(records, index=indices)
from rdkit.Chem import SDWriter
def WriteSDF(df, out, molColName='ROMol', idName=None, properties=None, allNumeric=False):
  '''Write an SD file for the molecules in the dataframe. Dataframe columns can be exported as SDF tags if specified in the "properties" list. "properties=list(df.columns)" would export all columns.
  The "allNumeric" flag allows all numeric columns to be included automatically in the output. The user has to make sure that the correct data type is assigned to each column.
  "idName" can be used to select a column to serve as the molecule title. It can be set to "RowID" to use the dataframe row key as the title.
'''
close = None
if isinstance(out, string_types):
if out.lower()[-3:] == ".gz":
import gzip
out = gzip.open(out, "wb")
close = out.close
writer = SDWriter(out)
if properties is None:
properties=[]
else:
properties=list(properties)
if allNumeric:
properties.extend([dt for dt in df.dtypes.keys() if (np.issubdtype(df.dtypes[dt],float) or np.issubdtype(df.dtypes[dt],int))])
if molColName in properties:
properties.remove(molColName)
if idName in properties:
properties.remove(idName)
writer.SetProps(properties)
for row in df.iterrows():
# make a local copy I can modify
mol = Chem.Mol(row[1][molColName])
if idName is not None:
if idName == 'RowID':
mol.SetProp('_Name',str(row[0]))
else:
mol.SetProp('_Name',str(row[1][idName]))
for p in properties:
cell_value = row[1][p]
# Make sure float does not get formatted in E notation
if np.issubdtype(type(cell_value),float):
s = '{:f}'.format(cell_value).rstrip("0") # "f" will show 7.0 as 7.00000
if s[-1] == ".":
s += "0" # put the "0" back on if it's something like "7."
mol.SetProp(p, s)
else:
mol.SetProp(p,str(cell_value))
writer.write(mol)
writer.close()
if close is not None: close()
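# Example call (illustrative sketch based on the docstring above, not part of the
# original module): write a gzipped SD file, use the dataframe row key as the molecule
# title and export every remaining column as an SDF tag:
#   WriteSDF(df, 'molecules.sdf.gz', molColName='ROMol', idName='RowID',
#            properties=list(df.columns))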
_saltRemover = None
def RemoveSaltsFromFrame(frame, molCol = 'ROMol'):
'''
Removes salts from mols in pandas DataFrame's ROMol column
'''
global _saltRemover
if _saltRemover is None:
from rdkit.Chem import SaltRemover
_saltRemover = SaltRemover.SaltRemover()
frame[molCol] = frame.apply(lambda x: _saltRemover.StripMol(x[molCol]), axis = 1)
def SaveSMILESFromFrame(frame, outFile, molCol='ROMol', NamesCol='', isomericSmiles=False):
'''
Saves smi file. SMILES are generated from column with RDKit molecules. Column with names is optional.
'''
w = Chem.SmilesWriter(outFile, isomericSmiles=isomericSmiles)
if NamesCol != '':
for m,n in zip(frame[molCol], map(str,frame[NamesCol])):
m.SetProp('_Name',n)
w.write(m)
w.close()
else:
for m in frame[molCol]:
w.write(m)
w.close()
import numpy as np
import os
from rdkit.six.moves import cStringIO as StringIO
def SaveXlsxFromFrame(frame, outFile, molCol='ROMol', size=(300,300)):
"""
Saves pandas DataFrame as a xlsx file with embedded images.
It maps numpy data types to excel cell types:
int, float -> number
datetime -> datetime
object -> string (limited to 32k character - xlsx limitations)
Cells with compound images are a bit larger than images due to excel.
Column width weirdness explained (from xlsxwriter docs):
The width corresponds to the column width value that is specified in Excel.
It is approximately equal to the length of a string in the default font of Calibri 11.
Unfortunately, there is no way to specify "AutoFit" for a column in the Excel file format.
This feature is only available at runtime from within Excel.
"""
import xlsxwriter # don't want to make this a RDKit dependency
cols = list(frame.columns)
cols.remove(molCol)
dataTypes = dict(frame.dtypes)
workbook = xlsxwriter.Workbook(outFile) # New workbook
worksheet = workbook.add_worksheet() # New work sheet
worksheet.set_column('A:A', size[0]/6.) # column width
# Write first row with column names
c2 = 1
for x in cols:
worksheet.write_string(0, c2, x)
c2 += 1
c = 1
for index, row in frame.iterrows():
image_data = StringIO()
img = Draw.MolToImage(row[molCol], size=size)
img.save(image_data, format='PNG')
worksheet.set_row(c, height=size[1]) # looks like height is not in px?
worksheet.insert_image(c, 0, "f", {'image_data': image_data})
c2 = 1
for x in cols:
if str(dataTypes[x]) == "object":
worksheet.write_string(c, c2, str(row[x])[:32000]) # string length is limited in xlsx
elif ('float' in str(dataTypes[x])) or ('int' in str(dataTypes[x])):
        if not (np.isnan(row[x]) or np.isinf(row[x])):  # skip NaN/inf, which cannot be written as numbers
worksheet.write_number(c, c2, row[x])
elif 'datetime' in str(dataTypes[x]):
worksheet.write_datetime(c, c2, row[x])
c2 += 1
c += 1
workbook.close()
image_data.close()
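# Example call (illustrative sketch, not part of the original module): embed a 300x300
# PNG of each molecule in column A and write the remaining columns as typed cells, as
# described in the docstring above:
#   SaveXlsxFromFrame(frame, 'molecules.xlsx', molCol='ROMol', size=(300, 300))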
def FrameToGridImage(frame, column = 'ROMol', legendsCol=None, **kwargs):
'''
Draw grid image of mols in pandas DataFrame.
'''
if legendsCol:
if legendsCol == frame.index.name:
img = Draw.MolsToGridImage(frame[column], legends=list(map(str, list(frame.index))), **kwargs)
else:
img = Draw.MolsToGridImage(frame[column], legends=list(map(str, list(frame[legendsCol]))), **kwargs)
else:
img = Draw.MolsToGridImage(frame[column], **kwargs)
return img
from rdkit.Chem.Scaffolds import MurckoScaffold
def AddMurckoToFrame(frame, molCol = 'ROMol', MurckoCol = 'Murcko_SMILES', Generic = False):
'''
Adds column with SMILES of Murcko scaffolds to pandas DataFrame. Generic set to true results in SMILES of generic framework.
'''
if Generic:
frame[MurckoCol] = frame.apply(lambda x: Chem.MolToSmiles(MurckoScaffold.MakeScaffoldGeneric(MurckoScaffold.GetScaffoldForMol(x[molCol]))), axis=1)
else:
frame[MurckoCol] = frame.apply(lambda x: Chem.MolToSmiles(MurckoScaffold.GetScaffoldForMol(x[molCol])), axis=1)
from rdkit.Chem import AllChem
def AlignMol(mol,scaffold):
"""
Aligns mol (RDKit mol object) to scaffold (SMILES string)
"""
scaffold = Chem.MolFromSmiles(scaffold)
AllChem.Compute2DCoords(scaffold)
AllChem.GenerateDepictionMatching2DStructure(mol,scaffold)
return mol
def AlignToScaffold(frame, molCol='ROMol', scaffoldCol='Murcko_SMILES'):
'''
Aligns molecules in molCol to scaffolds in scaffoldCol
'''
frame[molCol] = frame.apply(lambda x: AlignMol(x[molCol],x[scaffoldCol]), axis=1)
if __name__ == "__main__":
import sys
if pd is None:
print("pandas installation not found, skipping tests", file=sys.stderr)
else:
# version check
try:
v = pd.__version__.split('.')
except AttributeError:
# support for older versions of pandas
v = pd.version.version.split('.')
if v[0]=='0' and int(v[1])<10:
print("pandas installation >=0.10 not found, skipping tests",
file=sys.stderr)
else:
import doctest
failed,tried=doctest.testmod(optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE)
if failed:
sys.exit(failed)
# $Id$
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
| bsd-3-clause |
armorsun/CodeForEarth | Machine Learning/model_Logistic Regression.py | 1 | 2258 | import pickle
import numpy as np
import itertools
import math
######################################
# Part1 : Read File
######################################
f = open('feature.p','rb')
data = pickle.load(f)
f.close()
Feature = data[0]
Label = data[1]
print Feature.shape
print len(Label)
###########################################
#Training
##########################################
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
# reg = linear_model.BayesianRidge()
# reg.fit(Feature, Label)
clf = LogisticRegression()
clf.fit(Feature, Label)
print 'score', clf.score(Feature, Label)
###########################################
#Testing
##########################################
# simlat = np.random.uniform(-90,90,1).tolist()[0]
# simlog = np.random.uniform(0,360,1).tolist()[0]
output = []
simtesting = list(itertools.product(range(-90,91),range(1,361)))
simtesting = [[2018, e[0], e[1]] for e in simtesting]
#print reg.predict (simtesting)
print clf.predict(simtesting)
#t = 0
for i,test in enumerate(clf.predict_proba(simtesting).tolist()):
if test[1] > 0.92: # 0.925
print simtesting[i]
output.append(simtesting[i])
#t+=1
print len(output)
output = []
for y in [2018,2019,2020,2021,2022]:
    tmp = {}
    simtesting = list(itertools.product(range(-90,91),range(1,361)))
    simtesting = [[y, e[0], e[1]] for e in simtesting]
    #print reg.predict (simtesting)
    print clf.predict(simtesting)
    #t = 0
    for i,test in enumerate(clf.predict_proba(simtesting).tolist()):
        if test[1] > 0.98: # 0.925
            print simtesting[i]
            # use separate names for the Cartesian coordinates so that the
            # year loop variable y is not overwritten
            cart_x = 0.16 * math.sin(math.radians(float(simtesting[i][1]))) * math.cos(math.radians(float(simtesting[i][2])))
            cart_y = 0.16 * math.sin(math.radians(float(simtesting[i][1]))) * math.sin(math.radians(float(simtesting[i][2])))
            cart_z = 0.16 * math.cos(math.radians(float(simtesting[i][1])))
            tmp['Date'] = '01 Oct ' + '%d' % (y)
            tmp['Time'] = '12:00:00'
            tmp['X'] = cart_x
            tmp['Y'] = cart_y
            tmp['Z'] = cart_z
            output.append(simtesting[i])
            #t+=1
print len(output)
###########################################
#Writing JSON data
###########################################
import json
data = {
'date': output
}
with open('data_predict.json', 'w') as f:
json.dump(data, f) | mit |
rafaelvalle/MDI | plot_parameters_tried.py | 1 | 6241 | #!/usr/local/bin/python
import os
import argparse
import glob
import numpy as np
import deepdish as dd
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
# import seaborn as sbn
from mpl_toolkits.mplot3d import Axes3D
from params import IMAGES_DIRECTORY
def discrete_cmap(N, base_cmap=None):
"""Create an N-bin discrete colormap from the specified input map"""
# Note that if base_cmap is a string or None, you can simply do
# return plt.cm.get_cmap(base_cmap, N)
# The following works for string, None, or a colormap instance:
base = plt.cm.get_cmap(base_cmap)
color_list = base(np.linspace(0, 1, N))
cmap_name = base.name + str(N)
return base.from_list(cmap_name, color_list, N)
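# Example (illustrative, not part of the original script): the two-bin discrete 'jet'
# colormap used below for the dropout flag can be built with
#   cmap = discrete_cmap(2, 'jet')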
def plot_3d(params_dir):
N = 2 # bins for colormap
model_dirs = [name for name in os.listdir(params_dir)
if os.path.isdir(os.path.join(params_dir, name))]
colors = plt.get_cmap('plasma')
plt.figure(figsize=(20, 10))
ax = plt.subplot(111, projection='3d')
ax.set_xlabel('Momentum')
ax.set_ylabel('Learning Rate')
ax.zaxis.set_rotate_label(False) # disable automatic rotation
ax.set_zlabel('Training error rate', rotation=270)
ax.set_xticks(np.arange(0, 1.2, 0.2))
ax.set_yticks(np.arange(0, 0.011, 0.002))
ax.set_zticks(np.arange(0, 0.9, 0.1))
#ax.set_xticklabels(('No', 'Yes'))
#ax.set_zticklabels(('0','0.1','0.2','0.3','0.4','0.5','0.6','0.7','0.8'))
ax.invert_yaxis() # invert y axis
ax.invert_xaxis() # invert x axis
#ax.view_init(azim=-178, elev=32)
i = 0
for model_dir in model_dirs:
model_df = pd.DataFrame()
for param_path in glob.glob(os.path.join(params_dir,
model_dir) + '/*.h5'):
param = dd.io.load(param_path)
gd = {'learning rate': param['hyperparameters']['learning_rate'],
'momentum': param['hyperparameters']['momentum'],
'dropout': param['hyperparameters']['dropout'],
'val. objective': param['best_epoch']['validate_objective']}
model_df = model_df.append(pd.DataFrame(gd, index=[0]),
ignore_index=True)
if i != len(model_dirs) - 1:
ax.scatter(model_df['momentum'],
model_df['learning rate'],
model_df['val. objective'],
s=128,
marker=(i+3, 0),
label=model_dir,
# c=model_df['val. objective'],
c=model_df['dropout'],
cmap=discrete_cmap(N, 'jet'))
else:
im = ax.scatter(model_df['momentum'],
model_df['learning rate'],
model_df['val. objective'],
s=128,
marker=(i+4, 0),
label=model_dir,
# c=model_df['val. objective'],
c=model_df['dropout'],
cmap=discrete_cmap(N, 'jet'))
i += 1
cbar=plt.colorbar(im, label='Dropout',ticks=range(N))
cbar.ax.set_yticklabels(['No','Yes'])
cbar.set_label('Dropout', rotation=270)
#plt.legend()
plt.title('Adult dataset',weight='bold')
plt.show()
plt.savefig('{}.eps'.format(os.path.join(IMAGES_DIRECTORY, 'params3d_adult')), format='eps', dpi=1000)
plt.close()
def plot_2d(params_dir):
model_dirs = [name for name in os.listdir(params_dir)
if os.path.isdir(os.path.join(params_dir, name))]
if len(model_dirs) == 0:
model_dirs = [params_dir]
colors = plt.get_cmap('plasma')
plt.figure(figsize=(20, 10))
ax = plt.subplot(111)
ax.set_xlabel('Learning Rate')
ax.set_ylabel('Error rate')
i = 0
for model_dir in model_dirs:
model_df = pd.DataFrame()
for param_path in glob.glob(os.path.join(params_dir,
model_dir) + '/*.h5'):
param = dd.io.load(param_path)
gd = {'learning rate': param['hyperparameters']['learning_rate'],
'momentum': param['hyperparameters']['momentum'],
'dropout': param['hyperparameters']['dropout'],
'val. objective': param['best_epoch']['validate_objective']}
model_df = model_df.append(pd.DataFrame(gd, index=[0]),
ignore_index=True)
if i != len(model_dirs) - 1:
ax.scatter(model_df['learning rate'],
model_df['val. objective'],
s=128,
marker=(i+3, 0),
edgecolor='black',
linewidth=model_df['dropout'],
label=model_dir,
c=model_df['momentum'],
cmap=colors)
else:
im = ax.scatter(model_df['learning rate'],
model_df['val. objective'],
s=128,
marker=(i+3, 0),
edgecolor='black',
linewidth=model_df['dropout'],
label=model_dir,
c=model_df['momentum'],
cmap=colors)
i += 1
plt.colorbar(im, label='Momentum')
plt.legend()
plt.show()
plt.savefig('{}.eps'.format(os.path.join(IMAGES_DIRECTORY, 'params2d')), format='eps', dpi=1000)
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("params_dir", type=str,
help="Fullpath to parameter trial folders")
parser.add_argument("ndims", type=int, default=2,
help="Fullpath to parameter trial folders")
args = parser.parse_args()
if args.ndims == 2:
plot_2d(args.params_dir)
elif args.ndims == 3:
plot_3d(args.params_dir)
else:
raise Exception(
"{} is not a valid number of dimensions".format(args.ndmins))
| mit |
maryklayne/Funcao | sympy/plotting/plot_implicit.py | 12 | 14128 | """Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
the ``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fall back algorithm if the expression cannot be plotted using interval
arithmetic. It is also possible to request the fall back algorithm for all
plots.
Boolean combinations of expressions cannot be plotted by the fall back
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Symbol, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points):
super(ImplicitSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
self.has_equality = has_equality # If the expression has equality, i.e.
#Eq, Greaterthan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
temp = func(xinterval, yinterval)
except AttributeError:
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
        #Recursively call refine_pixels, which subdivides the intervals that are
        #neither True nor False according to the expression.
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
#Convert the array indices to x and y values
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
#The expression is valid in the interval. Change the contour
#array values to 1.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
and range as ``(symbol, xmin, xmax)``
- ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
and range as ``(symbol, ymin, ymax)``
If neither ``x_var`` nor ``y_var`` are given then the free symbols in the
expression will be assigned in the order they are sorted.
The following keyword arguments can also be used:
- ``adaptive``. Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
used. Default value is 200.
- ``title`` string .The title for the plot.
- ``xlabel`` string. The label for the x-axis
- ``ylabel`` string. The label for the y-axis
plot_implicit, by default, uses interval arithmetic to plot functions. If
the expression cannot be plotted using interval arithmetic, it defaults to
    generating a contour using a mesh grid with a fixed number of points. By
setting adaptive to False, you can force plot_implicit to use the mesh
grid. The mesh grid method can be effective when adaptive plotting using
    interval arithmetic fails to plot with small line width.
Examples:
=========
Plot expressions:
>>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols
>>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
... (x, -3, 3), (y, -3, 3))
With depth of recursion as argument.
>>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing.
>>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2), adaptive=False)
Using mesh grid with number of points as input.
>>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions.
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions.
>>> p7 = plot_implicit(And(y > x, y > -x))
When plotting an expression with a single variable (y - 1, for example),
specify the x or the y variable explicitly:
>>> p8 = plot_implicit(y - 1, y_var=y)
>>> p9 = plot_implicit(x - 1, x_var=x)
"""
has_equality = False # Represents whether the expression contains an Equality,
#GreaterThan or LessThan
def arg_expand(bool_expr):
"""
Recursively expands the arguments of an Boolean Function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
xyvar = [i for i in (x_var, y_var) if i is not None]
free_symbols = expr.free_symbols
range_symbols = Tuple(*flatten(xyvar)).free_symbols
undeclared = free_symbols - range_symbols
if len(free_symbols & range_symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
def _range_tuple(s):
if isinstance(s, Symbol):
return Tuple(s) + default_range
if len(s) == 3:
return Tuple(*s)
raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)
if len(xyvar) == 0:
xyvar = list(_sort_gens(free_symbols))
var_start_end_x = _range_tuple(xyvar[0])
x = var_start_end_x[0]
if len(xyvar) != 2:
if x in undeclared or not undeclared:
xyvar.append(Dummy('f(%s)' % x.name))
else:
xyvar.append(undeclared.pop())
var_start_end_y = _range_tuple(xyvar[1])
use_interval = kwargs.pop('adaptive', True)
nb_of_points = kwargs.pop('points', 300)
depth = kwargs.pop('depth', 0)
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, use_interval, depth,
nb_of_points)
show = kwargs.pop('show', True)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
# set the x and y labels
kwargs.setdefault('xlabel', var_start_end_x[0].name)
kwargs.setdefault('ylabel', var_start_end_y[0].name)
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
| bsd-3-clause |
subutai/nupic | examples/tm/tm_high_order.py | 15 | 17726 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple tutorial that shows some features of the Temporal Memory.
The following program has the purpose of presenting some
basic properties of the Temporal Memory, in particular when it comes
to how it handles high-order sequences.
"""
import numpy as np
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nupic.bindings.algorithms import TemporalMemory as TM
def accuracy(current, predicted):
"""
Computes the accuracy of the TM at time-step t based on the prediction
at time-step t-1 and the current active columns at time-step t.
@param current (array) binary vector containing current active columns
@param predicted (array) binary vector containing predicted active columns
@return acc (float) prediction accuracy of the TM at time-step t
"""
accuracy = 0
if np.count_nonzero(predicted) > 0:
accuracy = float(np.dot(current, predicted))/float(np.count_nonzero(predicted))
return accuracy
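# Worked example (illustrative, not part of the original tutorial): with
# current = [1, 0, 1, 0] and predicted = [1, 1, 0, 0], the dot product is 1 and
# predicted has 2 active bits, so accuracy() returns 1/2 = 0.5.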
def corruptVector(v1, noiseLevel, numActiveCols):
"""
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector
"""
size = len(v1)
v2 = np.zeros(size, dtype="uint32")
bitsToSwap = int(noiseLevel * numActiveCols)
# Copy the contents of v1 into v2
for i in range(size):
v2[i] = v1[i]
for _ in range(bitsToSwap):
i = random.randrange(size)
if v2[i] == 1:
v2[i] = 0
else:
v2[i] = 1
return v2
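# Illustrative note (not part of the original tutorial): with noiseLevel=0.3 and
# numActiveCols=40 (the 2% sparsity used below), corruptVector inverts a randomly
# chosen bit of the copy int(0.3 * 40) = 12 times (indices may repeat).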
def showPredictions():
"""
Shows predictions of the TM when presented with the characters A, B, C, D, X, and
Y without any contextual information, that is, not embedded within a sequence.
"""
for k in range(6):
tm.reset()
print "--- " + "ABCDXY"[k] + " ---"
tm.compute(set(seqT[k][:].nonzero()[0].tolist()), learn=False)
activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())]
predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
print("Active cols: " + str(np.nonzero(currentColumns)[0]))
print("Predicted cols: " + str(np.nonzero(predictedColumns)[0]))
print ""
def trainTM(sequence, timeSteps, noiseLevel):
"""
Trains the TM with given sequence for a given number of time steps and level of input
corruption
@param sequence (array) array whose rows are the input characters
@param timeSteps (int) number of time steps in which the TM will be presented with sequence
@param noiseLevel (float) amount of noise to be applied on the characters in the sequence
"""
currentColumns = np.zeros(tm.numberOfColumns(), dtype="uint32")
predictedColumns = np.zeros(tm.numberOfColumns(), dtype="uint32")
ts = 0
for t in range(timeSteps):
tm.reset()
for k in range(4):
v = corruptVector(sequence[k][:], noiseLevel, sparseCols)
tm.compute(set(v[:].nonzero()[0].tolist()), learn=True)
activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())]
acc = accuracy(currentColumns, predictedColumns)
x.append(ts)
y.append(acc)
ts += 1
predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
uintType = "uint32"
random.seed(1)
tm = TM(columnDimensions = (2048,),
cellsPerColumn=8,
initialPermanence=0.21,
connectedPermanence=0.3,
minThreshold=15,
maxNewSynapseCount=40,
permanenceIncrement=0.1,
permanenceDecrement=0.1,
activationThreshold=15,
predictedSegmentDecrement=0.01,
)
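# Illustrative note (not part of the original tutorial): with 2048 columns and 8 cells
# per column the TM above has 2048 * 8 = 16384 cells; the 2% sparsity set below
# activates int(2048 * 0.02) = 40 columns per input character.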
sparsity = 0.02
sparseCols = int(tm.numberOfColumns() * sparsity)
# We will create a sparse representation of characters A, B, C, D, X, and Y.
# In this particular example we manually construct them, but usually you would
# use the spatial pooler to build these.
seq1 = np.zeros((4, tm.numberOfColumns()), dtype="uint32")
seq1[0, 0:sparseCols] = 1 # Input SDR representing "A"
seq1[1, sparseCols:2*sparseCols] = 1 # Input SDR representing "B"
seq1[2, 2*sparseCols:3*sparseCols] = 1 # Input SDR representing "C"
seq1[3, 3*sparseCols:4*sparseCols] = 1 # Input SDR representing "D"
seq2 = np.zeros((4, tm.numberOfColumns()), dtype="uint32")
seq2[0, 4*sparseCols:5*sparseCols] = 1 # Input SDR representing "X"
seq2[1, sparseCols:2*sparseCols] = 1 # Input SDR representing "B"
seq2[2, 2*sparseCols:3*sparseCols] = 1 # Input SDR representing "C"
seq2[3, 5*sparseCols:6*sparseCols] = 1 # Input SDR representing "Y"
seqT = np.zeros((6, tm.numberOfColumns()), dtype="uint32")
seqT[0, 0:sparseCols] = 1 # Input SDR representing "A"
seqT[1, sparseCols:2*sparseCols] = 1 # Input SDR representing "B"
seqT[2, 2*sparseCols:3*sparseCols] = 1 # Input SDR representing "C"
seqT[3, 3*sparseCols:4*sparseCols] = 1 # Input SDR representing "D"
seqT[4, 4*sparseCols:5*sparseCols] = 1 # Input SDR representing "X"
seqT[5, 5*sparseCols:6*sparseCols] = 1 # Input SDR representing "Y"
# PART 1. Feed the TM with sequence "ABCD". The TM will eventually learn
# the pattern and its prediction accuracy will go to 1.0 (except in-between sequences
# where the TM doesn't output any prediction)
print ""
print "-"*50
print "Part 1. We present the sequence ABCD to the TM. The TM will eventually"
print "will learn the sequence and predict the upcoming characters. This can be"
print "measured by the prediction accuracy in Fig 1."
print "N.B. In-between sequences the accuracy is 0.0 as the TM does not output"
print "any prediction."
print "-"*50
print ""
x = []
y = []
trainTM(seq1, timeSteps=10, noiseLevel=0.0)
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 1: TM learns sequence ABCD")
plt.savefig("figure_1")
plt.close()
print ""
print "-"*50
print "Once the TM has learned the sequence ABCD, we will present the individual"
print "characters to the TM to know its prediction. The TM outputs the columns"
print "that become active upon the presentation of a particular character as well"
print "as the columns predicted in the next time step. Here, you should see that"
print "A predicts B, B predicts C, C predicts D, and D does not output any"
print "prediction."
print "N.B. Here, we are presenting individual characters, that is, a character"
print "deprived of context in a sequence. There is no prediction for characters"
print "X and Y as we have not presented them to the TM in any sequence."
print "-"*50
print ""
showPredictions()
print ""
print "-"*50
print "Part 2. We now present the sequence XBCY to the TM. As expected, the accuracy will"
print "drop until the TM learns the new sequence (Fig 2). What will be the prediction of"
print "the TM if presented with the sequence BC? This would depend on what character"
print "anteceding B. This is an important feature of high-order sequences."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=10, noiseLevel=0.0)
# In this figure you can see how the TM starts making good predictions for particular
# characters (spikes in the plot). Then, it will get half of its predictions right, which
# correspond to the times in which is presented with character C. After some time, it
# will learn correctly the sequence XBCY, and predict its characters accordingly.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 2: TM learns new sequence XBCY")
plt.savefig("figure_2")
plt.close()
print ""
print "-"*50
print "We will present again each of the characters individually to the TM, that is,"
print "not within any of the two sequences. When presented with character A the TM"
print "predicts B, B predicts C, but this time C outputs a simultaneous prediction of"
print "both D and Y. In order to disambiguate, the TM would require to know if the"
print "preceding characters were AB or XB. When presented with character X the TM"
print "predicts B, whereas Y and D yield no prediction."
print "-"*50
print ""
showPredictions()
# PART 3. Now we will present noisy inputs to the TM. We will add noise to the sequence XBCY
# by corrupting 30% of its bits. We would like to see how the TM responds in the presence of
# noise and how it recovers from it.
print ""
print "-"*50
print "Part 3. We will add noise to the sequence XBCY by corrupting 30% of the bits in the vectors"
print "encoding each character. We would expect to see a decrease in prediction accuracy as the"
print "TM is unable to learn the random noise in the input (Fig 3). However, this decrease is not"
print "significant."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.3)
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 3: Accuracy in TM with 30% noise in input")
plt.savefig("figure_3")
plt.close()
print ""
print "-"*50
print "Let's have a look again at the output of the TM when presented with noisy"
print "input (30%). Here, the noise is low that the TM is not affected by it,"
print "which would be the case if we saw 'noisy' columns being predicted when"
print "presented with individual characters. Thus, we could say that the TM exhibits"
print "resilience to noise in its input."
print "-"*50
print ""
showPredictions()
# Let's corrupt the sequence more by adding 50% of noise to each of its characters.
# Here, we would expect to see some 'noisy' columns being predicted when the TM is
# presented with the individual characters.
print ""
print "-"*50
print "Now, we will set noise to be 50% of the bits in the characters X, B, C, and Y."
print "As expected, the accuracy will decrease (Fig 5) and 'noisy' columns will be"
print "predicted by the TM."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.5)
print ""
print "-"*50
print "Let's have a look again at the output of the TM when presented with noisy"
print "input. The prediction of some characters (eg. X) now includes columns that"
print "are not related to any other character. This is because the TM tried to learn"
print "the noise in the input patterns."
print "-"*50
print ""
showPredictions()
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 4: Accuracy in TM with 50% noise in input")
plt.savefig("figure_4")
plt.close()
# Will the TM be able to forget the 'noisy' columns learned in the previous step?
# We will present the TM with the original sequence XBCY so it forgets the 'noisy'.
# columns.
x = []
y = []
trainTM(seq2, timeSteps=10, noiseLevel=0.0)
print ""
print "-"*50
print "After presenting the original sequence XBCY to the TM, we would expect to see"
print "the predicted noisy columns from the previous step disappear. We will verify that"
print "by presenting the individual characters to the TM."
print "-"*50
print ""
showPredictions()
# We can see how the prediction accuracy goes back to 1.0 (as before, not in-between sequences)
# when the TM 'forgets' the noisy columns.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 5: TM forgets noise in sequence XBCY when noise is over")
plt.savefig("figure_5")
plt.close()
# Let's corrupt the sequence even more and add 90% of noise to each of its characters.
# Here, we would expect to see even more of a decrease in accuracy along with more 'noisy'
# columns being predicted.
print ""
print "-"*50
print "We will add more noise to the characters in the sequence XBCY. This time we will"
print "corrupt 90% of its contents. As expected, the accuracy will decrease (Fig 6) and"
print "'noisy' columns will be predicted by the TM."
print "-"*50
print ""
x = []
y = []
trainTM(seq2, timeSteps=50, noiseLevel=0.9)
print ""
print "-"*50
print "Next, we will have a look at the output of the TM when presented with the"
print "individual characters of the sequence. As before, we see 'noisy' predicted"
print "columns emerging as a result of the TM trying to learn the noise."
print "-"*50
print ""
showPredictions()
# In this figure we can observe how the prediction accuracy is affected by the presence
# of noise in the input. However, the accuracy does not drops dramatically even with 90%
# of noise which implies that the TM exhibits some resilience to noise in its input
# which means that it does not forget easily a well-learned, real pattern.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 6: Accuracy with 90% noise in input")
plt.savefig("figure_6")
plt.close()
# Let's present the original sequence to the TM in order to make it forget the noisy columns.
# After this, the TM will predict accurately the sequence again, and its predictions will
# not include 'noisy' columns anymore.
x = []
y = []
trainTM(seq2, timeSteps=25, noiseLevel=0.0)
# We will observe how the prediction accuracy gets back to 1.0 (not in-between sequences)
# as the TM is presented with the original sequence.
plt.ylim([-0.1,1.1])
plt.plot(x, y)
plt.xlabel("Timestep")
plt.ylabel("Prediction Accuracy")
plt.title("Fig. 7: When noise is suspended, accuracy is restored")
plt.savefig("figure_7")
plt.close()
# The TM restores its prediction accuracy and it can be seen when presented with the individual characters.
# There's no noisy columns being predicted.
print ""
print "-"*50
print "After presenting noisy input to the TM, we present the original sequence in"
print "order to make it re-learn XBCY. We verify that this was achieved by presenting"
print "the TM with the individual characters and observing its output. Again, we can"
print "see that the 'noisy' columns are not being predicted anymore, and that the"
print "prediction accuracy goes back to 1.0 when the sequence is presented (Fig 7)."
print "-"*50
print ""
showPredictions()
# PART 4. Now, we will present both sequences ABCD and XBCY randomly to the TM.
# For this purpose we will start with a new TM.
# What would be the output of the TM when presented with character D if it has
# been exposed to sequences ABCD and XBCY occurring randomly one after the other?
# If one quarter of the time the TM sees the sequence ABCDABCD, another quarter the
# TM sees ABCDXBCY, another quarter it sees XBCYXBCY, and the last quarter it saw
# XBCYABCD, then the TM would exhibit simultaneous predictions for characters D, Y
# and C.
print ""
print "-"*50
print "Part 4. We will present both sequences ABCD and XBCY randomly to the TM."
print "Here, we might observe simultaneous predictions occurring when the TM is"
print "presented with characters D, Y, and C. For this purpose we will use a"
print "blank TM"
print "NB. Here we will not reset the TM after presenting each sequence with the"
print "purpose of making the TM learn different predictions for D and Y."
print "-"*50
print ""
tm = TM(columnDimensions = (2048,),
cellsPerColumn=8,
initialPermanence=0.21,
connectedPermanence=0.3,
minThreshold=15,
maxNewSynapseCount=40,
permanenceIncrement=0.1,
permanenceDecrement=0.1,
activationThreshold=15,
predictedSegmentDecrement=0.01,
)
for t in range(75):
rnd = random.randrange(2)
for k in range(4):
if rnd == 0:
tm.compute(set(seq1[k][:].nonzero()[0].tolist()), learn=True)
else:
tm.compute(set(seq2[k][:].nonzero()[0].tolist()), learn=True)
print ""
print "-"*50
print "We now have a look at the output of the TM when presented with the individual"
print "characters A, B, C, D, X, and Y. We might observe simultaneous predictions when"
print "presented with character D (predicting A and X), character Y (predicting A and X),"
print "and when presented with character C (predicting D and Y)."
print "N.B. Due to the stochasticity of this script, we might not observe simultaneous"
print "predictions in *all* the aforementioned characters."
print "-"*50
print ""
showPredictions()
print ""
print "-*"*25
print "Scroll up to see the development of this simple"
print "tutorial. Also open the source file to see more"
print "comments regarding each part of the script."
print "All images generated by this script will be saved"
print "in your current working directory."
print "-*"*25
print ""
| agpl-3.0 |
heshamelmatary/rtems-rumpkernel | testsuites/tmtests/tmcontext01/plot.py | 14 | 1341 | #
# Copyright (c) 2014 embedded brains GmbH. All rights reserved.
#
# The license and distribution terms for this file may be
# found in the file LICENSE in this distribution or at
# http://www.rtems.org/license/LICENSE.
#
import libxml2
from libxml2 import xmlNode
import matplotlib.pyplot as plt
doc = libxml2.parseFile("tmcontext01.scn")
ctx = doc.xpathNewContext()
colors = ['k', 'r', 'b', 'g', 'y', 'm']
def plot(y, color, label, first):
n=len(y)
x=range(0, n)
if first:
plt.plot(x, y, color=color, label=label)
else:
plt.plot(x, y, color=color)
plt.title("context switch timing test")
plt.xlabel('function nest level')
plt.ylabel('context switch time [ns]')
c = 0
for e in ["normal", "dirty"]:
first = True
for i in ["Min", "Q1", "Q2", "Q3", "Max"]:
y=map(xmlNode.getContent, ctx.xpathEval("/Test/ContextSwitchTest[@environment='" + e + "' and not(@load)]/Sample/" + i))
plot(y, colors[c], e, first)
first = False
c = c + 1
load = 1
while load > 0:
first = True
for i in ["Min", "Q1", "Q2", "Q3", "Max"]:
y=map(xmlNode.getContent, ctx.xpathEval("/Test/ContextSwitchTest[@environment='dirty' and @load='" + str(load) + "']/Sample/" + i))
if len(y) > 0:
plot(y, colors[c], "load " + str(load), first)
first = False
else:
load = 0
if load > 0:
load = load + 1
c = c + 1
plt.legend()
plt.show()
| gpl-2.0 |
jrbourbeau/composition | analysis/plotting_functions.py | 1 | 4563 | #!/usr/bin/env python
from __future__ import division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn.apionly as sns
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedShuffleSplit, cross_val_score, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import validation_curve, GridSearchCV
# Plotting decision regions
def plot_decision_regions(X, y, classifier, resolution=0.02, scatter_fraction=0.025, ax=None):
# setup marker generator and color map
markers = ('s', '^', 'o', '^', 'v')
colors = ('b', 'g', 'r', 'y', 'c')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min(), X[:, 0].max()
x2_min, x2_max = X[:, 1].min(), X[:, 1].max()
# x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
# x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
if ax is None:
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap, aspect='auto')
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
else:
ax.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap, aspect='auto')
ax.set_xlim(xx1.min(), xx1.max())
ax.set_ylim(xx2.min(), xx2.max())
# plot class samples
if scatter_fraction != None:
fraction_event_selection_mask = (np.random.uniform(0, 1, len(y)) <= scatter_fraction)
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[(y == cl) & fraction_event_selection_mask, 0],
y=X[(y == cl) & fraction_event_selection_mask, 1],
alpha=0.5, c=cmap(idx),
marker=markers[idx], label=cl)
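# Example call (illustrative sketch, not part of the original analysis; assumes clf is
# a classifier already fitted on a two-feature matrix X with class labels y):
#   fig, ax = plt.subplots()
#   plot_decision_regions(X, y, classifier=clf, resolution=0.02, ax=ax)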
def histogram_2D(x, y, bins, log_counts=False, make_prob=False, ax=None, **opts):
h, xedges, yedges = np.histogram2d(x, y, bins=bins, normed=False)
h = np.rot90(h)
h = np.flipud(h)
h = np.ma.masked_where(h == 0, h)
if make_prob:
ntot = np.sum(h, axis=0).astype('float')
ntot[ntot == 0] = 1.
h /= ntot
if log_counts:
h = np.log10(h)
# extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
colormap = 'viridis'
if ax is not None:
im = ax.imshow(h, extent=extent, origin='lower',
interpolation='none', cmap=colormap, aspect='auto')
else:
im = plt.imshow(h, extent=extent, origin='lower',
interpolation='none', cmap=colormap, aspect='auto')
# if log_counts:
# plt.colorbar(im, label='$\log_{10}(\mathrm{Counts})$')
# else:
# plt.colorbar(im, label='$Counts$')
if not make_prob and not log_counts:
cb = plt.colorbar(im, label='Counts')
if not make_prob and log_counts:
cb = plt.colorbar(im, label='$\log_{10}(\mathrm{Counts})$')
return im
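# Hedged usage sketch (illustrative only; the DataFrame and column names are
# assumptions, not taken from this file):
#   fig, ax = plt.subplots()
#   im = histogram_2D(df['log_energy'], df['charge'], bins=50,
#                     log_counts=True, ax=ax)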
def make_comp_frac_histogram(x, y, proton_mask, iron_mask, bins, ax):
# charge_bins = np.linspace(0, 7, 50)
# energy_bins = np.linspace(6.2, 9.51, 50)
# energy_bins = np.arange(6.2, 9.51, 0.05)
# energy_midpoints = (energy_bins[1:] + energy_bins[:-1]) / 2
proton_hist, xedges, yedges = np.histogram2d(x[proton_mask],
y[proton_mask],
bins=bins,
normed=False)
proton_hist = np.ma.masked_where(proton_hist == 0, proton_hist)
iron_hist, xedges, yedges = np.histogram2d(x[iron_mask],
y[iron_mask],
bins=bins,
normed=False)
h = proton_hist / (proton_hist + iron_hist)
h = np.rot90(h)
h = np.flipud(h)
h = np.ma.masked_where(h == 0, h)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
colormap = 'coolwarm'
im = ax.imshow(h, extent=extent, origin='lower',
interpolation='none', cmap=colormap,
aspect='auto', vmin=0, vmax=1)
x = np.arange(6.2, 9.51, 0.1)
return im
| mit |
airanmehr/Utils | Estimate.py | 1 | 17550 | '''
Copyleft Oct 10, 2015 Arya Iranmehr, PhD Student, Bafna's Lab, UC San Diego, Email: [email protected]
'''
import sys
import numpy as np
import pandas as pd
try:
from SFSelect import metaSVM
sys.modules['metaSVM'] = metaSVM
#Models are loaded once.
from SFSelect.SFselect import sfselect,SVM,SVMXP
except:
SVM,SVMXP= None,None
class Estimate:
@staticmethod
def pi(a):
snp=a==1
if isinstance(snp,pd.DataFrame): snp=snp.values
n=snp.shape[0]
return np.mean([sum(np.logical_xor(snp[i,:],snp[j,:])) for i in range(n-1) for j in range(i+1,n)])
@staticmethod
def getSAFS(x,bins=10, n=None,fixedRangeHist=True,removeFixedSites=False,fold=False,normed=False,noBinSmalln=False):
afs=Estimate.getAFS(x, bins, n=n, fixedRangeHist=fixedRangeHist,removeFixedSites=removeFixedSites,normed=normed,fold=fold,noBinSmalln=noBinSmalln)
if afs is not None:
return afs*afs.index.values
else:
return None
@staticmethod
def getAFS(freqs,bins=10,n=None,fixedRangeHist=True,removeFixedSites=True,name=None,normed=False,fold=False,noBinSmalln=False):
if removeFixedSites:
x=freqs[(freqs>0) & (freqs<1)].copy(True)
else:
x=freqs.copy(True)
if noBinSmalln:
nn=1./x.value_counts().sort_index().index[0]
if nn <2*bins:bins=-1
if n is None: n=nn
if not x.size:
if n is not None:
return pd.Series(0,index=range(1,n))
if fold:
x[x>0.5]=1-x[x>0.5]
if n/2 <2*bins: bins=-1
if bins>0:
if fixedRangeHist:
rang=[0,1]
if x.max()<=0.5: rang=[0,0.5]
counts,limits=np.histogram(x, bins,range=rang) # counts=xi
else:
counts,limits=np.histogram(x, bins) # counts=xi
centers = 0.5*(limits[1:]+limits[:-1]) # center = i
if n is not None:
centers=centers*n
afs= pd.Series(counts, index=centers) # site frequency spectrum
else:
afs=pd.Series(x.round(3)).value_counts()
if not len(afs.index): return None
if n is not None:
afs.index=np.round(afs.index*n)
afs=afs.sort_index()
if 0 in afs.index:
if 1 in afs.index:
afs.loc[1]+=afs.loc[0]
else:
afs.loc[1]=afs.loc[0]
afs=afs.drop(0).sort_index()
if name is not None:
afs.name=name
if bins==-1:
afs=(afs+pd.Series(0,range(1,(n,int(n/2)+1)[fold]))).fillna(0)
afs.index=map(int,afs.index)
if normed:
return afs/float(afs.sum())
return afs
@staticmethod
def plotSAFS(x,bins=10,n=None, fixedRangeHist=True,removeFixedSites=False):
Estimate.getSAFS(x, bins=bins, n=n, fixedRangeHist=fixedRangeHist,removeFixedSites=removeFixedSites).plot(kind='bar',width=1,grid=True);plt.xlim([-0.5,bins-0.5])
@staticmethod
def getWeights(safs,method,n):
i=safs.index.values*1.0
if method is 'watterson':
w=1/i/(1./np.arange(1,n)).sum()
elif method is 'pi':
w=(n-i) /((n*(n-1)/2.))
elif method is 'faywu':
w=i / (n*(n-1)/2.)
return w
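# The weights above implement the classical frequency-spectrum estimators:
# watterson: w_i = 1/(i*a1) with a1 = sum_{j=1}^{n-1} 1/j,
# pi:        w_i = (n-i)/(n*(n-1)/2),
# faywu:     w_i = i/(n*(n-1)/2).
# Each estimate is then sum_i (i*xi_i)*w_i, where xi_i is the number of sites
# with derived-allele count i (the scaled AFS built by getSAFS above).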
@staticmethod
def getEstimate(x=None,x_XP_pop=None, n=None, snp=None, method='all', bins=-1, normalizeTajimaD=True, averageResults=False,
svm_model_sfselect=SVM, fixedRangeHist=True, removeFixedSites=True):
"""
Compute different estimates either based on AF (x and n should be given) or SNP matrix (only SNP matrix suffices)
watterson: watterson's theta
pi: tajima's pi (avg heterozygosity)
faywu: FayWu estimate of theta
tajimaD: pi - watterson. Note: a negative Tajima's D signals selection, so -tajimaD serves as the predictor of selection here.
H: pi - faywu. Note: a negative H signals selection, so -H serves as the predictor of selection here.
Parameters:
x: can be either tensor, matrix or vector of AFS with dimension T x L x R, or L x R or L
n: number of samples
snp: snp matrix in dataframe format
"""
if x is not None: # if AF is given
if len(x.shape)==3: # if x is tensor T x L x R
return pd.DataFrame([Estimate.getEstimate(x=xt, method=method, bins=bins, n=n, normalizeTajimaD=normalizeTajimaD, averageResults=averageResults, svm_model_sfselect=svm_model_sfselect, fixedRangeHist=fixedRangeHist,removeFixedSites=removeFixedSites) for xt in x])
elif len(x.shape)==2: # if x is matrix L x R
if averageResults:
return np.mean([Estimate.getEstimate(x=xr, method=method, bins=bins, n=n, normalizeTajimaD=normalizeTajimaD, averageResults=averageResults, svm_model_sfselect=svm_model_sfselect, fixedRangeHist=fixedRangeHist,removeFixedSites=removeFixedSites) for xr in x.T])
else:
return ([Estimate.getEstimate(x=xr, method=method, bins=bins, n=n, normalizeTajimaD=normalizeTajimaD, averageResults=averageResults, svm_model_sfselect=svm_model_sfselect, fixedRangeHist=fixedRangeHist,removeFixedSites=removeFixedSites) for xr in x.T])
elif len(x.shape)==1: # if x is L-dim vector of AF
if method=='SFSelect':
if x_XP_pop is not None:
svm_model_sfselect = SVMXP
return sfselect(x,neut_pop_freqs=x_XP_pop, svm=svm_model_sfselect,removeFixedSites=removeFixedSites)['score']
safs=Estimate.getSAFS(x=x, bins=bins, n=n, fixedRangeHist=fixedRangeHist,removeFixedSites=removeFixedSites)
elif snp is not None: # if SNP is given
safs=snp.sum(0).value_counts().sort_index()
safs=safs[safs.index!=0]
safs = safs* safs.index.values
n=snp.shape[0]
else:
return None
if method=='all':
if removeFixedSites:
m=x[(x>0)&(x<1)].size
else:
m=x.size
return ({
'm':m,
'w': Estimate.getEstimate(x=x, n=n, snp=snp, method='watterson',
bins=bins,
averageResults=averageResults,
svm_model_sfselect=svm_model_sfselect,
fixedRangeHist=fixedRangeHist,
removeFixedSites=removeFixedSites),
'pi': Estimate.getEstimate(x=x, n=n, snp=snp, method='pi',
bins=bins,
averageResults=averageResults,
svm_model_sfselect=svm_model_sfselect,
fixedRangeHist=fixedRangeHist,
removeFixedSites=removeFixedSites),
'D': Estimate.getEstimate(x=x, n=n, snp=snp, method='tajimaD',
bins=bins, normalizeTajimaD=normalizeTajimaD,
averageResults=averageResults,
svm_model_sfselect=svm_model_sfselect,
fixedRangeHist=fixedRangeHist,
removeFixedSites=removeFixedSites),
'H': Estimate.getEstimate(x=x, n=n, snp=snp, method='H',
bins=bins,
averageResults=averageResults,
svm_model_sfselect=svm_model_sfselect,
fixedRangeHist=fixedRangeHist,
removeFixedSites=removeFixedSites),
'SFSelect': Estimate.getEstimate(x=x, n=n, snp=snp, method='SFSelect', bins=bins, normalizeTajimaD=True,
averageResults=averageResults, svm_model_sfselect=svm_model_sfselect,
fixedRangeHist=fixedRangeHist, removeFixedSites=removeFixedSites)})
if safs is None:return None
if method is 'tajimaD':
w = Estimate.getWeights(safs, 'pi', n) - Estimate.getWeights(safs, 'watterson', n)
if normalizeTajimaD:
if x is not None:
m = len(x)
if snp is not None:
m = snp.shape[1]
sig = Estimate.tajimaDstd(n=n, m=m)
w /= sig
elif method is 'H':
w = Estimate.getWeights(safs, 'pi', n) - Estimate.getWeights(safs, 'faywu', n)
else:
w=Estimate.getWeights(safs, method, n)
return safs.dot(w)
@staticmethod
def tajimaDstd(n,m):
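# Tajima (1989) normalization constants: a1 = sum_{i=1}^{n-1} 1/i and
# a2 = sum_{i=1}^{n-1} 1/i**2; the return value is the standard deviation
# sqrt(e1*m + e2*m*(m-1)) of (pi - theta_W) for m segregating sites.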
a1=(1./np.arange(1,n)).sum(); a2=(1./np.arange(1,n)**2).sum()
b1=(n+1.0)/(3*n-3); b2=2.0*(n*n+n+3)/(9*n*n-9*n)
c1=b1-1.0/a1; c2=b2 -(n+2)/(a1*n) +a2/(a1*a1)
e1=c1/a1; e2=c2/(a1*a1+a2)
return np.sqrt(e1*m+e2*m*m-e2*m)
@staticmethod
def watterson(snp=None,n=None,m=None):
"""
computes watterson estimate of theta
snp: is m x n numpy matrix
n: number of samples
m: number of segregating sites
"""
if n is None or m is None:
n,m=snp.shape
return m/(1./np.arange(1,n)).sum()
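# Worked example (illustrative): with n=10 samples and m=5 segregating sites,
# a1 = sum(1/i for i in 1..9) ~= 2.829, so Estimate.watterson(n=10, m=5) ~= 1.77.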
@staticmethod
def rho(snp):
return snp.shape[1]/np.log(snp.shape[0])
@staticmethod
def getAllEstimates(snp):
thetaw=Estimate.watterson(snp)
pi=Estimate.pi(snp)
tajD=(pi-thetaw)#/Estimate.tajimaDstd(n=snp.shape[0],m=snp.shape[1])
n=snp.shape[0]
x=snp.mean(0)
fay=2.*n/(n-1)*np.linalg.norm(x)**2
sfsel = Estimate.getEstimate(x=x, n=snp.shape[0], method='SFSelect')
return pd.DataFrame([('Theta', thetaw),('Pi',pi),('TajimaD',tajD),('m',int(snp.shape[1])), ('FayWu', fay),('SFSelect', sfsel)],columns=['method','estimate'])
@staticmethod
def getAllEstimatesX(X, n=200, bins=-1, method=None):
x=X[(X!=0) & (X!=1)]
m = x.size
if not m:
i,v=zip(*[('Theta', None), ('Pi', None), ('TajimaD', None), ('m', None), ('FayWu', None), ('SFSelect', None)])
all = pd.Series(v,index=i,name=X.name)
all.index.name='method'
if method is None:
return all
else:
return None
thetaw = Estimate.watterson(n=n, m=m)
pi = Estimate.getEstimate(x=x, n=n, method='pi', bins=bins)
tajD = (pi - thetaw) #/ Estimate.tajimaDstd(n=n, m=m)
fay = Estimate.getEstimate(x=x, n=n, method='H', bins=bins)
sfsel = Estimate.getEstimate(x=x, n=n, method='SFSelect')
i,v=zip(*[('Theta', thetaw), ('Pi', pi), ('TajimaD', tajD), ('m', int(m)), ('FayWu', fay), ('SFSelect', sfsel)])
all = pd.Series(v,index=i,name=X.name)
all.index.name='method'
if method is None:
return all
else:
return all.loc[method]
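# Hedged usage sketch (made-up data, for illustration only):
#   freqs = pd.Series(np.random.uniform(0.01, 0.99, 500), name='window_0')
#   stats = Estimate.getAllEstimatesX(freqs, n=200)
#   # -> Series indexed by 'Theta', 'Pi', 'TajimaD', 'm', 'FayWu', 'SFSelect'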
@staticmethod
def mu(S=6188,l=21647181,T=5300000):
"""S/2 = mu*l*T"""
return S/(2.*l*T)
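# Worked example: with the defaults S=6188, l=21647181 and T=5300000,
# mu = 6188/(2*21647181*5300000) ~= 2.7e-11 under the S/2 = mu*l*T identity above.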
@staticmethod
def Ne(mu,S=25742,n=12,l=21647181):
"""S = mu*N*Ttot*l"""
Ttot= sum(2./np.arange(1,n))
print 'Ttot' , Ttot
print 'Ttotmu' , (mu*Ttot)
return S/(mu*Ttot*l)
@staticmethod
def LDold(SNP,site=None,sites=None,positions=None,measure='DPrime'):
"""
Computes All Measures of LD between all sites of a SNP matrix to a site
SNP: a pandas dataframe which its columns contains position
Site: index of the column which LD is computed for
Sites: index of the columns which pairwise LD is computed for
Positions: positions which pairwise LD is computed for
measure: {'all', 'D','DPrime','Rho','RhoPrime', 'Rho2', 'DPrime2'}
NOTE THAT RhoPrime=DPrime
"""
if site is None:
if sites is not None:
D=pd.DataFrame(map( lambda x: Estimate.LD(SNP.iloc[:,sites],x,measure),range(sites.shape[0])));
elif positions is not None:
D=pd.DataFrame(map( lambda x: Estimate.LD(SNP.loc[:,positions],x,measure),range(positions.shape[0])));
else:
D=pd.DataFrame(map( lambda x: Estimate.LD(SNP,x,measure),range(SNP.shape[1])));
D.index=D.columns
return D
LD=[]
p0=(SNP.iloc[:,site]==0).mean()
p1=(SNP.iloc[:,site]==1).mean()
for i in range(SNP.shape[1]):
q0=np.mean(SNP.iloc[:,i]==0)
q1=np.mean(SNP.iloc[:,i]==1)
x00=( ( SNP.iloc[:,i]==0) & (SNP.iloc[:,site]==0 ) ).mean()
x11 = ((SNP.iloc[:, i] == 1) & (SNP.iloc[:, site] == 1)).mean()
D = (x00 + x11) / 2. - p0 * q0
if D<0:
Dmax=min(p0*q0,p1*q1)
else:
Dmax=min(p0*q1,p1*q0)
Dprime=D/Dmax
denom=np.sqrt(p0*p1*q0*q1)
if denom:
rho=D/denom
LD.append((D,Dprime, rho, rho/(Dmax/denom), {'p':[p0,p1],'q':[q0,q1],'x00':x00},Dmax))
else:
LD.append((D, Dprime, None, None) )
LD= pd.DataFrame(LD,index=SNP.columns,columns=['D','DPrime','Rho','RhoPrime','Freq','Dmax'])
LD['Rho2']=LD.Rho**2
LD['DPrime2']=LD.DPrime**2
if measure=='all':
return LD
else:
return LD[measure]
@staticmethod
def LD(SNP,site=None,sites=None,positions=None,measure='DPrime'):
"""
Computes All Measures of LD between all sites of a SNP matrix to a site
SNP: a pandas dataframe which its columns contains position
Site: index of the column which LD is computed for
Sites: index of the columns which pairwise LD is computed for
Positions: positions which pairwise LD is computed for
measure: {'D','DPrime','Rho','RhoPrime'}
NOTE THAT RhoPrime=DPrime
"""
measures=np.array(['D','DPrime','Rho','RhoPrime'])
measureid=np.where(measures==measure)[0][0]
if site is None:
if sites is not None:
D=pd.DataFrame(map( lambda x: LDvectorized(SNP.iloc[:,sites].values,x,measureid),range(sites.shape[0])),columns=SNP.columns[sites]);
elif positions is not None:
D=pd.DataFrame(map( lambda x: LDvectorized(SNP.loc[:,positions].values,x,measureid),range(positions.shape[0])),columns=positions);
else:
D=pd.DataFrame(map( lambda x: LDvectorized(SNP.values,x,measureid),range(SNP.shape[1])),columns=SNP.columns);
D.index=D.columns
else:
D=pd.Series(LDvectorized(SNP.values,site,measureid),index=SNP.columns)
return D
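# Worked example (illustrative): for two perfectly associated biallelic sites
# with haplotype columns A=[0,0,1,1] and B=[0,0,1,1]: p0=q0=0.5 and x00=0.5,
# so D = 0.5 - 0.25 = 0.25, Dmax = 0.25, hence DPrime = 1 and Rho = 1.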
@staticmethod
def LD_usingR(H0):
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
robjects.r('options(warn=-1)')
robjects.r('library(genetics)')
genotype = H0.applymap(lambda x: ('A', 'G')[x == 0])
genotype = pd.concat(
[pd.concat([genotype.iloc[i], genotype.iloc[i + 1]], axis=1).apply(lambda x: '/'.join(x), axis=1) for i
in np.arange(0, H0.shape[0], 2)], axis=1).T
c = robjects.r['LD'](robjects.r['makeGenotypes'](pandas2ri.py2ri_pandasdataframe(genotype.astype('str'))))
c = pd.Series(map(lambda x: pd.DataFrame(pandas2ri.ri2py(x)), c[1:]), index=list(c.names[1:])).apply(
lambda x: x.fillna(0))
for x in c:
x += x.T
x.index = H0.columns;
x.columns = H0.columns;
c.apply(lambda x: np.fill_diagonal(x.values, None))
return c
from numba import guvectorize
@guvectorize(['void(float64[:,:],int64[:],int64[:],float64[:])'],'(M,N),(),()->(N)')
def LDvectorized(SNP,site,measure,LD):
"""
Computes All Measures of LD between all sites of a SNP matrix to a site
SNP: a pandas dataframe which its columns contains position
Site: index of the column which LD is computed for
Sites: index of the columns which pairwise LD is computed for
Positions: positions which pairwise LD is computed for
measure: {'all', 'D','DPrime','Rho','RhoPrime', 'Rho2', 'DPrime2'}
NOTE THAT RhoPrime=DPrime
"""
# LD=np.zeros((SNP.shape[1],4) )
site=site[0]
p0=(SNP[:,site]==0).mean()
p1=(SNP[:,site]==1).mean()
for i in range(SNP.shape[1]):
q0=np.mean(SNP[:,i]==0)
q1=np.mean(SNP[:,i]==1)
x00=( ( SNP[:,i]==0) & (SNP[:,site]==0 ) ).mean()
D = (x00) - p0 * q0
if D<0:
Dmax=min(p0*q0,p1*q1)
else:
Dmax=min(p0*q1,p1*q0)
Dprime=D/Dmax
denom=np.sqrt(p0*p1*q0*q1)
if denom:
rho=D/denom
LD[i]=[D,Dprime, rho, rho/(Dmax/denom)][measure[0]]
else:
LD[i]=[D, Dprime, None, None][measure[0]] | mit |
saiwing-yeung/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
nguyentu1602/statsmodels | statsmodels/duration/hazard_regression.py | 16 | 60858 | import numpy as np
from statsmodels.base import model
import statsmodels.base.model as base
from statsmodels.tools.decorators import cache_readonly
from scipy.optimize import brent
"""
Implementation of proportional hazards regression models for duration
data that may be censored ("Cox models").
References
----------
T Therneau (1996). Extending the Cox model. Technical report.
http://www.mayo.edu/research/documents/biostat-58pdf/DOC-10027288
G Rodriguez (2005). Non-parametric estimation in survival models.
http://data.princeton.edu/pop509/NonParametricSurvival.pdf
B Gillespie (2006). Checking the assumptions in the Cox proportional
hazards model.
http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf
"""
_predict_docstring = """
Returns predicted values from the proportional hazards
regression model.
Parameters
----------
params : array-like
The proportional hazards model parameters.
exog : array-like
Data to use as `exog` in forming predictions. If not
provided, the `exog` values from the model used to fit the
data are used.%(cov_params_doc)s
endog : array-like
Duration (time) values at which the predictions are made.
Only used if pred_type is either 'cumhaz' or 'surv'. If
using model `exog`, defaults to model `endog` (time), but
may be provided explicitly to make predictions at
alternative times.
strata : array-like
A vector of stratum values used to form the predictions.
Not used (may be 'None') if pred_type is 'lhr' or 'hr'.
If `exog` is None, the model stratum values are used. If
`exog` is not None and pred_type is 'surv' or 'cumhaz',
stratum values must be provided (unless there is only one
stratum).
offset : array-like
Offset values used to create the predicted values.
pred_type : string
If 'lhr', returns log hazard ratios, if 'hr' returns
hazard ratios, if 'surv' returns the survival function, if
'cumhaz' returns the cumulative hazard function.
Returns
-------
A bunch containing two fields: `predicted_values` and
`standard_errors`.
Notes
-----
Standard errors are only returned when predicting the log
hazard ratio (pred_type is 'lhr').
Types `surv` and `cumhaz` require estimation of the cumulative
hazard function.
"""
_predict_cov_params_docstring = """
cov_params : array-like
The covariance matrix of the estimated `params` vector,
used to obtain prediction errors if pred_type='lhr',
otherwise optional."""
class PHSurvivalTime(object):
def __init__(self, time, status, exog, strata=None, entry=None,
offset=None):
"""
Represent a collection of survival times with possible
stratification and left truncation.
Parameters
----------
time : array_like
The times at which either the event (failure) occurs or
the observation is censored.
status : array_like
Indicates whether the event (failure) occurs at `time`
(`status` is 1), or if `time` is a censoring time (`status`
is 0).
exog : array_like
The exogenous (covariate) data matrix; cases are rows and
variables are columns.
strata : array_like
Grouping variable defining the strata. If None, all
observations are in a single stratum.
entry : array_like
Entry (left truncation) times. The observation is not
part of the risk set for times before the entry time. If
None, the entry time is treated as being zero, which
gives no left truncation. The entry time must be less
than or equal to `time`.
offset : array-like
An optional array of offsets
"""
# Default strata
if strata is None:
strata = np.zeros(len(time), dtype=np.int32)
# Default entry times
if entry is None:
entry = np.zeros(len(time))
# Parameter validity checks.
n1, n2, n3, n4 = len(time), len(status), len(strata),\
len(entry)
nv = [n1, n2, n3, n4]
if max(nv) != min(nv):
raise ValueError("endog, status, strata, and " +
"entry must all have the same length")
if min(time) < 0:
raise ValueError("endog must be non-negative")
if min(entry) < 0:
raise ValueError("entry time must be non-negative")
# In Stata, this is entry >= time, in R it is >.
if np.any(entry > time):
raise ValueError("entry times may not occur " +
"after event or censoring times")
# Get the row indices for the cases in each stratum
if strata is not None:
stu = np.unique(strata)
#sth = {x: [] for x in stu} # needs >=2.7
sth = dict([(x, []) for x in stu])
for i,k in enumerate(strata):
sth[k].append(i)
stratum_rows = [np.asarray(sth[k], dtype=np.int32) for k in stu]
stratum_names = stu
else:
stratum_rows = [np.arange(len(time)),]
stratum_names = [0,]
# Remove strata with no events
ix = [i for i,ix in enumerate(stratum_rows) if status[ix].sum() > 0]
stratum_rows = [stratum_rows[i] for i in ix]
stratum_names = [stratum_names[i] for i in ix]
# The number of strata
nstrat = len(stratum_rows)
self.nstrat = nstrat
# Remove subjects whose entry time occurs after the last event
# in their stratum.
for stx,ix in enumerate(stratum_rows):
last_failure = max(time[ix][status[ix] == 1])
# Stata uses < here, R uses <=
ii = [i for i,t in enumerate(entry[ix]) if
t <= last_failure]
stratum_rows[stx] = stratum_rows[stx][ii]
# Remove subjects who are censored before the first event in
# their stratum.
for stx,ix in enumerate(stratum_rows):
first_failure = min(time[ix][status[ix] == 1])
ii = [i for i,t in enumerate(time[ix]) if
t >= first_failure]
stratum_rows[stx] = stratum_rows[stx][ii]
# Order by time within each stratum
for stx,ix in enumerate(stratum_rows):
ii = np.argsort(time[ix])
stratum_rows[stx] = stratum_rows[stx][ii]
if offset is not None:
self.offset_s = []
for stx in range(nstrat):
self.offset_s.append(offset[stratum_rows[stx]])
else:
self.offset_s = None
# Number of informative subjects
self.n_obs = sum([len(ix) for ix in stratum_rows])
# Split everything by stratum
self.time_s = []
self.exog_s = []
self.status_s = []
self.entry_s = []
for ix in stratum_rows:
self.time_s.append(time[ix])
self.exog_s.append(exog[ix,:])
self.status_s.append(status[ix])
self.entry_s.append(entry[ix])
self.stratum_rows = stratum_rows
self.stratum_names = stratum_names
# Precalculate some indices needed to fit Cox models.
# Distinct failure times within a stratum are always taken to
# be sorted in ascending order.
#
# ufailt_ix[stx][k] is a list of indices for subjects who fail
# at the k^th sorted unique failure time in stratum stx
#
# risk_enter[stx][k] is a list of indices for subjects who
# enter the risk set at the k^th sorted unique failure time in
# stratum stx
#
# risk_exit[stx][k] is a list of indices for subjects who exit
# the risk set at the k^th sorted unique failure time in
# stratum stx
self.ufailt_ix, self.risk_enter, self.risk_exit, self.ufailt =\
[], [], [], []
for stx in range(self.nstrat):
# All failure times
ift = np.flatnonzero(self.status_s[stx] == 1)
ft = self.time_s[stx][ift]
# Unique failure times
uft = np.unique(ft)
nuft = len(uft)
# Indices of cases that fail at each unique failure time
#uft_map = {x:i for i,x in enumerate(uft)} # requires >=2.7
uft_map = dict([(x, i) for i,x in enumerate(uft)]) # 2.6
uft_ix = [[] for k in range(nuft)]
for ix,ti in zip(ift,ft):
uft_ix[uft_map[ti]].append(ix)
# Indices of cases (failed or censored) that enter the
# risk set at each unique failure time.
risk_enter1 = [[] for k in range(nuft)]
for i,t in enumerate(self.time_s[stx]):
ix = np.searchsorted(uft, t, "right") - 1
if ix >= 0:
risk_enter1[ix].append(i)
# Indices of cases (failed or censored) that exit the
# risk set at each unique failure time.
risk_exit1 = [[] for k in range(nuft)]
for i,t in enumerate(self.entry_s[stx]):
ix = np.searchsorted(uft, t)
risk_exit1[ix].append(i)
self.ufailt.append(uft)
self.ufailt_ix.append([np.asarray(x, dtype=np.int32) for x in uft_ix])
self.risk_enter.append([np.asarray(x, dtype=np.int32) for x in risk_enter1])
self.risk_exit.append([np.asarray(x, dtype=np.int32) for x in risk_exit1])
class PHReg(model.LikelihoodModel):
"""
Fit the Cox proportional hazards regression model for right
censored data.
Parameters
----------
endog : array-like
The observed times (event or censoring)
exog : 2D array-like
The covariates or exogeneous variables
status : array-like
The censoring status values; status=1 indicates that an
event occurred (e.g. failure or death), status=0 indicates
that the observation was right censored. If None, defaults
to status=1 for all cases.
entry : array-like
The entry times, if left truncation occurs
strata : array-like
Stratum labels. If None, all observations are taken to be
in a single stratum.
ties : string
The method used to handle tied times, must be either 'breslow'
or 'efron'.
offset : array-like
Array of offset values
missing : string
The method used to handle missing data
Notes
-----
Proportional hazards regression models should not include an
explicit or implicit intercept. The effect of an intercept is
not identified using the partial likelihood approach.
`endog`, `status`, `strata`, `entry`, and the first dimension
of `exog` all must have the same length
"""
def __init__(self, endog, exog, status=None, entry=None,
strata=None, offset=None, ties='breslow',
missing='drop', **kwargs):
# Default is no censoring
if status is None:
status = np.ones(len(endog))
super(PHReg, self).__init__(endog, exog, status=status,
entry=entry, strata=strata,
offset=offset, missing=missing,
**kwargs)
# endog and exog are automatically converted, but these are
# not
if self.status is not None:
self.status = np.asarray(self.status)
if self.entry is not None:
self.entry = np.asarray(self.entry)
if self.strata is not None:
self.strata = np.asarray(self.strata)
if self.offset is not None:
self.offset = np.asarray(self.offset)
self.surv = PHSurvivalTime(self.endog, self.status,
self.exog, self.strata,
self.entry, self.offset)
# TODO: not used?
self.missing = missing
ties = ties.lower()
if ties not in ("efron", "breslow"):
raise ValueError("`ties` must be either `efron` or " +
"`breslow`")
self.ties = ties
@classmethod
def from_formula(cls, formula, data, status=None, entry=None,
strata=None, offset=None, subset=None,
ties='breslow', missing='drop', *args, **kwargs):
"""
Create a proportional hazards regression model from a formula
and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
status : array-like
The censoring status values; status=1 indicates that an
event occurred (e.g. failure or death), status=0 indicates
that the observation was right censored. If None, defaults
to status=1 for all cases.
entry : array-like
The entry times, if left truncation occurs
strata : array-like
Stratum labels. If None, all observations are taken to be
in a single stratum.
offset : array-like
Array of offset values
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
ties : string
The method used to handle tied times, must be either 'breslow'
or 'efron'.
missing : string
The method used to handle missing data
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : PHReg model instance
"""
# Allow array arguments to be passed by column name.
if type(status) is str:
status = data[status]
if type(entry) is str:
entry = data[entry]
if type(strata) is str:
strata = data[strata]
if type(offset) is str:
offset = data[offset]
mod = super(PHReg, cls).from_formula(formula, data,
status=status, entry=entry, strata=strata,
offset=offset, subset=subset, ties=ties,
missing=missing, *args, **kwargs)
return mod
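# Hedged usage sketch (the DataFrame and column names are assumptions, not
# taken from this file):
#   import pandas as pd
#   df = pd.DataFrame({"futime": durations, "dead": events, "age": ages})
#   mod = PHReg.from_formula("futime ~ age", df, status=df["dead"],
#                            ties="efron")
#   rslt = mod.fit()
#   print(rslt.summary())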
def fit(self, groups=None, **args):
"""
Fit a proportional hazards regression model.
Parameters
----------
groups : array-like
Labels indicating groups of observations that may be
dependent. If present, the standard errors account for
this dependence. Does not affect fitted values.
Returns a PHregResults instance.
"""
# TODO process for missing values
if groups is not None:
self.groups = np.asarray(groups)
else:
self.groups = None
if 'disp' not in args:
args['disp'] = False
fit_rslts = super(PHReg, self).fit(**args)
if self.groups is None:
cov_params = fit_rslts.cov_params()
else:
cov_params = self.robust_covariance(fit_rslts.params)
results = PHRegResults(self, fit_rslts.params, cov_params)
return results
def fit_regularized(self, method="coord_descent", maxiter=100,
alpha=0., L1_wt=1., start_params=None,
cnvrg_tol=1e-7, zero_tol=1e-8, **kwargs):
"""
Return a regularized fit to a linear regression model.
Parameters
----------
method :
Only the coordinate descent algorithm is implemented.
maxiter : integer
The maximum number of iteration cycles (an iteration cycle
involves running coordinate descent on all variables).
alpha : scalar or array-like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
start_params : array-like
Starting values for `params`.
cnvrg_tol : scalar
If `params` changes by less than this amount (in sup-norm)
in one iteration cycle, the algorithm terminates with
convergence.
zero_tol : scalar
Any estimated coefficient smaller than this value is
replaced with zero.
Returns
-------
A PHregResults object, of the same type returned by `fit`.
Notes
-----
The penalty is the "elastic net" penalty, which
is a convex combination of L1 and L2 penalties.
The function that is minimized is:
.. math::
    -loglike/n + alpha*((1-L1_wt)*|params|_2^2/2 + L1_wt*|params|_1)
where :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
"""
k_exog = self.exog.shape[1]
n_exog = self.exog.shape[0]
if np.isscalar(alpha):
alpha = alpha * np.ones(k_exog, dtype=np.float64)
# regularization cannot be used with groups
self.groups = None
# Define starting params
if start_params is None:
params = np.zeros(k_exog, dtype=np.float64)
else:
params = start_params.copy()
# Maybe could be a shallow copy, but just in case...
import copy
surv = copy.deepcopy(self.surv)
# This is the base offset, onto which the effects of
# constrained variables are added.
if self.offset is None:
offset_s_base = [np.zeros(len(x)) for x in surv.stratum_rows]
surv.offset_s = [x.copy() for x in offset_s_base]
else:
offset_s_base = [x.copy() for x in surv.offset_s]
# Create a model instance for optimizing a single variable
model_1var = copy.deepcopy(self)
model_1var.surv = surv
model_1var.ties = self.ties
# All the negative penalized log-likelihood functions.
def gen_npfuncs(k):
def nploglike(params):
pen = alpha[k]*((1 - L1_wt)*params**2/2 + L1_wt*np.abs(params))
return -model_1var.loglike(np.r_[params]) / n_exog + pen
def npscore(params):
pen_grad = alpha[k]*(1 - L1_wt)*params
return -model_1var.score(np.r_[params])[0] / n_exog + pen_grad
def nphess(params):
pen_hess = alpha[k]*(1 - L1_wt)
return -model_1var.hessian(np.r_[params])[0,0] / n_exog + pen_hess
return nploglike, npscore, nphess
nploglike_funcs = [gen_npfuncs(k) for k in range(len(params))]
# 1-dimensional exog's
exog_s = []
for k in range(k_exog):
ex = [x[:, k][:, None] for x in surv.exog_s]
exog_s.append(ex)
converged = False
btol = 1e-8
params_zero = np.zeros(len(params), dtype=bool)
for itr in range(maxiter):
# Sweep through the parameters
params_save = params.copy()
for k in range(k_exog):
# Under the active set method, if a parameter becomes
# zero we don't try to change it again.
if params_zero[k]:
continue
# Set exog to include only the variable whose effect
# is being estimated.
surv.exog_s = exog_s[k]
# Set the offset to account for the variables that are
# being held fixed.
params0 = params.copy()
params0[k] = 0
for stx in range(self.surv.nstrat):
v = np.dot(self.surv.exog_s[stx], params0)
surv.offset_s[stx] = offset_s_base[stx] + v
params[k] = _opt_1d(nploglike_funcs[k], params[k],
alpha[k]*L1_wt, tol=btol)
# Update the active set
if itr > 0 and np.abs(params[k]) < zero_tol:
params_zero[k] = True
params[k] = 0.
# Check for convergence
pchange = np.max(np.abs(params - params_save))
if pchange < cnvrg_tol:
converged = True
break
# Set approximate zero coefficients to be exactly zero
params *= np.abs(params) >= zero_tol
# Fit the reduced model to get standard errors and other
# post-estimation results.
ii = np.flatnonzero(params)
cov = np.zeros((k_exog, k_exog), dtype=np.float64)
if len(ii) > 0:
model = self.__class__(self.endog, self.exog[:, ii],
status=self.status, entry=self.entry,
strata=self.strata, offset=self.offset,
ties=self.ties, missing=self.missing)
rslt = model.fit()
cov[np.ix_(ii, ii)] = rslt.normalized_cov_params
rfit = PHRegResults(self, params, cov_params=cov)
rfit.converged = converged
rfit.regularized = True
return rfit
def loglike(self, params):
"""
Returns the log partial likelihood function evaluated at
`params`.
"""
if self.ties == "breslow":
return self.breslow_loglike(params)
elif self.ties == "efron":
return self.efron_loglike(params)
def score(self, params):
"""
Returns the score function evaluated at `params`.
"""
if self.ties == "breslow":
return self.breslow_gradient(params)
elif self.ties == "efron":
return self.efron_gradient(params)
def hessian(self, params):
"""
Returns the Hessian matrix of the log partial likelihood
function evaluated at `params`.
"""
if self.ties == "breslow":
return self.breslow_hessian(params)
else:
return self.efron_hessian(params)
def breslow_loglike(self, params):
"""
Returns the value of the log partial likelihood function
evaluated at `params`, using the Breslow method to handle tied
times.
"""
surv = self.surv
like = 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0 = 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
like += (linpred[ix] - np.log(xp0)).sum()
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
return like
def efron_loglike(self, params):
"""
Returns the value of the log partial likelihood function
evaluated at `params`, using the Efron method to handle tied
times.
"""
surv = self.surv
like = 0.
# Loop over strata
for stx in range(surv.nstrat):
# exog and linear predictor for this stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0 = 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
xp0f = e_linpred[uft_ix[i]].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
like += linpred[ix].sum()
m = len(ix)
J = np.arange(m, dtype=np.float64) / m
like -= np.log(xp0 - J*xp0f).sum()
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
return like
def breslow_gradient(self, params):
"""
Returns the gradient of the log partial likelihood, using the
Breslow method to handle tied times.
"""
surv = self.surv
grad = 0.
# Loop over strata
for stx in range(surv.nstrat):
# Indices of subjects in the stratum
strat_ix = surv.stratum_rows[stx]
# Unique failure times in the stratum
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
# exog and linear predictor for the stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1 = 0., 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 += e_linpred[ix].sum()
xp1 += (e_linpred[ix][:,None] * v).sum(0)
# Account for all cases that fail at this point.
ix = uft_ix[i]
grad += (exog_s[ix,:] - xp1 / xp0).sum(0)
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 -= e_linpred[ix].sum()
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
return grad
def efron_gradient(self, params):
"""
Returns the gradient of the log partial likelihood evaluated
at `params`, using the Efron method to handle tied times.
"""
surv = self.surv
grad = 0.
# Loop over strata
for stx in range(surv.nstrat):
# Indices of cases in the stratum
strat_ix = surv.stratum_rows[stx]
# exog and linear predictor of the stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1 = 0., 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 += e_linpred[ix].sum()
xp1 += (e_linpred[ix][:,None] * v).sum(0)
ixf = uft_ix[i]
if len(ixf) > 0:
v = exog_s[ixf,:]
xp0f = e_linpred[ixf].sum()
xp1f = (e_linpred[ixf][:,None] * v).sum(0)
# Consider all cases that fail at this point.
grad += v.sum(0)
m = len(ixf)
J = np.arange(m, dtype=np.float64) / m
numer = xp1 - np.outer(J, xp1f)
denom = xp0 - np.outer(J, xp0f)
ratio = numer / denom
rsum = ratio.sum(0)
grad -= rsum
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 -= e_linpred[ix].sum()
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
return grad
def breslow_hessian(self, params):
"""
Returns the Hessian of the log partial likelihood evaluated at
`params`, using the Breslow method to handle tied times.
"""
surv = self.surv
hess = 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1, xp2 = 0., 0., 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
xp0 += e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 += (e_linpred[ix][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ix]
xp2 += (mat.T * mat * elx[None,:,None]).sum(1)
# Account for all cases that fail at this point.
m = len(uft_ix[i])
hess += m*(xp2 / xp0 - np.outer(xp1, xp1) / xp0**2)
# Update for new cases entering the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
xp0 -= e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ix]
xp2 -= (mat.T * mat * elx[None,:,None]).sum(1)
return -hess
def efron_hessian(self, params):
"""
Returns the Hessian matrix of the partial log-likelihood
evaluated at `params`, using the Efron method to handle tied
times.
"""
surv = self.surv
hess = 0.
# Loop over strata
for stx in range(surv.nstrat):
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1, xp2 = 0., 0., 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
xp0 += e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 += (e_linpred[ix][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ix]
xp2 += (mat.T * mat * elx[None,:,None]).sum(1)
ixf = uft_ix[i]
if len(ixf) > 0:
v = exog_s[ixf,:]
xp0f = e_linpred[ixf].sum()
xp1f = (e_linpred[ixf][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ixf]
xp2f = (mat.T * mat * elx[None,:,None]).sum(1)
# Account for all cases that fail at this point.
m = len(uft_ix[i])
J = np.arange(m, dtype=np.float64) / m
c0 = xp0 - J*xp0f
mat = (xp2[None,:,:] - J[:,None,None]*xp2f) / c0[:,None,None]
hess += mat.sum(0)
mat = (xp1[None, :] - np.outer(J, xp1f)) / c0[:, None]
mat = mat[:, :, None] * mat[:, None, :]
hess -= mat.sum(0)
# Update for new cases entering the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
xp0 -= e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
mat = v[None,:,:]
elx = e_linpred[ix]
xp2 -= (mat.T * mat * elx[None,:,None]).sum(1)
return -hess
def robust_covariance(self, params):
"""
Returns a covariance matrix for the proportional hazards model
regression coefficient estimates that is robust to certain
forms of model misspecification.
Parameters
----------
params : ndarray
The parameter vector at which the covariance matrix is
calculated.
Returns
-------
The robust covariance matrix as a square ndarray.
Notes
-----
This function uses the `groups` argument to determine groups
within which observations may be dependent. The covariance
matrix is calculated using the Huber-White "sandwich" approach.
"""
if self.groups is None:
raise ValueError("`groups` must be specified to calculate the robust covariance matrix")
hess = self.hessian(params)
score_obs = self.score_residuals(params)
# Collapse
grads = {}
for i,g in enumerate(self.groups):
if g not in grads:
grads[g] = 0.
grads[g] += score_obs[i, :]
grads = np.asarray(list(grads.values()))
mat = grads[None, :, :]
mat = mat.T * mat
mat = mat.sum(1)
hess_inv = np.linalg.inv(hess)
cmat = np.dot(hess_inv, np.dot(mat, hess_inv))
return cmat
def score_residuals(self, params):
"""
Returns the score residuals calculated at a given vector of
parameters.
Parameters
----------
params : ndarray
The parameter vector at which the score residuals are
calculated.
Returns
-------
The score residuals, returned as a ndarray having the same
shape as `exog`.
Notes
-----
Observations in a stratum with no observed events have undefined
score residuals, and contain NaN in the returned matrix.
"""
surv = self.surv
score_resid = np.zeros(self.exog.shape, dtype=np.float64)
# Use to set undefined values to NaN.
mask = np.zeros(self.exog.shape[0], dtype=np.int32)
w_avg = self.weighted_covariate_averages(params)
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
strat_ix = surv.stratum_rows[stx]
xp0 = 0.
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
at_risk_ix = set([])
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
at_risk_ix |= set(ix)
xp0 += e_linpred[ix].sum()
atr_ix = list(at_risk_ix)
leverage = exog_s[atr_ix, :] - w_avg[stx][i, :]
# Event indicators
d = np.zeros(exog_s.shape[0])
d[uft_ix[i]] = 1
# The increment in the cumulative hazard
dchaz = len(uft_ix[i]) / xp0
# Piece of the martingale residual
mrp = d[atr_ix] - e_linpred[atr_ix] * dchaz
# Update the score residuals
ii = strat_ix[atr_ix]
score_resid[ii,:] += leverage * mrp[:, None]
mask[ii] = 1
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
at_risk_ix -= set(ix)
xp0 -= e_linpred[ix].sum()
jj = np.flatnonzero(mask == 0)
if len(jj) > 0:
score_resid[jj, :] = np.nan
return score_resid
def weighted_covariate_averages(self, params):
"""
Returns the hazard-weighted average of covariate values for
subjects who are at-risk at a particular time.
Parameters
----------
params : ndarray
Parameter vector
Returns
-------
averages : list of ndarrays
averages[stx][i,:] is a row vector containing the weighted
average values (for all the covariates) of at-risk
subjects at the i^th largest observed failure time in
stratum `stx`, using the hazard multipliers as weights.
Notes
-----
Used to calculate leverages and score residuals.
"""
surv = self.surv
averages = []
xp0, xp1 = 0., 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
average_s = np.zeros((len(uft_ix), exog_s.shape[1]),
dtype=np.float64)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
xp1 += np.dot(e_linpred[ix], exog_s[ix, :])
average_s[i, :] = xp1 / xp0
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
xp1 -= np.dot(e_linpred[ix], exog_s[ix, :])
averages.append(average_s)
return averages
def baseline_cumulative_hazard(self, params):
"""
Estimate the baseline cumulative hazard and survival
functions.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A list of triples (time, hazard, survival) containing the time
values and corresponding cumulative hazard and survival
function values for each stratum.
Notes
-----
Uses the Nelson-Aalen estimator.
"""
# TODO: some disagreements with R, not the same algorithm but
# hard to deduce what R is doing. Our results are reasonable.
surv = self.surv
rslt = []
# Loop over strata
for stx in range(surv.nstrat):
uft = surv.ufailt[stx]
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
xp0 = 0.
h0 = np.zeros(nuft, dtype=np.float64)
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
h0[i] = len(ix) / xp0
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
cumhaz = np.cumsum(h0) - h0
surv = np.exp(-cumhaz)
rslt.append([uft, cumhaz, surv])
return rslt
def baseline_cumulative_hazard_function(self, params):
"""
Returns a function that calculates the baseline cumulative
hazard function for each stratum.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A dict mapping stratum names to the estimated baseline
cumulative hazard function.
"""
from scipy.interpolate import interp1d
surv = self.surv
base = self.baseline_cumulative_hazard(params)
cumhaz_f = {}
for stx in range(surv.nstrat):
time_h = base[stx][0]
cumhaz = base[stx][1]
time_h = np.r_[-np.inf, time_h, np.inf]
cumhaz = np.r_[cumhaz[0], cumhaz, cumhaz[-1]]
func = interp1d(time_h, cumhaz, kind='zero')
cumhaz_f[self.surv.stratum_names[stx]] = func
return cumhaz_f
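# Hedged usage sketch: evaluate the estimated baseline cumulative hazard for a
# stratum (assumed here to be labeled 0) at arbitrary times, given a fitted
# result `rslt` from this model:
#   cumhaz_f = model.baseline_cumulative_hazard_function(rslt.params)
#   H0 = cumhaz_f[0](np.r_[1.0, 2.5, 5.0])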
def predict(self, params, exog=None, cov_params=None, endog=None,
strata=None, offset=None, pred_type="lhr"):
# docstring attached below
pred_type = pred_type.lower()
if pred_type not in ["lhr", "hr", "surv", "cumhaz"]:
msg = "Type %s not allowed for prediction" % pred_type
raise ValueError(msg)
class bunch:
predicted_values = None
standard_errors = None
ret_val = bunch()
# Don't do anything with offset here because we want to allow
# different offsets to be specified even if exog is the model
# exog.
exog_provided = True
if exog is None:
exog = self.exog
exog_provided = False
lhr = np.dot(exog, params)
if offset is not None:
lhr += offset
# Never use self.offset unless we are also using self.exog
elif self.offset is not None and not exog_provided:
lhr += self.offset
# Handle lhr and hr prediction first, since they don't make
# use of the hazard function.
if pred_type == "lhr":
ret_val.predicted_values = lhr
if cov_params is not None:
mat = np.dot(exog, cov_params)
va = (mat * exog).sum(1)
ret_val.standard_errors = np.sqrt(va)
return ret_val
hr = np.exp(lhr)
if pred_type == "hr":
ret_val.predicted_values = hr
return ret_val
# Makes sure endog is defined
if endog is None and exog_provided:
msg = "If `exog` is provided `endog` must be provided."
raise ValueError(msg)
# Use model endog if using model exog
elif endog is None and not exog_provided:
endog = self.endog
# Make sure strata is defined
if strata is None:
if exog_provided and self.surv.nstrat > 1:
raise ValueError("`strata` must be provided")
if self.strata is None:
strata = [self.surv.stratum_names[0],] * len(endog)
else:
strata = self.strata
cumhaz = np.nan * np.ones(len(endog), dtype=np.float64)
stv = np.unique(strata)
bhaz = self.baseline_cumulative_hazard_function(params)
for stx in stv:
ix = np.flatnonzero(strata == stx)
func = bhaz[stx]
cumhaz[ix] = func(endog[ix]) * hr[ix]
if pred_type == "cumhaz":
ret_val.predicted_values = cumhaz
elif pred_type == "surv":
ret_val.predicted_values = np.exp(-cumhaz)
return ret_val
predict.__doc__ = _predict_docstring % {'cov_params_doc': _predict_cov_params_docstring}
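# Hedged usage sketch: hazard ratios and survival probabilities for the
# estimation sample, given a fitted result `rslt` (see the docstring attached
# above for the available pred_type values):
#   hr = model.predict(rslt.params, pred_type="hr").predicted_values
#   sf = model.predict(rslt.params, pred_type="surv").predicted_values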
def get_distribution(self, params):
"""
Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Parameters
----------
params : array-like
The model proportional hazards model parameters.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
failure times within a stratum.
"""
# TODO: this returns a Python list of rv_discrete objects, so
# nothing can be vectorized. It appears that rv_discrete does
# not allow vectorization.
from scipy.stats.distributions import rv_discrete
surv = self.surv
bhaz = self.baseline_cumulative_hazard(params)
# The arguments to rv_discrete_float, first obtained by
# stratum
pk, xk = [], []
for stx in range(self.surv.nstrat):
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
# The unique failure times for this stratum (the support
# of the distribution).
pts = bhaz[stx][0]
# The individual cumulative hazards for everyone in this
# stratum.
ichaz = np.outer(e_linpred, bhaz[stx][1])
# The individual survival functions.
usurv = np.exp(-ichaz)
usurv = np.concatenate((usurv, np.zeros((usurv.shape[0], 1))),
axis=1)
# The individual survival probability masses.
probs = -np.diff(usurv, 1)
pk.append(probs)
xk.append(np.outer(np.ones(probs.shape[0]), pts))
# Pad to make all strata have the same shape
mxc = max([x.shape[1] for x in xk])
for k in range(self.surv.nstrat):
if xk[k].shape[1] < mxc:
xk1 = np.zeros((xk[k].shape[0], mxc))
pk1 = np.zeros((pk[k].shape[0], mxc))
xk1[:, 0:xk[k].shape[1]] = xk[k]
pk1[:, 0:pk[k].shape[1]] = pk[k]
xk[k], pk[k] = xk1, pk1
xka = np.nan * np.zeros((len(self.endog), mxc), dtype=np.float64)
pka = np.ones((len(self.endog), mxc), dtype=np.float64) / mxc
for stx in range(self.surv.nstrat):
ix = self.surv.stratum_rows[stx]
xka[ix, :] = xk[stx]
pka[ix, :] = pk[stx]
dist = rv_discrete_float(xka, pka)
return dist
class PHRegResults(base.LikelihoodModelResults):
'''
Class to contain results of fitting a Cox proportional hazards
survival model.
PHregResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
See statsmodels.LikelihoodModelResults
Returns
-------
**Attributes**
model : class instance
PHreg model instance that called fit.
normalized_cov_params : array
The sampling covariance matrix of the estimates
params : array
The coefficients of the fitted model. Each coefficient is the
log hazard ratio corresponding to a 1 unit difference in a
single covariate while holding the other covariates fixed.
bse : array
The standard errors of the fitted parameters.
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, cov_params, covariance_type="naive"):
self.covariance_type = covariance_type
super(PHRegResults, self).__init__(model, params,
normalized_cov_params=cov_params)
@cache_readonly
def standard_errors(self):
"""
Returns the standard errors of the parameter estimates.
"""
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def bse(self):
"""
Returns the standard errors of the parameter estimates.
"""
return self.standard_errors
def get_distribution(self):
"""
Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
failure times within a stratum.
"""
return self.model.get_distribution(self.params)
def predict(self, endog=None, exog=None, strata=None,
offset=None, transform=True, pred_type="lhr"):
# docstring attached below
return super(PHRegResults, self).predict(exog=exog,
transform=transform,
cov_params=self.cov_params(),
endog=endog,
strata=strata,
offset=offset,
pred_type=pred_type)
predict.__doc__ = _predict_docstring % {'cov_params_doc': ''}
def _group_stats(self, groups):
"""
Descriptive statistics of the groups.
"""
gsize = {}
for x in groups:
if x not in gsize:
gsize[x] = 0
gsize[x] += 1
gsize = np.asarray(gsize.values())
return gsize.min(), gsize.max(), gsize.mean()
@cache_readonly
def weighted_covariate_averages(self):
"""
The average covariate values within the at-risk set at each
event time point, weighted by hazard.
"""
return self.model.weighted_covariate_averages(self.params)
@cache_readonly
def score_residuals(self):
"""
A matrix containing the score residuals.
"""
return self.model.score_residuals(self.params)
@cache_readonly
def baseline_cumulative_hazard(self):
"""
A list (corresponding to the strata) containing the baseline
cumulative hazard function evaluated at the event points.
"""
return self.model.baseline_cumulative_hazard(self.params)
@cache_readonly
def baseline_cumulative_hazard_function(self):
"""
A list (corresponding to the strata) containing function
objects that calculate the cumulative hazard function.
"""
return self.model.baseline_cumulative_hazard_function(self.params)
@cache_readonly
def schoenfeld_residuals(self):
"""
A matrix containing the Schoenfeld residuals.
Notes
-----
Schoenfeld residuals for censored observations are set to zero.
"""
surv = self.model.surv
w_avg = self.weighted_covariate_averages
# Initialize at NaN since rows that belong to strata with no
# events have undefined residuals.
sch_resid = np.nan*np.ones(self.model.exog.shape, dtype=np.float64)
# Loop over strata
for stx in range(surv.nstrat):
uft = surv.ufailt[stx]
exog_s = surv.exog_s[stx]
time_s = surv.time_s[stx]
strat_ix = surv.stratum_rows[stx]
ii = np.searchsorted(uft, time_s)
# These subjects are censored after the last event in
# their stratum, so have empty risk sets and undefined
# residuals.
jj = np.flatnonzero(ii < len(uft))
sch_resid[strat_ix[jj], :] = exog_s[jj, :] - w_avg[stx][ii[jj], :]
jj = np.flatnonzero(self.model.status == 0)
sch_resid[jj, :] = np.nan
return sch_resid
@cache_readonly
def martingale_residuals(self):
"""
The martingale residuals.
"""
surv = self.model.surv
# Initialize at NaN since rows that belong to strata with no
# events have undefined residuals.
mart_resid = np.nan*np.ones(len(self.model.endog), dtype=np.float64)
cumhaz_f_list = self.baseline_cumulative_hazard_function
# Loop over strata
for stx in range(surv.nstrat):
cumhaz_f = cumhaz_f_list[stx]
exog_s = surv.exog_s[stx]
time_s = surv.time_s[stx]
linpred = np.dot(exog_s, self.params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
ii = surv.stratum_rows[stx]
chaz = cumhaz_f(time_s)
mart_resid[ii] = self.model.status[ii] - e_linpred * chaz
return mart_resid
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the proportional hazards regression results.
Parameters
-----------
yname : string, optional
Default is `y`
xname : list of strings, optional
        Default is `x#`, where # runs over the p regressors
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
from statsmodels.iolib import summary2
from statsmodels.compat.collections import OrderedDict
smry = summary2.Summary()
float_format = "%8.3f"
info = OrderedDict()
info["Model:"] = "PH Reg"
if yname is None:
yname = self.model.endog_names
info["Dependent variable:"] = yname
info["Ties:"] = self.model.ties.capitalize()
info["Sample size:"] = str(self.model.surv.n_obs)
info["Num. events:"] = str(int(sum(self.model.status)))
if self.model.groups is not None:
mn, mx, avg = self._group_stats(self.model.groups)
info["Max. group size:"] = str(mx)
info["Min. group size:"] = str(mn)
info["Avg. group size:"] = str(avg)
smry.add_dict(info, align='l', float_format=float_format)
param = summary2.summary_params(self, alpha=alpha)
param = param.rename(columns={"Coef.": "log HR",
"Std.Err.": "log HR SE"})
param.insert(2, "HR", np.exp(param["log HR"]))
a = "[%.3f" % (alpha / 2)
param.loc[:, a] = np.exp(param.loc[:, a])
a = "%.3f]" % (1 - alpha / 2)
param.loc[:, a] = np.exp(param.loc[:, a])
        if xname is not None:
param.index = xname
smry.add_df(param, float_format=float_format)
smry.add_title(title=title, results=self)
smry.add_text("Confidence intervals are for the hazard ratios")
if self.model.groups is not None:
smry.add_text("Standard errors account for dependence within groups")
if hasattr(self, "regularized"):
smry.add_text("Standard errors do not account for the regularization")
return smry
class rv_discrete_float(object):
"""
A class representing a collection of discrete distributions.
Parameters
----------
xk : 2d array-like
The support points, should be non-decreasing within each
row.
pk : 2d array-like
The probabilities, should sum to one within each row.
Notes
-----
Each row of `xk`, and the corresponding row of `pk` describe a
discrete distribution.
`xk` and `pk` should both be two-dimensional ndarrays. Each row
of `pk` should sum to 1.
This class is used as a substitute for scipy.distributions.
rv_discrete, since that class does not allow non-integer support
points, or vectorized operations.
Only a limited number of methods are implemented here compared to
the other scipy distribution classes.
"""
def __init__(self, xk, pk):
self.xk = xk
self.pk = pk
self.cpk = np.cumsum(self.pk, axis=1)
def rvs(self):
"""
Returns a random sample from the discrete distribution.
A vector is returned containing a single draw from each row of
`xk`, using the probabilities of the corresponding row of `pk`
"""
n = self.xk.shape[0]
u = np.random.uniform(size=n)
ix = (self.cpk < u[:, None]).sum(1)
ii = np.arange(n, dtype=np.int32)
return self.xk[(ii,ix)]
def mean(self):
"""
Returns a vector containing the mean values of the discrete
distributions.
A vector is returned containing the mean value of each row of
`xk`, using the probabilities in the corresponding row of
`pk`.
"""
return (self.xk * self.pk).sum(1)
def var(self):
"""
Returns a vector containing the variances of the discrete
distributions.
A vector is returned containing the variance for each row of
`xk`, using the probabilities in the corresponding row of
`pk`.
"""
        mn = self.mean()
        xkc = self.xk - mn[:, None]
        return (self.pk * xkc**2).sum(1)
def std(self):
"""
Returns a vector containing the standard deviations of the
discrete distributions.
A vector is returned containing the standard deviation for
each row of `xk`, using the probabilities in the corresponding
row of `pk`.
"""
return np.sqrt(self.var())
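# Hedged sanity-check sketch for rv_discrete_float (the numbers are made up
# and chosen so the moments are easy to verify by hand):
#
#     >>> import numpy as np
#     >>> xk = np.array([[1., 2., 3.], [2., 4., 6.]])
#     >>> pk = np.array([[0.2, 0.3, 0.5], [0.5, 0.25, 0.25]])
#     >>> d = rv_discrete_float(xk, pk)
#     >>> d.mean()        # expected: array([2.3, 3.5])
#     >>> d.var()         # expected: array([0.61, 2.75])
#     >>> d.rvs().shape   # one draw per row: (2,)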
def _opt_1d(funcs, start, L1_wt, tol):
"""
Optimize a L1-penalized smooth one-dimensional function of a
single variable.
Parameters
----------
funcs : tuple of functions
funcs[0] is the objective function to be minimized. funcs[1]
and funcs[2] are, respectively, the first and second
derivatives of the smooth part of funcs[0] (i.e. excluding the
L1 penalty).
start : real
A starting value for the function argument
L1_wt : non-negative real
The weight for the L1 penalty function.
tol : non-negative real
A convergence threshold.
Returns
-------
The argmin of the objective function.
"""
# TODO: can we detect failures without calling funcs[0] twice?
x = start
f = funcs[0](x)
b = funcs[1](x)
c = funcs[2](x)
d = b - c*x
if L1_wt > np.abs(d):
return 0.
elif d >= 0:
x += (L1_wt - b) / c
elif d < 0:
x -= (L1_wt + b) / c
f1 = funcs[0](x)
# This is an expensive fall-back if the quadratic
# approximation is poor and sends us far off-course.
if f1 > f + 1e-10:
return brent(funcs[0], brack=(x-0.2, x+0.2), tol=tol)
return x
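# Hedged sketch of how _opt_1d behaves on a toy problem (the lambdas below are
# illustrative, not part of the module): minimizing 0.5*(x - 2)**2 + L1_wt*|x|
# has the soft-thresholded solution max(0, 2 - L1_wt), which the quadratic
# step above recovers exactly because the smooth part is already quadratic.
#
#     >>> L1_wt = 0.5
#     >>> funcs = (lambda x: 0.5 * (x - 2.) ** 2 + L1_wt * abs(x),
#     ...          lambda x: x - 2.,     # first derivative of the smooth part
#     ...          lambda x: 1.)         # second derivative of the smooth part
#     >>> _opt_1d(funcs, start=0., L1_wt=L1_wt, tol=1e-8)   # returns 1.5
#
# With L1_wt = 3.0 the penalty dominates the gradient at zero and the function
# returns 0.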
| bsd-3-clause |
pletisan/python-data-viz-cookbook | 3367OS_Code/3367OS_05_Code/ch05/ch05_rec03_matplotlib_anim.py | 1 | 1071 | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
fig = plt.figure()
ax = plt.axes(xlim=(0, 2), ylim=(-2, 2))
line, = ax.plot([], [], lw=2)
def init():
"""Clears current frame."""
line.set_data([], [])
return line,
def animate(i):
"""Draw figure.
@param i: Frame counter
@type i: int
"""
x = np.linspace(0, 2, 1000)
y = np.sin(2 * np.pi * (x - 0.01 * i)) * np.cos(22 * np.pi * (x - 0.01 * i))
line.set_data(x, y)
return line,
# This call puts the work in motion
# connecting init and animate functions and figure we want to draw
animator = animation.FuncAnimation(fig, animate, init_func=init,
frames=200, interval=20, blit=True)
# This call creates the video file.
# Temporary, every frame is saved as PNG file
# and later processed by ffmpeg encoder into MPEG4 file
# we can pass various arguments to ffmpeg via extra_args
animator.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'],
writer='ffmpeg_file')
plt.show()
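# Hedged alternative (assumes the Pillow package and a matplotlib version that
# registers the 'pillow' animation writer): if ffmpeg is not installed, the
# same animation can be written as an animated GIF instead, e.g.
#
#     animator.save('basic_animation.gif', writer='pillow', fps=30)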
| mit |
Titan-C/scikit-learn | sklearn/pipeline.py | 16 | 29778 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# License: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import clone, TransformerMixin
from .externals.joblib import Parallel, delayed, Memory
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .utils import Bunch
from .utils.metaestimators import _BaseComposition
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(_BaseComposition):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The transformers in the pipeline can be cached using ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
to None.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
memory : Instance of sklearn.external.joblib.Memory or string, optional \
(default=None)
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
Attributes
----------
named_steps : bunch object, a dictionary with attribute access
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Pipeline(memory=None,
steps=[('anova', SelectKBest(...)),
('svc', SVC(...))])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.829...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([False, False, True, True, False, False, True, True, False,
True, False, True, True, False, True, False, True, True,
False, False], dtype=bool)
>>> # Another way to get selected features chosen by anova_filter
>>> anova_svm.named_steps.anova.get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([False, False, True, True, False, False, True, True, False,
True, False, True, True, False, True, False, True, True,
False, False], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps, memory=None):
# shallow copy of steps
self.steps = tosequence(steps)
self._validate_steps()
self.memory = memory
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params('steps', deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``.
Returns
-------
self
"""
self._set_params('steps', **kwargs)
return self
def _validate_steps(self):
names, estimators = zip(*self.steps)
# validate names
self._validate_names(names)
# validate estimators
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None:
continue
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps should be "
"transformers and implement fit and transform."
" '%s' (type %s) doesn't" % (t, type(t)))
# We allow last estimator to be None as an identity transformation
if estimator is not None and not hasattr(estimator, "fit"):
raise TypeError("Last step of Pipeline should implement fit. "
"'%s' (type %s) doesn't"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
@property
def named_steps(self):
# Use Bunch object to improve autocomplete
return Bunch(**dict(self.steps))
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _fit(self, X, y=None, **fit_params):
self._validate_steps()
# Setup the memory
memory = self.memory
if memory is None:
memory = Memory(cachedir=None, verbose=0)
elif isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
elif not isinstance(memory, Memory):
raise ValueError("'memory' should either be a string or"
" a sklearn.externals.joblib.Memory"
" instance, got 'memory={!r}' instead.".format(
type(memory)))
fit_transform_one_cached = memory.cache(_fit_transform_one)
fit_params_steps = dict((name, {}) for name, step in self.steps
if step is not None)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for step_idx, (name, transformer) in enumerate(self.steps[:-1]):
if transformer is None:
pass
else:
if memory.cachedir is None:
# we do not clone when caching is disabled to preserve
# backward compatibility
cloned_transformer = transformer
else:
cloned_transformer = clone(transformer)
# Fit or load from cache the current transfomer
Xt, fitted_transformer = fit_transform_one_cached(
cloned_transformer, None, Xt, y,
**fit_params_steps[name])
# Replace the transformer of the step with the fitted
# transformer. This is necessary when loading the transformer
# from the cache.
self.steps[step_idx] = (name, fitted_transformer)
if self._final_estimator is None:
return Xt, {}
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit the model
Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
self : Pipeline
This estimator
"""
Xt, fit_params = self._fit(X, y, **fit_params)
if self._final_estimator is not None:
self._final_estimator.fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit the model and transform with the final estimator
Fits all the transforms one after the other and transforms the
data, then uses fit_transform on transformed data with the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like, shape = [n_samples, n_transformed_features]
Transformed samples
"""
last_step = self._final_estimator
Xt, fit_params = self._fit(X, y, **fit_params)
if hasattr(last_step, 'fit_transform'):
return last_step.fit_transform(Xt, y, **fit_params)
elif last_step is None:
return Xt
else:
return last_step.fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Apply transforms to the data, and predict with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_pred : array-like
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
y_pred : array-like
"""
Xt, fit_params = self._fit(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Apply transforms, and predict_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_proba : array-like, shape = [n_samples, n_classes]
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Apply transforms, and decision_function of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes]
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Apply transforms, and predict_log_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes]
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@property
def transform(self):
"""Apply transforms, and transform with the final estimator
This also works where final estimator is ``None``: all prior
transformations are applied.
Parameters
----------
X : iterable
Data to transform. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
Xt : array-like, shape = [n_samples, n_transformed_features]
"""
# _final_estimator is None or has transform, otherwise attribute error
if self._final_estimator is not None:
self._final_estimator.transform
return self._transform
def _transform(self, X):
Xt = X
for name, transform in self.steps:
if transform is not None:
Xt = transform.transform(Xt)
return Xt
@property
def inverse_transform(self):
"""Apply inverse transformations in reverse order
All estimators in the pipeline must support ``inverse_transform``.
Parameters
----------
Xt : array-like, shape = [n_samples, n_transformed_features]
Data samples, where ``n_samples`` is the number of samples and
``n_features`` is the number of features. Must fulfill
input requirements of last step of pipeline's
``inverse_transform`` method.
Returns
-------
Xt : array-like, shape = [n_samples, n_features]
"""
# raise AttributeError if necessary for hasattr behaviour
for name, transform in self.steps:
if transform is not None:
transform.inverse_transform
return self._inverse_transform
def _inverse_transform(self, X):
Xt = X
for name, transform in self.steps[::-1]:
if transform is not None:
Xt = transform.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None, sample_weight=None):
"""Apply transforms, and score with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all
steps of the pipeline.
sample_weight : array-like, default=None
If not None, this argument is passed as ``sample_weight`` keyword
argument to the ``score`` method of the final estimator.
Returns
-------
score : float
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
score_params = {}
if sample_weight is not None:
score_params['sample_weight'] = sample_weight
return self.steps[-1][-1].score(Xt, y, **score_params)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
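# Illustrative sketch of the naming scheme above (not an official doctest):
# duplicate estimator types get a "-<k>" suffix, unique types keep the bare
# lowercased class name.
#
#     >>> from sklearn.decomposition import PCA
#     >>> from sklearn.svm import SVC
#     >>> [name for name, _ in _name_estimators([PCA(), PCA(), SVC()])]
#     ['pca-1', 'pca-2', 'svc']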
def make_pipeline(*steps, **kwargs):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Parameters
----------
*steps : list of estimators,
memory : Instance of sklearn.externals.joblib.Memory or string, optional \
(default=None)
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
... # doctest: +NORMALIZE_WHITESPACE
Pipeline(memory=None,
steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB(priors=None))])
Returns
-------
p : Pipeline
"""
memory = kwargs.pop('memory', None)
if kwargs:
raise TypeError('Unknown keyword arguments: "{}"'
.format(list(kwargs.keys())[0]))
return Pipeline(_name_estimators(steps), memory=memory)
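# Hedged usage sketch for the ``memory`` keyword handled above (the cache
# directory path is purely illustrative):
#
#     >>> from sklearn.preprocessing import StandardScaler
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> pipe = make_pipeline(StandardScaler(), LogisticRegression(),
#     ...                      memory='/tmp/sklearn_cache')
#
# With caching enabled, fitted transformers are cloned and memoized, so a
# refit with identical data and parameters is loaded back from disk.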
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, weight, X):
res = transformer.transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res
return res * weight
def _fit_transform_one(transformer, weight, X, y,
**fit_params):
if hasattr(transformer, 'fit_transform'):
res = transformer.fit_transform(X, y, **fit_params)
else:
res = transformer.fit(X, y, **fit_params).transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res, transformer
return res * weight, transformer
class FeatureUnion(_BaseComposition, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters of the transformers may be set using its name and the parameter
name separated by a '__'. A transformer may be replaced entirely by
setting the parameter with its name to another transformer,
or removed by setting to ``None``.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list : list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs : int, optional
Number of jobs to run in parallel (default 1).
transformer_weights : dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = tosequence(transformer_list)
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
self._validate_transformers()
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params('transformer_list', deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``.
Returns
-------
self
"""
self._set_params('transformer_list', **kwargs)
return self
def _validate_transformers(self):
names, transformers = zip(*self.transformer_list)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t is None:
continue
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All estimators should implement fit and "
"transform. '%s' (type %s) doesn't" %
(t, type(t)))
def _iter(self):
"""Generate (name, est, weight) tuples excluding None transformers
"""
get_weight = (self.transformer_weights or {}).get
return ((name, trans, get_weight(name))
for name, trans in self.transformer_list
if trans is not None)
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans, weight in self._iter():
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s (type %s) does not "
"provide get_feature_names."
% (str(name), type(trans).__name__))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
self : FeatureUnion
This estimator
"""
self._validate_transformers()
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for _, trans, _ in self._iter())
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
self._validate_transformers()
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, weight, X, y,
**fit_params)
for name, trans, weight in self._iter())
if not result:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, weight, X)
for name, trans, weight in self._iter())
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def _update_transformer_list(self, transformers):
transformers = iter(transformers)
self.transformer_list[:] = [
(name, None if old is None else next(transformers))
for name, old in self.transformer_list
]
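# Hedged usage sketch for ``transformer_weights`` (the weights are arbitrary
# and only scale the corresponding transformer outputs):
#
#     >>> from sklearn.decomposition import PCA, TruncatedSVD
#     >>> union = FeatureUnion([("pca", PCA(n_components=1)),
#     ...                       ("svd", TruncatedSVD(n_components=2))],
#     ...                      transformer_weights={"pca": 10.0, "svd": 1.0})
#     >>> X = [[0., 1., 3.], [2., 2., 5.]]
#     >>> union.fit_transform(X).shape
#     (2, 3)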
def make_union(*transformers, **kwargs):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Parameters
----------
*transformers : list of estimators
n_jobs : int, optional
Number of jobs to run in parallel (default 1).
Returns
-------
f : FeatureUnion
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from sklearn.pipeline import make_union
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca',
PCA(copy=True, iterated_power='auto',
n_components=None, random_state=None,
svd_solver='auto', tol=0.0, whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
"""
n_jobs = kwargs.pop('n_jobs', 1)
if kwargs:
# We do not currently support `transformer_weights` as we may want to
# change its type spec in make_union
raise TypeError('Unknown keyword arguments: "{}"'
.format(list(kwargs.keys())[0]))
return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs)
| bsd-3-clause |
huongttlan/mpld3 | doc/sphinxext/plot_generator.py | 19 | 10614 | import sys
import os
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg') # don't display plots
import mpld3
from matplotlib import image
from matplotlib.figure import Figure
class disable_mpld3(object):
"""Context manager to temporarily disable mpld3.show() command"""
def __enter__(self):
self.show = mpld3.show
mpld3.show = lambda *args, **kwargs: None
return self
def __exit__(self, type, value, traceback):
mpld3.show = self.show
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. raw:: html
{img_html}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
display: inline;
width: 170px;
height: 170px;
opacity:0.4;
filter:alpha(opacity=40); /* For IE8 and earlier */
}}
.figure img:hover
{{
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure .caption {{
width: 180px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example Gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
BANNER_JS_TEMPLATE = """
var banner_data = {banner_data};
banner_data.forEach(function(d, i) {{
d.i = i;
}});
var height = 150,
width = 900,
imageHeight = 150,
imageWidth = 150,
zoomfactor = 0.1;
var banner = d3.select(".example-banner");
banner.style("height", height + "px")
.style("width", width + "px")
.style("margin-left", "auto")
.style("margin-right", "auto");
var svg = banner.append("svg")
.attr("width", width + "px")
.attr("height", height + "px");
var anchor = svg.append("g")
.attr("class", "example-anchor")
.selectAll("a")
.data(banner_data.slice(0, 7));
anchor.exit().remove();
var anchor_elements = anchor.enter().append("a")
.attr("xlink:href", function(d) {{ return d.url; }})
.attr("xlink:title", function(d) {{ return d.title; }});
anchor_elements.append("svg:image")
.attr("width", (1 - zoomfactor) * imageWidth)
.attr("height", (1 - zoomfactor) * imageHeight)
.attr("xlink:href", function(d){{ return d.thumb; }})
.attr("xroot", function(d){{return d3.round(imageWidth * (d.i - 0.5));}})
.attr("x", function(d){{return d3.round(imageWidth * (d.i - 0.5));}})
.attr("y", d3.round(0.5 * zoomfactor * imageHeight))
.attr("i", function(d){{return d.i;}})
.on("mouseover", function() {{
var img = d3.select(this);
img.transition()
.attr("width", imageWidth)
.attr("height", height)
.attr("x", img.attr("xroot")
- d3.round(0.5 * zoomfactor * imageWidth))
.attr("y", 0);
}})
.on("mouseout", function() {{
var img = d3.select(this);
img.transition()
.attr("width", (1 - zoomfactor) * imageWidth)
.attr("height", (1 - zoomfactor) * height)
.attr("x", img.attr("xroot"))
.attr("y", d3.round(0.5 * zoomfactor * imageHeight));
}});
"""
def create_thumbnail(infile, thumbfile,
width=300, height=300,
cx=0.5, cy=0.6, border=4):
# this doesn't really matter, it will cancel in the end, but we
# need it for the mpl API
dpi = 100
baseout, extout = os.path.splitext(thumbfile)
im = image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - 0.5 * width)
y0 = int(cy * rows - 0.5 * height)
thumb = im[y0: y0 + height,
x0: x0 + width]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
extension = extout.lower()
if extension == '.png':
from matplotlib.backends.backend_agg \
import FigureCanvasAgg as FigureCanvas
elif extension == '.pdf':
from matplotlib.backends.backend_pdf \
import FigureCanvasPDF as FigureCanvas
elif extension == '.svg':
from matplotlib.backends.backend_svg \
import FigureCanvasSVG as FigureCanvas
else:
raise ValueError("Can only handle extensions 'png', 'svg' or 'pdf'")
fig = Figure(figsize=(float(width) / dpi, float(height) / dpi),
dpi=dpi)
canvas = FigureCanvas(fig)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
def indent(s, N=4):
"""indent a string"""
return s.replace('\n', '\n' + N * ' ')
class ExampleGenerator(object):
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.extract_docstring()
self.exec_file()
@property
def dirname(self):
return os.path.split(self.filename)[0]
@property
def fname(self):
return os.path.split(self.filename)[1]
@property
def modulename(self):
return os.path.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
return self.modulename + '.png'
@property
def thumbfilename(self):
# TODO: don't hard-code image path
return "_images/" + self.pngfilename
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.__iter__().next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print("running {0}".format(self.filename))
with disable_mpld3():
import matplotlib.pyplot as plt
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
execfile(self.filename, my_globals)
fig = plt.gcf()
self.html = mpld3.fig_to_html(fig)
thumbfile = os.path.join(self.target_dir,
self.pngfilename)
fig.savefig(thumbfile)
create_thumbnail(thumbfile, thumbfile)
def toctree_entry(self):
return " ./%s\n\n" % os.path.splitext(self.htmlfilename)[0]
def contents_entry(self):
return (".. figure:: ./{0}\n"
" :target: ./{1}\n"
" :align: center\n\n"
" :ref:`{2}`\n\n".format(self.pngfilename,
self.htmlfilename,
self.sphinxtag))
def main(app):
static_dir = os.path.join(app.builder.srcdir, '_static')
target_dir = os.path.join(app.builder.srcdir, 'examples')
source_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'..', 'examples'))
if not os.path.exists(static_dir):
os.makedirs(static_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in glob.glob(os.path.join(source_dir, "*.py")):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": os.path.join('examples', ex.htmlfilename),
"thumb": os.path.join(ex.thumbfilename)})
shutil.copyfile(filename, os.path.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=ex.pyfilename,
img_html=indent(ex.html, 4))
with open(os.path.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = os.path.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example-gallery",
toctree=toctree,
contents=contents))
# write javascript include for front page
js_file = os.path.join(static_dir, 'banner_data.js')
with open(js_file, 'w') as js:
js.write(BANNER_JS_TEMPLATE.format(
banner_data=json.dumps(banner_data)))
def setup(app):
app.connect('builder-inited', main)
| bsd-3-clause |
bradmontgomery/ml | book/ch02/figure2.py | 22 | 1732 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
COLOUR_FIGURE = False
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
data = load_iris()
features = data.data
feature_names = data.feature_names
target = data.target
target_names = data.target_names
# We use NumPy fancy indexing to get an array of strings:
labels = target_names[target]
is_setosa = (labels == 'setosa')
features = features[~is_setosa]
labels = labels[~is_setosa]
is_virginica = (labels == 'virginica')
# Hand fixed thresholds:
t = 1.65
t2 = 1.75
# Features to use: 3 & 2
f0, f1 = 3, 2
if COLOUR_FIGURE:
area1c = (1., .8, .8)
area2c = (.8, .8, 1.)
else:
area1c = (1., 1, 1)
area2c = (.7, .7, .7)
# Plot from 90% of smallest value to 110% of largest value
# (all feature values are positive, otherwise this would not work very well)
x0 = features[:, f0].min() * .9
x1 = features[:, f0].max() * 1.1
y0 = features[:, f1].min() * .9
y1 = features[:, f1].max() * 1.1
fig,ax = plt.subplots()
ax.fill_between([t, x1], [y0, y0], [y1, y1], color=area2c)
ax.fill_between([x0, t], [y0, y0], [y1, y1], color=area1c)
ax.plot([t, t], [y0, y1], 'k--', lw=2)
ax.plot([t2, t2], [y0, y1], 'k:', lw=2)
ax.scatter(features[is_virginica, f0],
features[is_virginica, f1], c='b', marker='o', s=40)
ax.scatter(features[~is_virginica, f0],
features[~is_virginica, f1], c='r', marker='x', s=40)
ax.set_ylim(y0, y1)
ax.set_xlim(x0, x1)
ax.set_xlabel(feature_names[f0])
ax.set_ylabel(feature_names[f1])
fig.tight_layout()
fig.savefig('figure2.png')
| mit |
sudhof/politeness | scripts/train_model.py | 3 | 2166 |
import random
import cPickle
import numpy as np
from sklearn import svm
from scipy.sparse import csr_matrix
from sklearn.metrics import classification_report
from politeness.features.vectorizer import PolitenessFeatureVectorizer
"""
Sample script to train a politeness SVM
Buckets documents by politeness score
'polite' if score > 0.0
'impolite' otherwise
Could also elect to not bucket
and treat this as a regression problem
"""
def train_svm(documents, ntesting=500):
"""
:param documents- politeness-annotated training data
:type documents- list of dicts
each document must be preprocessed and
'sentences' and 'parses' and 'score' fields.
:param ntesting- number of docs to reserve for testing
:type ntesting- int
returns fitted SVC, which can be serialized using cPickle
"""
# Generate and persist list of unigrams, bigrams
PolitenessFeatureVectorizer.generate_bow_features(documents)
# For good luck
random.shuffle(documents)
testing = documents[-ntesting:]
documents = documents[:-ntesting]
# SAVE FOR NOW
cPickle.dump(testing, open("testing-data.p", 'w'))
X, y = documents2feature_vectors(documents)
Xtest, ytest = documents2feature_vectors(testing)
print "Fitting"
clf = svm.SVC(C=0.02, kernel='linear', probability=True)
clf.fit(X, y)
# Test
y_pred = clf.predict(Xtest)
print(classification_report(ytest, y_pred))
return clf
def documents2feature_vectors(documents):
vectorizer = PolitenessFeatureVectorizer()
fks = False
X, y = [], []
for d in documents:
fs = vectorizer.features(d)
if not fks:
fks = sorted(fs.keys())
fv = [fs[f] for f in fks]
# If politeness score > 0.0,
# the doc is polite, class=1
l = 1 if d['score'] > 0.0 else 0
X.append(fv)
y.append(l)
X = csr_matrix(np.asarray(X))
y = np.asarray(y)
return X, y
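# Hedged sketch of the expected input (field names follow the train_svm
# docstring above; the sentence and parse strings are illustrative only):
#
#     example_doc = {
#         'sentences': ["Could you please take a look at this?"],
#         'parses': [["aux(take-5, Could-1)", "nsubj(take-5, you-2)"]],
#         'score': 0.75,   # > 0.0 is bucketed as polite (class 1)
#     }
#
# train_svm expects a list of such dicts, e.g. train_svm(docs, ntesting=500).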
if __name__ == "__main__":
"""
Train a dummy model off our 4 sample request docs
"""
from test_documents import TEST_DOCUMENTS
train_svm(TEST_DOCUMENTS, ntesting=1)
| apache-2.0 |
hms-dbmi/4DN_matrix-viewer | scripts/plot_grid.py | 2 | 1196 | #!/usr/bin/python
import json
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
plt.switch_backend('Qt5Agg')
def main():
parser = argparse.ArgumentParser(description="""
python plot_grid.py grid.json
""")
parser.add_argument('grid_filename')
#parser.add_argument('-o', '--options', default='yo',
# help="Some option", type='str')
#parser.add_argument('-u', '--useless', action='store_true',
# help='Another useless option')
args = parser.parse_args()
with open(args.grid_filename, 'r') as f:
grid = json.load(f)
data = []
counter = 0
for d in grid['data']:
try:
num = float(d)
        except (TypeError, ValueError):
num = 0
data += [num]
counter += 1
print("counter:", counter)
data = np.array(data)
print('dimensions:', grid['dimensions'])
data = np.nan_to_num(data)
print('sum:', sum(data))
data = data.reshape(grid['dimensions'])
print('data:', data)
plt.imshow(np.log(data))
plt.show()
if __name__ == '__main__':
main()
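# Hedged sketch of the expected input (keys follow the code above; the values
# are illustrative): grid.json is a JSON object with a flat "data" list and
# the matrix "dimensions" used for the reshape, e.g.
#
#     {"dimensions": [2, 3],
#      "data": [1.0, 2.0, null, 4.0, 5.0, 6.0]}
#
# Non-numeric entries (such as null) are coerced to 0 before the log-scaled
# imshow.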
| mit |
arbazkhan002/datasketch | benchmark/cardinality_benchmark.py | 3 | 2597 | import time, logging, random, struct
import pyhash
from datasketch.hyperloglog import HyperLogLog
from datasketch.minhash import MinHash
logging.basicConfig(level=logging.INFO)
# Produce some bytes
int_bytes = lambda x : ("a-%d-%d" % (x, x)).encode('utf-8')
class Hash(object):
def __init__(self, h):
self.h = h
def digest(self):
return struct.pack('<I', self.h)
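# The Hash wrapper above adapts a precomputed 32-bit murmur3 value to the
# hashlib-style interface (an object exposing digest()) that this benchmark
# passes to HyperLogLog/MinHash via ``hashobj``. A hedged sketch:
#
#     >>> hasher = pyhash.murmur3_32()
#     >>> h = Hash(hasher(int_bytes(1), seed=0))
#     >>> len(h.digest())   # 4 little-endian bytes, as packed with '<I'
#     4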
def _gen_data(size):
return [int_bytes(i) for i in range(size)]
def _run_hyperloglog(data, seed, p):
hasher = pyhash.murmur3_32()
h = HyperLogLog(p=p, hashobj=Hash)
for d in data:
h.update(hasher(d, seed=seed))
return h.count()
def _run_minhash(data, seed, p):
hasher = pyhash.murmur3_32()
m = MinHash(num_perm=2**p, hashobj=Hash)
for d in data:
m.update(hasher(d, seed=seed))
return m.count()
def _run_test(data, n, p):
logging.info("Running HyperLogLog with p = %d" % p)
hll_runs = [_run_hyperloglog(data, i, p) for i in xrange(n)]
logging.info("Running MinHash with num_perm = %d" % 2**p)
minhash_runs = [_run_minhash(data, i, p) for i in xrange(n)]
return (hll_runs, minhash_runs)
def run_full_tests(data, n, p_list):
logging.info("Run tests with n = %d" % (n))
return [_run_test(data, n, p) for p in p_list]
def plot_hist(ax, est_cards, bins, title, exact_card):
errors = [float(exact_card - c)/float(exact_card) for c in est_cards]
errors.sort()
ax.plot(errors, 'g.', markersize=12)
# ax.hist(errors, histtype='stepfilled', facecolor='g', alpha=0.75)
ax.set_title(title)
def plot(result, p_list, exact_card, bins, save):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
num_row = 2
num_col = len(result)
basesize = 5
size = (basesize*num_col, basesize*num_row)
fig, axes = plt.subplots(num_row, num_col, sharex=True, sharey=True,
figsize=size)
for i, (hll, minhash) in enumerate(result):
title = "HyperLogLog Error Rate p = " + r"$2^{%d}$" % p_list[i]
plot_hist(axes[0][i], hll, bins, title, exact_card)
title = "MinHash Error Rate num_perm = " + r"$2^{%d}$" % p_list[i]
plot_hist(axes[1][i], minhash, bins, title, exact_card)
fig.suptitle("Exact cardinality = %d" % exact_card)
fig.savefig(save)
if __name__ == "__main__":
exact_card = 5000
data = _gen_data(exact_card)
exps = [6, 8, 10]
p_list = exps
n = 100
save = "cardinality_benchmark.png"
bins = 30
result = run_full_tests(data, n, p_list)
plot(result, p_list, exact_card, bins, save)
| mit |
adamdempsey90/fargo3d | utils/python/pyfargo.py | 1 | 19500 | import re
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp2d
"""
For an efficiently developing, add these two lines in ipython shell:
In [1]: %load_ext autoreload
In [2]: %autoreload 2
"""
class Snapshot():
def __init__(self, n, fields=['Density'], reader = 'Fargo3d', directory=None):
"""
fields is a list with the name of the fields to be loaded.
"""
for field in fields:
"""
Here, I'm creating a list of variables with the name of the root fields.
Note the interesting way to do that. I'm declaring dynamicaly the name of
variables in a secondary interpreter, that shares the same namespace of
the main program. The function exec is amazing for interactive sessions!
"""
# exec('self.' + i + "=Reader.Fargo3d(input_file = (i+'{:06d}.dat').format(n))")
# exec('self.' + field + "=Field((field+'{:06d}.dat').format(n))")
exec('self.' + field + "=Field(n=n,name=field,directory=directory)")
try:
vx = self.Vx
vy = self.Vy
except AttributeError:
# print "streams method is avalaible only for snapshots with Vx & Vy fields."
# print "If you want to use it, please add Vx & Vy in fields. ej: Snapshot(n,fields=['Vx',Vy])"
pass
class Streams():
def __init__(self,vx,vy,planet):
"""
vx & vy are Field classes.
"""
self.Vx = vx
self.Vy = vy
self.xmin = vx.parameters['xmin']
self.xmax = vx.parameters['xmax']
self.ymin = vx.parameters['ymin']
self.ymax = vx.parameters['ymax']
self.domain_x = vx.domain['x'] #xmin(i)
self.domain_y = vx.domain['y'] #ymin(j)
self.nx = vx.parameters['nx']
self.ny = vx.parameters['ny']
self.planet = planet
def bilinear(self,x,y,f,p):
"""
x = (x1,x2); y = (y1,y2)
f = (f11,f12,f21,f22)
p = (x,y)
where x,y is the interpolated point and
fij is the value of the function at the
point (xi,yj).
"""
xp = p[0]; yp = p[1]; x1 = x[0]; x2 = x[1]
y1 = y[0]; y2 = y[1]; f11 = f[0]; f12 = f[1]
f21 = f[2]; f22 = f[3]
t = (xp-x1)/(x2-x1); u = (yp-y1)/(y2-y1)
return (1.0-t)*(1.0-u)*f11 + t*(1.0-u)*f12 + t*u*f22 + u*(1-t)*f21
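        # Hedged sanity check for the formula above (values chosen by hand):
        # at the centre of the unit square t = u = 0.5, so the result is the
        # plain average of the four corner values:
        #
        #     >>> streams.bilinear((0., 1.), (0., 1.), (1., 2., 3., 4.), (0.5, 0.5))
        #     2.5
        #
        # where `streams` is any Streams instance (the method does not touch
        # the stored fields).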
def get_vphi(self,x,y):
i = int((x-self.xmin)/(self.xmax-self.xmin)*self.nx)
j = int((y-self.ymin)/(self.ymax-self.ymin)*self.ny-0.5)
f11 = self.Vx.data[j,i]
f12 = self.Vx.data[j,i+1]
f21 = self.Vx.data[j+1,i]
f22 = self.Vx.data[j+1,i+1]
x1 = self.domain_x[i]
x2 = self.domain_x[i+1]
y1 = 0.5*(self.domain_y[j]+self.domain_y[j+1])
y2 = 0.5*(self.domain_y[j+1]+self.domain_y[j+2])
vphi = self.bilinear((x1,x2),(y1,y2),
(f11,f12,f21,f22),
(x,y))
return vphi
def get_vrad(self,x,y):
i = int((x-self.xmin)/(self.xmax-self.xmin)*self.nx+0.5)
j = int((y-self.ymin)/(self.ymax-self.ymin)*self.ny)
f11 = self.Vy.data[j,i]
f12 = self.Vy.data[j,i+1]
f21 = self.Vy.data[j+1,i]
f22 = self.Vy.data[j+1,i+1]
x1 = 0.5*(self.domain_x[i]+self.domain_x[i+1])
x2 = 0.5*(self.domain_x[i+1]+self.domain_x[i+2])
y1 = self.domain_y[j]
y2 = self.domain_y[j+1]
vrad = self.bilinear((x1,x2),(y1,y2),
(f11,f12,f21,f22),
(x,y))
return vrad
def __euler(self,x,y,frac=0.4,reverse=False):
"""
Reverse inverts the sign of velocity
"""
sign = 1.0
if reverse:
sign = -1
vphi = self.get_vphi(x,y)
vrad = self.get_vrad(x,y)
l = ((self.xmax-self.xmin)/self.nx)**2 + ((self.ymax-self.ymin)/self.ny)**2
h = np.sqrt(l/(vphi**2+vrad**2))
# h = np.min(((self.xmax-self.xmin)/self.nx/np.abs(vphi/y),
# (self.ymax-self.ymin)/self.ny/np.abs(vrad)))
h *= frac
return sign*h*np.array([vphi/y,vrad])
def __get_stream(self,x0,y0,reverse=False,frac=0.4,nmax=10**6,bidirectional=False):
if(bidirectional):
reverse = False
s0 = self.__get_stream(x0,y0,reverse=False,bidirectional=False,nmax=nmax,frac=frac)
print "s0 lista"
s1 = self.__get_stream(x0,y0,reverse=True,bidirectional=False,nmax=nmax,frac=frac)
print "s1 lista"
return (s0,s1)
print 'Computing streamline...'
x = []; y = []
x.append(x0)
y.append(y0)
for i in xrange(nmax):
ds = self.__euler(x0,y0,frac=frac,reverse=reverse)
dx = ds[0]; dy = ds[1]
if(np.sqrt(dx**2+dy**2)<1e-10):
print "Warning: dt is very small, maybe you're in a stagnation point!" +\
"Please, select another initial point."
break
x0 += dx
y0 += dy
if (x0 > self.xmax-2*(self.xmax-self.xmin)/self.nx) or \
(y0 > self.ymax-2*(self.ymax-self.ymin)/self.ny) \
or x0<self.xmin or y0<self.ymin:
print "Warning: It was reached the limit of the box, breaking..."
break
x.append(x0)
y.append(y0)
print "streamline was done in",i, "steps"
stream = np.array([x,y])
return stream
def get_streams(self,x0,y1,y2,n=30,frac=0.5,nmax=10**6):
values = np.arange(y1,y2,(y2-y1)/float(n))
streams = []
i = 0
for y in values:
s = self.__get_stream(x0,y,frac=frac,nmax=nmax)
if(len(s[0])==1):
print "Warning! Sreamline was null. Recomputing it in reverse mode."
s = self.__get_stream(x0,y,frac=frac,nmax=nmax,reverse=True)
if(len(s[0])==1):
print "The streamline",y,"have a problem, the lenght is Null... Skipped"
continue
print "Streamline",i, "OK"
streams.append(s)
i += 1
return streams
def get_stagnation(self,w=0.1,tol=None):
"""
Computes the stagnation points. Tol is the tolerance (in pixels)
for the stagnation points.
if local is true, the stagnation point is searched near to (xp,yp).
w controls the width of the local area. w is the fraction of the size of the box that represents the local part.
"""
xmin = self.xmin
xmax = self.xmax
ymin = self.ymin
ymax = self.ymax
nx = self.nx
ny = self.ny
vx = self.Vx.data
vy = self.Vy.data
xp = self.planet[0]
yp = self.planet[1]
w = 0.5*w #redefining w!!
if tol == None:
l = np.sqrt(((self.xmax-self.xmin)/self.nx)**2 +
((self.ymax-self.ymin)/self.ny)**2)
tol = 0.5*l # half width of a cell.
lx = (xmax-xmin)
ly = (ymax-ymin)
ip = int((xp-xmin)/(xmax-xmin)*nx)
jp = int((yp-ymin)/(ymax-ymin)*ny)
wx = int(w*nx)
wy = int(w*ny)
x = self.domain_x
y = self.domain_y + 0.5*ly/ny #Be careful with centering!!
xx,yy = np.meshgrid(x,y)
cx = plt.contour(xx[jp-wy:jp+wy,ip-wx:ip+wx],yy[jp-wy:jp+wy,ip-wx:ip+wx],vx[jp-wy:jp+wy,ip-wx:ip+wx],
levels=(0,)) #getting 0 contours
x = self.domain_x + 0.5*lx/nx
y = self.domain_y
xx,yy = np.meshgrid(x,y)
cy = plt.contour(xx[jp-wy:jp+wy,ip-wx:ip+wx],yy[jp-wy:jp+wy,ip-wx:ip+wx],vy[jp-wy:jp+wy,ip-wx:ip+wx],
levels=(0,))
px = cx.collections[0].get_paths()
py = cy.collections[0].get_paths()
nx = np.shape(px)[0]
ny = np.shape(py)[0]
if(nx>1 or ny>1):
print "x-contours=",nx,"y-contours=",ny
s0,s1 = self.get_stagnation(w=w/2.0,tol=None)
return s0,s1
temp = []
for i in range(nx):
cx = px[i].vertices
# plt.plot(cx[:,0],cx[:,1],'ko')
for j in range(ny):
cy = py[j].vertices
# plt.plot(cy[:,0],cy[:,1],'ro')
for k in cx:
dif = np.sqrt((k[0]-cy[:,0])**2 + (k[1]-cy[:,1])**2)
if np.any(dif < tol):
# index = np.where(dif<tol)
# print k[0],k[1]
# temp.append((cy[index,0], cy[index,1]))
temp.append((k[0],k[1]))
# return temp
sx = []
sy = []
for l in temp:
# sx.append(l[0][0][0])
# sy.append(l[1][0][0])
sx.append(l[0])
sy.append(l[1])
dif = np.sqrt((np.asarray(sx)-xp)**2+(np.asarray(sy)-yp)**2)
try:
            index = int(np.argmin(dif))
            return sx[index],sy[index]
except ValueError:
print "Error getting the stagnation point! Try to increase 'tol' value."
return 0,0
def get_separatrix(self,niter=10,tol=None,noise=10.0,w=0.1,frac=0.05,x0=None,y0=None):
"""
Noise is given in fraction of cells
"""
separatrix = []
dx = (self.xmax-self.xmin)/self.nx
dy = (self.ymax-self.ymin)/self.ny
if(x0 == None and y0==None):
s0,s1 = self.get_stagnation(w=w,tol=tol)
if(s0 == 0 and s1 == 0):
return
print "Stagnation point was founded:", s0,s1
else:
s0,s1 = x0,y0
for i in range(niter):
s = self.__get_stream(s0+noise*dx*(-0.5+np.random.rand()),
s1+noise*dy*(-0.5+np.random.rand()),
bidirectional=True,frac=frac)
separatrix.append(s)
return separatrix
def plot_stagnation(self,s):
plt.plot(s[0],s[1],'ro',ms=10)
def plot_streams(self,streams):
"""
Ploting the streams computed with get_streams method.
"""
for stream in streams:
plt.plot(stream[0],stream[1],'k')
def plot_separatrix(self,separatrix):
for s in separatrix:
plt.plot(s[0][0],s[0][1],'r',linewidth=2.0)
plt.plot(s[1][0],s[1][1],'r',linewidth=2.0)
def get_map(self,alpha=0.1):
ymin = self.planet[1]*(1-alpha)
ymax = self.planet[1]*(1+alpha)
streams1 = self.get_streams(-np.pi,ymin,ymax,n=30,frac=0.5)
streams2 = self.get_streams(np.pi-2*(self.xmax-self.xmin)/self.nx,ymin,ymax,n=30,frac=0.5)
self.plot_streams(streams1)
self.plot_streams(streams2)
separ = self.get_separatrix(niter=15,frac=0.1,noise=0.1)
stag = self.get_stagnation()
self.plot_stagnation(stag)
self.plot_separatrix(separ)
data = {}
data['separatrix'] = separ
data['stagnation'] = stag
data['streams_left'] = streams1
data['streams_right'] = streams2
return data
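# Hedged usage sketch (assumes FARGO3D output files such as dims.dat,
# domain_*.dat, planet0.dat and the field dumps are present; the output
# directory name is illustrative and must end with a slash):
#
#     >>> snap = Snapshot(10, fields=['Density', 'Vx', 'Vy'],
#     ...                 directory='outputs/')
#     >>> streams = Snapshot.Streams(snap.Vx, snap.Vy, snap.Vx.planet)
#     >>> topology = streams.get_map(alpha=0.1)
#
# get_map() draws streamlines and the separatrix on the current matplotlib
# figure and returns a dict with the separatrix, stagnation point and the
# left/right streamline bundles.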
class Reader():
class __Fargo3d():
def __init__(self, n, name=None,
dims_name="dims.dat",directory=None):
if directory == None:
input_file = name + "{0:06d}.dat".format(n)
else:
input_file = directory + name + "{0:06d}.dat".format(n)
dims_name = directory + dims_name
if name == None:
return None
if type(name) != str:
print "Error! input_file must be a string"
return
self.directory = directory
self.n = n
self.name = input_file
self.parameters = self.__shape(dims_name)
self.axis = self.__axis(self.parameters)
self.domain = self.__domain(self.axis,self.parameters)
self.data = self.__reader(input_file,
self.parameters,
self.axis)
self.planet = self.__planet(self.n)
def __reader(self,input_file, param, axes):
dim = []
for i in axes:
dim.append(int(param['n'+i]))
return np.fromfile(input_file).reshape(dim)
def __shape(self,dims_name):
"""
Determines the shape of the simulation.
"""
dims = open(dims_name,"r")
lines = dims.readlines()
name = lines[0].split("\t")
vals = lines[1].split("\t")
parameters = {}
for i,j in zip(name,vals):
parameters[i.lower()] = float(j)
return parameters
def __domain(self, axis,parameters):
d = {}
for i in axis:
if i != 'x':
                    a = int(parameters['ngh'+i])
                    b = int(parameters['n'+i])
else:
a = 0
b = -1
                if self.directory is None:
d[i] = np.loadtxt("domain_{0}.dat".format(i))[a:a+b]
else:
d[i] = np.loadtxt(self.directory+"domain_{0}.dat".format(i))[a:a+b]
return d
def __axis(self,p):
"""
Determines the dimensions of the simulation,
ie: x, y, z, xy, yz, xz, xz, xyz
"""
ax = ['z', 'y', 'x']
if p['nx'] == 1: ax.remove('x')
if p['ny'] == 1: ax.remove('y')
if p['nz'] == 1: ax.remove('z')
return ax
def __planet(self,n):
try:
                if self.directory is None:
p = np.loadtxt("planet0.dat")
else:
p = np.loadtxt(self.directory + "planet0.dat")
except IOError:
return (0,0)
            index = np.where(p[:, 0] == n)[0][0]
xp = p[index,1]; yp = p[index,2]
r = np.sqrt(xp**2+yp**2)
phi = np.arctan2(yp,xp)
return (phi,r)
def __init__(read_class,n,name=None,
dims_name="dims.dat",
reader='fargo3d',directory=None):
"""
Note that here read_class is equivalent to self.
"""
if(reader == 'fargo3d'):
return read_class.__Fargo3d(n=n,name=name, dims_name=dims_name,directory=directory)
class Field(Reader):
def __init__(self, n, name, reader='fargo3d',directory=None):
reader = Reader.__init__(self, n=n, name=name, reader = 'fargo3d',directory=directory)
self.name = reader.name
self.n = n
self.parameters = reader.parameters
self.axis = reader.axis
self.domain = reader.domain
self.data = reader.data
self.planet = reader.planet
self.dx = (self.parameters['xmax'] -
self.parameters['xmin'])/self.parameters['nx']
self.dy = (self.parameters['ymax'] -
self.parameters['ymin'])/self.parameters['ny']
self.extent = [self.parameters['xmin'],
self.parameters['xmax'],
self.parameters['ymin'],
self.parameters['ymax']]
self.nghy = self.parameters['nghy']
self.nghz = self.parameters['nghz']
return
def Get_properties(self):
print "Input file is:", self.name
print "Number of snapshot si:", self.n
print "The matrix of the data is:", self.data
print "The parameters of the simulations are:", self.parameters
print "The active axes are:", self.axis
print "The domain of each axis is:", self.domain
def __projection(self,x_old,y_old,data,x_new,y_new,zero=1e-8):
"""
            Regrid data defined on (x_old, y_old) onto the (x_new, y_new) grid.
            zero = value used to mask data[0, :] before the interpolation.
"""
from matplotlib.mlab import griddata
data[0,:] = zero
new_data = griddata(x_old.flat,
y_old.flat,
                                data.T.flat, # Don't forget to transpose!
x_new,
y_new,
interp='linear')
return new_data
def __pol2cart(self,r,t,data,frac=0.7, n=1000, multiperiodic=1):
"""
frac=0.7 is the fraction of rmax that will be used for the box size.
Box size is 2*frac*rmax
n=1000 is the output resolution
"""
rr, tt = np.meshgrid(r,t)
xx = rr*np.cos(multiperiodic*tt)
yy = rr*np.sin(multiperiodic*tt)
rmax = frac*r.max()
x = np.arange(-rmax,rmax,2.0*rmax/float(n))
extent = [-rmax,rmax,-rmax,rmax]
return self.__projection(xx,yy,data,x,x),extent
def Plot2d(self, log=False,cmap=plt.cm.hot, norm=None, aspect=None, interpolation=None,
alpha=None, vmin=None, vmax=None, origin='lower', extent=None,
shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None,
url=None, hold=None, rmask = 0,
projection='pol', n=1000, frac=0.7, multiperiodic=1.0):
"""
        For now, only works with 2d-fields.
        Local parameters:
        log = False --> activates log scale (log10(abs(data))).
        rmask = 0 --> number of bad radial boundary cells to skip (useful for
        plots of simulations with strong radial boundary conditions).
        projection = 'pol' is the coordinate system for plotting. Options: {'pol', 'cart'}
        """
        if log:
data = np.log10(np.abs(self.data[rmask:-(1+rmask),:]))
else:
data = self.data[rmask:-(1+rmask),:]
if projection == 'cart':
data,extent = self.__pol2cart(self.domain['y'][0+rmask:-(1+rmask)],self.domain['x'],data,
frac=frac,n=n, multiperiodic=multiperiodic)
plt.imshow(data,
cmap=cmap, norm=norm, aspect=aspect,
interpolation=interpolation, alpha=alpha,
vmin=vmin, vmax=vmax, origin=origin,
shape=shape, filternorm=filternorm,
filterrad=filterrad, imlim=imlim,
resample=resample, url=url, hold=hold,
extent=extent)
if projection == 'pol':
            if extent is None:
extent = [self.domain['x'][0],
self.domain['x'][-1],
self.domain['y'][0+rmask],
self.domain['y'][-(1+rmask)]]
plt.xlabel(self.axis[1])
plt.ylabel(self.axis[0])
plt.imshow(data,
cmap=cmap, norm=norm, aspect=aspect,
interpolation=interpolation, alpha=alpha,
vmin=vmin, vmax=vmax, origin=origin,
shape=shape, filternorm=filternorm,
filterrad=filterrad, imlim=imlim,
resample=resample, url=url, hold=hold,
extent=extent)
plt.show()
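# Minimal usage sketch (not part of the original module; the field name
# 'gasdens' and the snapshot number are assumptions about which output files
# exist in the working directory):
#   rho = Field(10, 'gasdens')   # reads gasdens000010.dat, dims.dat, planet0.dat
#   rho.Plot2d(log=True)         # polar map of log10(|density|)
#   rho.Get_properties()         # prints shape, axes and domain information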
| gpl-3.0 |
HolgerPeters/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12291 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
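# Usage sketch (illustrative only; mirrors the doctest in the class docstring):
# after fitting, transform() returns the hidden activation probabilities
# P(h=1|v) for each sample.
#   X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
#   hidden = BernoulliRBM(n_components=2, n_iter=10).fit(X).transform(X)
#   # hidden has shape (4, 2) with entries in [0, 1]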
| bsd-3-clause |
RyanWilsonDev/DomoPy | setup.py | 1 | 1771 | import os
from setuptools import setup
# Manage version in __init__.py
def get_version(version_tuple):
"""version from tuple accounting for possible a,b,rc tags."""
# in case an a, b, or rc tag is added
if not isinstance(version_tuple[-1], int):
return '.'.join(
map(str, version_tuple[:-1])
) + version_tuple[-1]
return '.'.join(map(str, version_tuple))
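# For example (sketch): get_version((1, 2, 3)) -> '1.2.3',
# while get_version((1, 2, 'rc1')) -> '1.2rc1'.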
# path to __init__ for package
INIT = os.path.join(
os.path.dirname(__file__), 'domopy',
'__init__.py'
)
VERSION_LINE = list(
filter(lambda line: line.startswith('VERSION'), open(INIT))
)[0]
# A lot of effort, but the package might not be importable before
# installation finishes, so we can't simply import VERSION
VERSION = get_version(eval(VERSION_LINE.split('=')[-1]))
setup(
name='domopy',
version=VERSION,
author='Ryan Wilson',
license='MIT',
url='https://github.com/RyanWilsonDev/DomoPy',
description="methods for interacting with Domo APIs",
long_description="""
        Set of classes and methods for interacting with
        the Domo Data APIs and Domo User APIs. Handles
        authentication, pulling data from Domo, creating
        new Domo datasets, replacing/appending existing
        datasets, etc.
""",
packages=[
'domopy'
],
package_data={'': ['LICENSE'], 'LICENSES': ['NOTICE', 'PANDAS_LICENSE', 'REQUESTS_LICENSE']},
include_package_data=True,
install_requires=[
'pandas',
'requests',
'requests_oauthlib'
],
classifiers=(
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython'
),
tests_require=[]
)
| mit |
ningchi/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
biocore/qiita | qiita_db/handlers/prep_template.py | 3 | 4211 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from json import loads
from os.path import basename
from tornado.web import HTTPError
import pandas as pd
import qiita_db as qdb
from .oauth2 import OauthBaseHandler, authenticate_oauth
def _get_prep_template(pid):
"""Returns the prep template with the given `pid` if it exists
Parameters
----------
pid : str
The prep template id
Returns
-------
qiita_db.metadata_template.prep_template.PrepTemplate
The requested prep template
Raises
------
HTTPError
If the prep template does not exist, with error code 404
If there is a problem instantiating the template, with error code 500
"""
try:
pid = int(pid)
pt = qdb.metadata_template.prep_template.PrepTemplate(pid)
except qdb.exceptions.QiitaDBUnknownIDError:
raise HTTPError(404)
except Exception as e:
raise HTTPError(500, reason='Error instantiating prep template %s: %s'
% (pid, str(e)))
return pt
class PrepTemplateDBHandler(OauthBaseHandler):
@authenticate_oauth
def get(self, prep_id):
"""Retrieves the prep template information
Parameters
----------
prep_id: str
The id of the prep template whose information is being retrieved
Returns
-------
dict
The prep information:
'data_type': prep info data type
'artifact': artifact attached to the given prep
'investigation_type': prep info investigation type
'study': study that the prep info belongs to
'status': prep info status
'sample-file': the path to the sample information file
'prep-file': the path to the prep info file
"""
with qdb.sql_connection.TRN:
pt = _get_prep_template(prep_id)
prep_files = [fp for _, fp in pt.get_filepaths()
if 'qiime' not in basename(fp)]
artifact = pt.artifact.id if pt.artifact is not None else None
sid = pt.study_id
response = {
'data_type': pt.data_type(),
'artifact': artifact,
'investigation_type': pt.investigation_type,
'study': sid,
'status': pt.status,
                # get_filepaths returns an ordered list of [filepath_id,
                # filepath] pairs, newest first, so take the newest one
'sample-file': qdb.study.Study(
sid).sample_template.get_filepaths()[0][1],
# The first element in the prep_files is the newest
# prep information file - hence the correct one
'prep-file': prep_files[0]
}
self.write(response)
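        # Example response body (sketch; the ids and file paths below are
        # made up for illustration):
        # {"data_type": "16S", "artifact": 3,
        #  "investigation_type": "Metagenomics", "study": 1,
        #  "status": "private",
        #  "sample-file": "/path/to/sample_information_file.txt",
        #  "prep-file": "/path/to/prep_information_file.txt"}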
class PrepTemplateDataHandler(OauthBaseHandler):
@authenticate_oauth
def get(self, prep_id):
"""Retrieves the prep contents
Parameters
----------
prep_id : str
The id of the prep template whose information is being retrieved
Returns
-------
dict
The contents of the prep information keyed by sample id
"""
with qdb.sql_connection.TRN:
pt = _get_prep_template(prep_id)
response = {'data': pt.to_dataframe().to_dict(orient='index')}
self.write(response)
class PrepTemplateAPItestHandler(OauthBaseHandler):
@authenticate_oauth
def post(self):
prep_info_dict = loads(self.get_argument('prep_info'))
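        # prep_info is expected to be a JSON object keyed by sample id, e.g.
        # (illustrative values only, not taken from the original code):
        #   {"sample-1": {"center_name": "ANL", "platform": "Illumina"}}
        # from_dict(..., orient='index') below turns each entry into one row.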
study = self.get_argument('study')
data_type = self.get_argument('data_type')
metadata = pd.DataFrame.from_dict(prep_info_dict, orient='index')
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
metadata, qdb.study.Study(study), data_type)
self.write({'prep': pt.id})
| bsd-3-clause |
mne-tools/mne-tools.github.io | mne-realtime/_downloads/925deae0ba34fd05c8245119722686ef/rt_feedback_server.py | 1 | 5209 | """
==============================================
Real-time feedback for decoding :: Server Side
==============================================
This example demonstrates how to setup a real-time feedback
mechanism using StimServer and StimClient.
The idea here is to display future stimuli for the class which
is predicted less accurately. This allows on-demand adaptation
of the stimuli depending on the needs of the classifier.
To run this example, open ipython in two separate terminals.
In the first, run rt_feedback_server.py and then wait for the
message
RtServer: Start
Once that appears, run rt_feedback_client.py in the other terminal
and the feedback script should start.
All brain responses are simulated from a fiff file to make it easy
to test. However, it should be possible to adapt this script
for a real experiment.
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import mne
from mne.datasets import sample
from mne.decoding import Vectorizer, FilterEstimator
from mne_realtime import StimServer
from mne_realtime import MockRtClient
print(__doc__)
# Load fiff file to simulate data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# Instantiating stimulation server
# The with statement is necessary to ensure a clean exit
with StimServer(port=4218) as stim_server:
# The channels to be used while decoding
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=False,
stim=False, exclude=raw.info['bads'])
rt_client = MockRtClient(raw)
# Constructing the pipeline for classification
# don't highpass filter because of short signal length of epochs
filt = FilterEstimator(raw.info, None, 40,
# keep all channels that are picked from the
# RtClient
picks=np.arange(len(picks), dtype=int))
scaler = preprocessing.StandardScaler()
vectorizer = Vectorizer()
clf = SVC(C=1, kernel='linear')
concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
('scaler', scaler), ('svm', clf)])
stim_server.start(verbose=True)
# Just some initially decided events to be simulated
    # The rest will be decided on the fly
ev_list = [4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4]
score_c1, score_c2, score_x = [], [], []
for ii in range(50):
# Tell the stim_client about the next stimuli
stim_server.add_trigger(ev_list[ii])
# Collecting data
if ii == 0:
X = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')[None, ...]
y = ev_list[ii]
else:
X_temp = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')
X_temp = X_temp[np.newaxis, ...]
X = np.concatenate((X, X_temp), axis=0)
time.sleep(1) # simulating the isi
y = np.append(y, ev_list[ii])
# Start decoding after collecting sufficient data
if ii >= 10:
# Now start doing rtfeedback
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2,
random_state=7)
y_pred = concat_classifier.fit(X_train, y_train).predict(X_test)
cm = confusion_matrix(y_test, y_pred)
score_c1.append(float(cm[0, 0]) / sum(cm, 1)[0] * 100)
score_c2.append(float(cm[1, 1]) / sum(cm, 1)[1] * 100)
# do something if one class is decoded better than the other
if score_c1[-1] < score_c2[-1]:
print("We decoded class RV better than class LV")
ev_list.append(3) # adding more LV to future simulated data
else:
print("We decoded class LV better than class RV")
ev_list.append(4) # adding more RV to future simulated data
# Clear the figure
plt.clf()
# The x-axis for the plot
score_x.append(ii)
# Now plot the accuracy
plt.plot(score_x[-5:], score_c1[-5:])
plt.plot(score_x[-5:], score_c2[-5:])
plt.xlabel('Trials')
plt.ylabel('Classification score (% correct)')
plt.title('Real-time feedback')
plt.ylim([0, 100])
plt.xticks(score_x[-5:])
plt.legend(('LV', 'RV'), loc='upper left')
plt.draw()
plt.pause(0.1)
plt.draw() # Final figure
| bsd-3-clause |
amolkahat/pandas | pandas/tests/frame/test_period.py | 3 | 5534 | import numpy as np
from numpy.random import randn
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas import (PeriodIndex, period_range, DataFrame, date_range,
Index, to_datetime, DatetimeIndex, Timedelta)
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(object):
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH # 1211
repr(df)
ts = df['1/1/2000']
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5, name='index')
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == 'Index'
assert rng.name == 'index'
rs = df.reset_index().set_index('index')
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_frame_to_time_stamp(self):
K = 5
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
df = DataFrame(randn(len(index), K), index=index)
df['mix'] = 'a'
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
exp_index = exp_index + Timedelta(1, 'D') - Timedelta(1, 'ns')
result = df.to_timestamp('D', 'end')
tm.assert_index_equal(result.index, exp_index)
tm.assert_numpy_array_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start')
tm.assert_index_equal(result.index, exp_index)
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, 'h') - Timedelta(1, 'ns')
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, 'm') - Timedelta(1, 'ns')
tm.assert_index_equal(result.index, exp_index)
result = df.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, 's') - Timedelta(1, 'ns')
tm.assert_index_equal(result.index, exp_index)
# columns
df = df.T
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
exp_index = exp_index + Timedelta(1, 'D') - Timedelta(1, 'ns')
result = df.to_timestamp('D', 'end', axis=1)
tm.assert_index_equal(result.columns, exp_index)
tm.assert_numpy_array_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start', axis=1)
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end', axis=1)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, 'h') - Timedelta(1, 'ns')
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end', axis=1)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, 'm') - Timedelta(1, 'ns')
tm.assert_index_equal(result.columns, exp_index)
result = df.to_timestamp('S', 'end', axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, 's') - Timedelta(1, 'ns')
tm.assert_index_equal(result.columns, exp_index)
# invalid axis
tm.assert_raises_regex(
ValueError, 'axis', df.to_timestamp, axis=2)
result1 = df.to_timestamp('5t', axis=1)
result2 = df.to_timestamp('t', axis=1)
expected = pd.date_range('2001-01-01', '2009-01-01', freq='AS')
assert isinstance(result1.columns, DatetimeIndex)
assert isinstance(result2.columns, DatetimeIndex)
tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
# PeriodIndex.to_timestamp always use 'infer'
assert result1.columns.freqstr == 'AS-JAN'
assert result2.columns.freqstr == 'AS-JAN'
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_align_frame(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
ZhangXinNan/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
jmullan/isort | test_isort.py | 4 | 79535 | # coding: utf-8
"""test_isort.py.
Tests all major functionality of the isort library
Should be run using py.test by simply running py.test in the isort project directory
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import codecs
import os
import shutil
import tempfile
from isort.isort import SortImports
from isort.pie_slice import *
from isort.settings import WrapModes
SHORT_IMPORT = "from third_party import lib1, lib2, lib3, lib4"
REALLY_LONG_IMPORT = ("from third_party import lib1, lib2, lib3, lib4, lib5, lib6, lib7, lib8, lib9, lib10, lib11,"
"lib12, lib13, lib14, lib15, lib16, lib17, lib18, lib20, lib21, lib22")
REALLY_LONG_IMPORT_WITH_COMMENT = ("from third_party import lib1, lib2, lib3, lib4, lib5, lib6, lib7, lib8, lib9, "
"lib10, lib11, lib12, lib13, lib14, lib15, lib16, lib17, lib18, lib20, lib21, lib22"
" # comment")
def test_happy_path():
"""Test the most basic use case, straight imports no code, simply not organized by category."""
test_input = ("import sys\n"
"import os\n"
"import myproject.test\n"
"import django.settings")
test_output = SortImports(file_contents=test_input, known_third_party=['django']).output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import django.settings\n"
"\n"
"import myproject.test\n")
def test_code_intermixed():
"""Defines what should happen when isort encounters imports intermixed with
code.
(it should pull them all to the top)
"""
test_input = ("import sys\n"
"print('yo')\n"
"print('I like to put code between imports cause I want stuff to break')\n"
"import myproject.test\n")
test_output = SortImports(file_contents=test_input).output
assert test_output == ("import sys\n"
"\n"
"import myproject.test\n"
"\n"
"print('yo')\n"
"print('I like to put code between imports cause I want stuff to break')\n")
def test_correct_space_between_imports():
"""Ensure after imports a correct amount of space (in newlines) is
enforced.
(2 for method, class, or decorator definitions 1 for anything else)
"""
test_input_method = ("import sys\n"
"def my_method():\n"
" print('hello world')\n")
test_output_method = SortImports(file_contents=test_input_method).output
assert test_output_method == ("import sys\n"
"\n"
"\n"
"def my_method():\n"
" print('hello world')\n")
test_input_decorator = ("import sys\n"
"@my_decorator\n"
"def my_method():\n"
" print('hello world')\n")
test_output_decorator = SortImports(file_contents=test_input_decorator).output
assert test_output_decorator == ("import sys\n"
"\n"
"\n"
"@my_decorator\n"
"def my_method():\n"
" print('hello world')\n")
test_input_class = ("import sys\n"
"class MyClass(object):\n"
" pass\n")
test_output_class = SortImports(file_contents=test_input_class).output
assert test_output_class == ("import sys\n"
"\n"
"\n"
"class MyClass(object):\n"
" pass\n")
test_input_other = ("import sys\n"
"print('yo')\n")
test_output_other = SortImports(file_contents=test_input_other).output
assert test_output_other == ("import sys\n"
"\n"
"print('yo')\n")
def test_sort_on_number():
"""Ensure numbers get sorted logically (10 > 9 not the other way around)"""
test_input = ("import lib10\n"
"import lib9\n")
test_output = SortImports(file_contents=test_input).output
assert test_output == ("import lib9\n"
"import lib10\n")
def test_line_length():
"""Ensure isort enforces the set line_length."""
assert len(SortImports(file_contents=REALLY_LONG_IMPORT, line_length=80).output.split("\n")[0]) <= 80
assert len(SortImports(file_contents=REALLY_LONG_IMPORT, line_length=120).output.split("\n")[0]) <= 120
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, line_length=42).output
assert test_output == ("from third_party import (lib1, lib2, lib3,\n"
" lib4, lib5, lib6,\n"
" lib7, lib8, lib9,\n"
" lib10, lib11,\n"
" lib12, lib13,\n"
" lib14, lib15,\n"
" lib16, lib17,\n"
" lib18, lib20,\n"
" lib21, lib22)\n")
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, line_length=42, wrap_length=32).output
assert test_output == ("from third_party import (lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22)\n")
def test_output_modes():
"""Test setting isort to use various output modes works as expected"""
test_output_grid = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.GRID, line_length=40).output
assert test_output_grid == ("from third_party import (lib1, lib2,\n"
" lib3, lib4,\n"
" lib5, lib6,\n"
" lib7, lib8,\n"
" lib9, lib10,\n"
" lib11, lib12,\n"
" lib13, lib14,\n"
" lib15, lib16,\n"
" lib17, lib18,\n"
" lib20, lib21,\n"
" lib22)\n")
test_output_vertical = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.VERTICAL, line_length=40).output
assert test_output_vertical == ("from third_party import (lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22)\n")
comment_output_vertical = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.VERTICAL, line_length=40).output
assert comment_output_vertical == ("from third_party import (lib1, # comment\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22)\n")
test_output_hanging_indent = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent=" ").output
assert test_output_hanging_indent == ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, \\\n"
" lib8, lib9, lib10, lib11, lib12, \\\n"
" lib13, lib14, lib15, lib16, lib17, \\\n"
" lib18, lib20, lib21, lib22\n")
comment_output_hanging_indent = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent=" ").output
assert comment_output_hanging_indent == ("from third_party import lib1, \\ # comment\n"
" lib2, lib3, lib4, lib5, lib6, \\\n"
" lib7, lib8, lib9, lib10, lib11, \\\n"
" lib12, lib13, lib14, lib15, lib16, \\\n"
" lib17, lib18, lib20, lib21, lib22\n")
test_output_vertical_indent = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=40, indent=" ").output
assert test_output_vertical_indent == ("from third_party import (\n"
" lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22\n"
")\n")
comment_output_vertical_indent = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=40, indent=" ").output
assert comment_output_vertical_indent == ("from third_party import ( # comment\n"
" lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22\n"
")\n")
test_output_vertical_grid = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.VERTICAL_GRID,
line_length=40, indent=" ").output
assert test_output_vertical_grid == ("from third_party import (\n"
" lib1, lib2, lib3, lib4, lib5, lib6,\n"
" lib7, lib8, lib9, lib10, lib11,\n"
" lib12, lib13, lib14, lib15, lib16,\n"
" lib17, lib18, lib20, lib21, lib22)\n")
comment_output_vertical_grid = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.VERTICAL_GRID,
line_length=40, indent=" ").output
assert comment_output_vertical_grid == ("from third_party import ( # comment\n"
" lib1, lib2, lib3, lib4, lib5, lib6,\n"
" lib7, lib8, lib9, lib10, lib11,\n"
" lib12, lib13, lib14, lib15, lib16,\n"
" lib17, lib18, lib20, lib21, lib22)\n")
test_output_vertical_grid_grouped = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.VERTICAL_GRID_GROUPED,
line_length=40, indent=" ").output
assert test_output_vertical_grid_grouped == ("from third_party import (\n"
" lib1, lib2, lib3, lib4, lib5, lib6,\n"
" lib7, lib8, lib9, lib10, lib11,\n"
" lib12, lib13, lib14, lib15, lib16,\n"
" lib17, lib18, lib20, lib21, lib22\n"
")\n")
comment_output_vertical_grid_grouped = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.VERTICAL_GRID_GROUPED,
line_length=40, indent=" ").output
assert comment_output_vertical_grid_grouped == ("from third_party import ( # comment\n"
" lib1, lib2, lib3, lib4, lib5, lib6,\n"
" lib7, lib8, lib9, lib10, lib11,\n"
" lib12, lib13, lib14, lib15, lib16,\n"
" lib17, lib18, lib20, lib21, lib22\n"
")\n")
output_noqa = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.NOQA).output
assert output_noqa == "from third_party import lib1 lib2 lib3 lib4 lib5 lib6 lib7 lib8 lib9 lib10 lib11 lib12 lib13 lib14 lib15 lib16 lib17 lib18 lib20 lib21 lib22 # NOQA comment\n" # NOQA
def test_qa_comment_case():
test_input = "from veryveryveryveryveryveryveryveryveryveryvery import X # NOQA"
test_output = SortImports(file_contents=test_input, line_length=40, multi_line_output=WrapModes.NOQA).output
assert test_output == "from veryveryveryveryveryveryveryveryveryveryvery import X # NOQA\n"
test_input = "import veryveryveryveryveryveryveryveryveryveryvery # NOQA"
test_output = SortImports(file_contents=test_input, line_length=40, multi_line_output=WrapModes.NOQA).output
assert test_output == "import veryveryveryveryveryveryveryveryveryveryvery # NOQA\n"
def test_length_sort():
"""Test setting isort to sort on length instead of alphabetically."""
test_input = ("import medium_sizeeeeeeeeeeeeee\n"
"import shortie\n"
"import looooooooooooooooooooooooooooooooooooooong\n"
"import medium_sizeeeeeeeeeeeeea\n")
test_output = SortImports(file_contents=test_input, length_sort=True).output
assert test_output == ("import shortie\n"
"import medium_sizeeeeeeeeeeeeea\n"
"import medium_sizeeeeeeeeeeeeee\n"
"import looooooooooooooooooooooooooooooooooooooong\n")
def test_convert_hanging():
"""Ensure that isort will convert hanging indents to correct indent
method."""
test_input = ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, \\\n"
" lib8, lib9, lib10, lib11, lib12, \\\n"
" lib13, lib14, lib15, lib16, lib17, \\\n"
" lib18, lib20, lib21, lib22\n")
test_output = SortImports(file_contents=test_input, multi_line_output=WrapModes.GRID,
line_length=40).output
assert test_output == ("from third_party import (lib1, lib2,\n"
" lib3, lib4,\n"
" lib5, lib6,\n"
" lib7, lib8,\n"
" lib9, lib10,\n"
" lib11, lib12,\n"
" lib13, lib14,\n"
" lib15, lib16,\n"
" lib17, lib18,\n"
" lib20, lib21,\n"
" lib22)\n")
def test_custom_indent():
"""Ensure setting a custom indent will work as expected."""
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent=" ", balanced_wrapping=False).output
assert test_output == ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, lib8, \\\n"
" lib9, lib10, lib11, lib12, lib13, \\\n"
" lib14, lib15, lib16, lib17, lib18, \\\n"
" lib20, lib21, lib22\n")
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent="' '", balanced_wrapping=False).output
assert test_output == ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, lib8, \\\n"
" lib9, lib10, lib11, lib12, lib13, \\\n"
" lib14, lib15, lib16, lib17, lib18, \\\n"
" lib20, lib21, lib22\n")
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent="tab", balanced_wrapping=False).output
assert test_output == ("from third_party import lib1, lib2, \\\n"
"\tlib3, lib4, lib5, lib6, lib7, lib8, \\\n"
"\tlib9, lib10, lib11, lib12, lib13, \\\n"
"\tlib14, lib15, lib16, lib17, lib18, \\\n"
"\tlib20, lib21, lib22\n")
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent=2, balanced_wrapping=False).output
assert test_output == ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, lib8, \\\n"
" lib9, lib10, lib11, lib12, lib13, \\\n"
" lib14, lib15, lib16, lib17, lib18, \\\n"
" lib20, lib21, lib22\n")
def test_use_parentheses():
test_input = (
"from fooooooooooooooooooooooooo.baaaaaaaaaaaaaaaaaaarrrrrrr import \\"
" my_custom_function as my_special_function"
)
test_output = SortImports(
file_contents=test_input, known_third_party=['django'],
line_length=79, use_parentheses=True,
).output
assert '(' in test_output
def test_skip():
"""Ensure skipping a single import will work as expected."""
test_input = ("import myproject\n"
"import django\n"
"print('hey')\n"
"import sys # isort:skip this import needs to be placed here\n\n\n\n\n\n\n")
test_output = SortImports(file_contents=test_input, known_third_party=['django']).output
assert test_output == ("import django\n"
"\n"
"import myproject\n"
"\n"
"print('hey')\n"
"import sys # isort:skip this import needs to be placed here\n")
def test_skip_with_file_name():
"""Ensure skipping a file works even when file_contents is provided."""
test_input = ("import django\n"
"import myproject\n")
skipped = SortImports(file_path='/baz.py', file_contents=test_input, known_third_party=['django'],
skip=['baz.py']).skipped
assert skipped
def test_force_to_top():
"""Ensure forcing a single import to the top of its category works as expected."""
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1\n")
test_output = SortImports(file_contents=test_input, force_to_top=['lib5']).output
assert test_output == ("import lib5\n"
"import lib1\n"
"import lib2\n"
"import lib6\n")
def test_add_imports():
"""Ensures adding imports works as expected."""
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1\n\n")
test_output = SortImports(file_contents=test_input, add_imports=['import lib4', 'import lib7']).output
assert test_output == ("import lib1\n"
"import lib2\n"
"import lib4\n"
"import lib5\n"
"import lib6\n"
"import lib7\n")
# Using simplified syntax
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1\n\n")
test_output = SortImports(file_contents=test_input, add_imports=['lib4', 'lib7', 'lib8.a']).output
assert test_output == ("import lib1\n"
"import lib2\n"
"import lib4\n"
"import lib5\n"
"import lib6\n"
"import lib7\n"
"from lib8 import a\n")
# On a file that has no pre-existing imports
test_input = ('"""Module docstring"""\n'
'\n'
'class MyClass(object):\n'
' pass\n')
test_output = SortImports(file_contents=test_input, add_imports=['from __future__ import print_function']).output
assert test_output == ('"""Module docstring"""\n'
'from __future__ import print_function\n'
'\n'
'\n'
'class MyClass(object):\n'
' pass\n')
# On a file that has no pre-existing imports, and no doc-string
test_input = ('class MyClass(object):\n'
' pass\n')
test_output = SortImports(file_contents=test_input, add_imports=['from __future__ import print_function']).output
assert test_output == ('from __future__ import print_function\n'
'\n'
'\n'
'class MyClass(object):\n'
' pass\n')
    # On a file with no content whatsoever
test_input = ("")
test_output = SortImports(file_contents=test_input, add_imports=['lib4']).output
assert test_output == ("")
    # On a file with no content whatsoever, after force_adds is set to True
test_input = ("")
test_output = SortImports(file_contents=test_input, add_imports=['lib4'], force_adds=True).output
assert test_output == ("import lib4\n")
def test_remove_imports():
"""Ensures removing imports works as expected."""
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1")
test_output = SortImports(file_contents=test_input, remove_imports=['lib2', 'lib6']).output
assert test_output == ("import lib1\n"
"import lib5\n")
# Using natural syntax
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1\n"
"from lib8 import a")
test_output = SortImports(file_contents=test_input, remove_imports=['import lib2', 'import lib6',
'from lib8 import a']).output
assert test_output == ("import lib1\n"
"import lib5\n")
def test_explicitly_local_import():
"""Ensure that explicitly local imports are separated."""
test_input = ("import lib1\n"
"import lib2\n"
"import .lib6\n"
"from . import lib7")
assert SortImports(file_contents=test_input).output == ("import lib1\n"
"import lib2\n"
"\n"
"import .lib6\n"
"from . import lib7\n")
def test_quotes_in_file():
"""Ensure imports within triple quotes don't get imported."""
test_input = ('import os\n'
'\n'
'"""\n'
'Let us\n'
'import foo\n'
'okay?\n'
'"""\n')
assert SortImports(file_contents=test_input).output == test_input
test_input = ('import os\n'
'\n'
"'\"\"\"'\n"
'import foo\n')
assert SortImports(file_contents=test_input).output == ('import os\n'
'\n'
'import foo\n'
'\n'
"'\"\"\"'\n")
test_input = ('import os\n'
'\n'
'"""Let us"""\n'
'import foo\n'
'"""okay?"""\n')
assert SortImports(file_contents=test_input).output == ('import os\n'
'\n'
'import foo\n'
'\n'
'"""Let us"""\n'
'"""okay?"""\n')
test_input = ('import os\n'
'\n'
'#"""\n'
'import foo\n'
'#"""')
assert SortImports(file_contents=test_input).output == ('import os\n'
'\n'
'import foo\n'
'\n'
'#"""\n'
'#"""\n')
test_input = ('import os\n'
'\n'
"'\\\n"
"import foo'\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ('import os\n'
'\n'
"'''\n"
"\\'''\n"
'import junk\n'
"'''\n")
assert SortImports(file_contents=test_input).output == test_input
def test_check_newline_in_imports(capsys):
    """Ensure the check option works correctly when newlines are present in imports."""
test_input = ('from lib1 import (\n'
' sub1,\n'
' sub2,\n'
' sub3\n)\n')
SortImports(file_contents=test_input, multi_line_output=WrapModes.VERTICAL_HANGING_INDENT, line_length=20,
check=True, verbose=True)
out, err = capsys.readouterr()
assert 'SUCCESS' in out
def test_forced_separate():
"""Ensure that forcing certain sub modules to show separately works as expected."""
test_input = ('import sys\n'
'import warnings\n'
'from collections import OrderedDict\n'
'\n'
'from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation\n'
'from django.core.paginator import InvalidPage\n'
'from django.core.urlresolvers import reverse\n'
'from django.db import models\n'
'from django.db.models.fields import FieldDoesNotExist\n'
'from django.utils import six\n'
'from django.utils.deprecation import RenameMethodsBase\n'
'from django.utils.encoding import force_str, force_text\n'
'from django.utils.http import urlencode\n'
'from django.utils.translation import ugettext, ugettext_lazy\n'
'\n'
'from django.contrib.admin import FieldListFilter\n'
'from django.contrib.admin.exceptions import DisallowedModelAdminLookup\n'
'from django.contrib.admin.options import IncorrectLookupParameters, IS_POPUP_VAR, TO_FIELD_VAR\n')
assert SortImports(file_contents=test_input, forced_separate=['django.contrib'],
known_third_party=['django'], line_length=120, order_by_type=False).output == test_input
test_input = ('from .foo import bar\n'
'\n'
'from .y import ca\n')
assert SortImports(file_contents=test_input, forced_separate=['.y'],
line_length=120, order_by_type=False).output == test_input
def test_default_section():
"""Test to ensure changing the default section works as expected."""
test_input = ("import sys\n"
"import os\n"
"import myproject.test\n"
"import django.settings")
test_output = SortImports(file_contents=test_input, known_third_party=['django'],
default_section="FIRSTPARTY").output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import django.settings\n"
"\n"
"import myproject.test\n")
test_output_custom = SortImports(file_contents=test_input, known_third_party=['django'],
default_section="STDLIB").output
assert test_output_custom == ("import myproject.test\n"
"import os\n"
"import sys\n"
"\n"
"import django.settings\n")
def test_first_party_overrides_standard_section():
    """Test to ensure known_first_party overrides the standard library section."""
test_input = ("import sys\n"
"import os\n"
"import profile.test\n")
test_output = SortImports(file_contents=test_input, known_first_party=['profile']).output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import profile.test\n")
def test_third_party_overrides_standard_section():
    """Test to ensure known_third_party overrides the standard library section."""
test_input = ("import sys\n"
"import os\n"
"import profile.test\n")
test_output = SortImports(file_contents=test_input, known_third_party=['profile']).output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import profile.test\n")
def test_force_single_line_imports():
"""Test to ensure forcing imports to each have their own line works as expected."""
test_input = ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, \\\n"
" lib8, lib9, lib10, lib11, lib12, \\\n"
" lib13, lib14, lib15, lib16, lib17, \\\n"
" lib18, lib20, lib21, lib22\n")
test_output = SortImports(file_contents=test_input, multi_line_output=WrapModes.GRID,
line_length=40, force_single_line=True).output
assert test_output == ("from third_party import lib1\n"
"from third_party import lib2\n"
"from third_party import lib3\n"
"from third_party import lib4\n"
"from third_party import lib5\n"
"from third_party import lib6\n"
"from third_party import lib7\n"
"from third_party import lib8\n"
"from third_party import lib9\n"
"from third_party import lib10\n"
"from third_party import lib11\n"
"from third_party import lib12\n"
"from third_party import lib13\n"
"from third_party import lib14\n"
"from third_party import lib15\n"
"from third_party import lib16\n"
"from third_party import lib17\n"
"from third_party import lib18\n"
"from third_party import lib20\n"
"from third_party import lib21\n"
"from third_party import lib22\n")
def test_force_single_line_long_imports():
test_input = ("from veryveryveryveryveryvery import small, big\n")
test_output = SortImports(file_contents=test_input, multi_line_output=WrapModes.NOQA,
line_length=40, force_single_line=True).output
assert test_output == ("from veryveryveryveryveryvery import big\n"
"from veryveryveryveryveryvery import small # NOQA\n")
def test_titled_imports():
"""Tests setting custom titled/commented import sections."""
test_input = ("import sys\n"
"import unicodedata\n"
"import statistics\n"
"import os\n"
"import myproject.test\n"
"import django.settings")
test_output = SortImports(file_contents=test_input, known_third_party=['django'],
import_heading_stdlib="Standard Library", import_heading_firstparty="My Stuff").output
assert test_output == ("# Standard Library\n"
"import os\n"
"import statistics\n"
"import sys\n"
"import unicodedata\n"
"\n"
"import django.settings\n"
"\n"
"# My Stuff\n"
"import myproject.test\n")
test_second_run = SortImports(file_contents=test_output, known_third_party=['django'],
import_heading_stdlib="Standard Library", import_heading_firstparty="My Stuff").output
assert test_second_run == test_output
def test_balanced_wrapping():
    """Tests balanced wrapping mode, where the lengths of individual wrapped lines are kept as even as possible."""
test_input = ("from __future__ import (absolute_import, division, print_function,\n"
" unicode_literals)")
test_output = SortImports(file_contents=test_input, line_length=70, balanced_wrapping=True).output
assert test_output == ("from __future__ import (absolute_import, division,\n"
" print_function, unicode_literals)\n")
def test_relative_import_with_space():
    """Tests the case where the relation and the module that is being imported from are separated by a space."""
test_input = ("from ... fields.sproqet import SproqetCollection")
assert SortImports(file_contents=test_input).output == ("from ...fields.sproqet import SproqetCollection\n")
def test_multiline_import():
"""Test the case where import spawns multiple lines with inconsistent indentation."""
test_input = ("from pkg \\\n"
" import stuff, other_suff \\\n"
" more_stuff")
assert SortImports(file_contents=test_input).output == ("from pkg import more_stuff, other_suff, stuff\n")
# test again with a custom configuration
custom_configuration = {'force_single_line': True,
'line_length': 120,
'known_first_party': ['asdf', 'qwer'],
'default_section': 'THIRDPARTY',
'forced_separate': 'asdf'}
expected_output = ("from pkg import more_stuff\n"
"from pkg import other_suff\n"
"from pkg import stuff\n")
assert SortImports(file_contents=test_input, **custom_configuration).output == expected_output
def test_atomic_mode():
# without syntax error, everything works OK
test_input = ("from b import d, c\n"
"from a import f, e\n")
assert SortImports(file_contents=test_input, atomic=True).output == ("from a import e, f\n"
"from b import c, d\n")
# with syntax error content is not changed
test_input += "while True print 'Hello world'" # blatant syntax error
assert SortImports(file_contents=test_input, atomic=True).output == test_input
def test_order_by_type():
test_input = "from module import Class, CONSTANT, function"
assert SortImports(file_contents=test_input,
order_by_type=True).output == ("from module import CONSTANT, Class, function\n")
# More complex sample data
test_input = "from module import Class, CONSTANT, function, BASIC, Apple"
assert SortImports(file_contents=test_input,
order_by_type=True).output == ("from module import BASIC, CONSTANT, Apple, Class, function\n")
# Really complex sample data, to verify we don't mess with top level imports, only nested ones
test_input = ("import StringIO\n"
"import glob\n"
"import os\n"
"import shutil\n"
"import tempfile\n"
"import time\n"
"from subprocess import PIPE, Popen, STDOUT\n")
assert SortImports(file_contents=test_input, order_by_type=True).output == \
("import glob\n"
"import os\n"
"import shutil\n"
"import StringIO\n"
"import tempfile\n"
"import time\n"
"from subprocess import PIPE, STDOUT, Popen\n")
def test_custom_lines_after_import_section():
"""Test the case where the number of lines to output after imports has been explicitly set."""
test_input = ("from a import b\n"
"foo = 'bar'\n")
    # default case is one blank line if no method or class follows the imports
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"foo = 'bar'\n")
# test again with a custom number of lines after the import section
assert SortImports(file_contents=test_input, lines_after_imports=2).output == ("from a import b\n"
"\n"
"\n"
"foo = 'bar'\n")
def test_smart_lines_after_import_section():
"""Tests the default 'smart' behavior for dealing with lines after the import section"""
    # one blank line if no method or class follows the imports
test_input = ("from a import b\n"
"foo = 'bar'\n")
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"foo = 'bar'\n")
    # two blank lines if a method or class follows the imports
test_input = ("from a import b\n"
"def my_function():\n"
" pass\n")
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"\n"
"def my_function():\n"
" pass\n")
    # two blank lines if a method or class follows the imports - even if a comment precedes the function
test_input = ("from a import b\n"
"# comment should be ignored\n"
"def my_function():\n"
" pass\n")
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"\n"
"# comment should be ignored\n"
"def my_function():\n"
" pass\n")
    # ensure the logic works with both styles of comments
test_input = ("from a import b\n"
'"""\n'
" comment should be ignored\n"
'"""\n'
"def my_function():\n"
" pass\n")
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"\n"
'"""\n'
" comment should be ignored\n"
'"""\n'
"def my_function():\n"
" pass\n")
def test_settings_combine_instead_of_overwrite():
"""Test to ensure settings combine logically, instead of fully overwriting."""
assert set(SortImports(known_standard_library=['not_std_library']).config['known_standard_library']) == \
set(SortImports().config['known_standard_library'] + ['not_std_library'])
assert set(SortImports(not_known_standard_library=['thread']).config['known_standard_library']) == \
set(item for item in SortImports().config['known_standard_library'] if item != 'thread')
def test_combined_from_and_as_imports():
"""Test to ensure it's possible to combine from and as imports."""
test_input = ("from translate.misc.multistring import multistring\n"
"from translate.storage import base, factory\n"
"from translate.storage.placeables import general, parse as rich_parse\n")
assert SortImports(file_contents=test_input, combine_as_imports=True).output == test_input
def test_as_imports_with_line_length():
    """Test to ensure 'as' imports are wrapped correctly when a line length limit is applied."""
test_input = ("from translate.storage import base as storage_base\n"
"from translate.storage.placeables import general, parse as rich_parse\n")
assert SortImports(file_contents=test_input, combine_as_imports=False, line_length=40).output == \
("from translate.storage import \\\n base as storage_base\n"
"from translate.storage.placeables import \\\n parse as rich_parse\n"
"from translate.storage.placeables import \\\n general\n")
def test_keep_comments():
    """Test to ensure isort properly keeps comments intact after sorting."""
# Straight Import
test_input = ("import foo # bar\n")
assert SortImports(file_contents=test_input).output == test_input
# Star import
test_input_star = ("from foo import * # bar\n")
assert SortImports(file_contents=test_input_star).output == test_input_star
# Force Single Line From Import
test_input = ("from foo import bar # comment\n")
assert SortImports(file_contents=test_input, force_single_line=True).output == test_input
# From import
test_input = ("from foo import bar # My Comment\n")
assert SortImports(file_contents=test_input).output == test_input
# More complicated case
test_input = ("from a import b # My Comment1\n"
"from a import c # My Comment2\n")
assert SortImports(file_contents=test_input).output == \
("from a import b # My Comment1\n"
"from a import c # My Comment2\n")
    # Test case where import comments make imports extend past the line length
test_input = ("from a import b # My Comment1\n"
"from a import c # My Comment2\n"
"from a import d\n")
assert SortImports(file_contents=test_input, line_length=45).output == \
("from a import b # My Comment1\n"
"from a import c # My Comment2\n"
"from a import d\n")
# Test case where imports with comments will be beyond line length limit
test_input = ("from a import b, c # My Comment1\n"
"from a import c, d # My Comment2 is really really really really long\n")
assert SortImports(file_contents=test_input, line_length=45).output == \
("from a import (b, # My Comment1; My Comment2 is really really really really long\n"
" c, d)\n")
# Test that comments are not stripped from 'import ... as ...' by default
test_input = ("from a import b as bb # b comment\n"
"from a import c as cc # c comment\n")
assert SortImports(file_contents=test_input).output == test_input
# Test that 'import ... as ...' comments are not collected inappropriately
test_input = ("from a import b as bb # b comment\n"
"from a import c as cc # c comment\n"
"from a import d\n")
assert SortImports(file_contents=test_input).output == test_input
assert SortImports(file_contents=test_input, combine_as_imports=True).output == (
"from a import b as bb, c as cc, d # b comment; c comment\n"
)
def test_multiline_split_on_dot():
"""Test to ensure isort correctly handles multiline imports, even when split right after a '.'"""
test_input = ("from my_lib.my_package.test.level_1.level_2.level_3.level_4.level_5.\\\n"
" my_module import my_function")
assert SortImports(file_contents=test_input, line_length=70).output == \
("from my_lib.my_package.test.level_1.level_2.level_3.level_4.level_5.my_module import \\\n"
" my_function\n")
def test_import_star():
"""Test to ensure isort handles star imports correctly"""
test_input = ("from blah import *\n"
"from blah import _potato\n")
assert SortImports(file_contents=test_input).output == ("from blah import *\n"
"from blah import _potato\n")
assert SortImports(file_contents=test_input, combine_star=True).output == ("from blah import *\n")
def test_include_trailing_comma():
"""Test for the include_trailing_comma option"""
test_output_grid = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.GRID,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_grid == (
"from third_party import (lib1, lib2,\n"
" lib3, lib4,)\n"
)
test_output_vertical = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.VERTICAL,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_vertical == (
"from third_party import (lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,)\n"
)
test_output_vertical_indent = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_vertical_indent == (
"from third_party import (\n"
" lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
")\n"
)
test_output_vertical_grid = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.VERTICAL_GRID,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_vertical_grid == (
"from third_party import (\n"
" lib1, lib2, lib3, lib4,)\n"
)
test_output_vertical_grid_grouped = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.VERTICAL_GRID_GROUPED,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_vertical_grid_grouped == (
"from third_party import (\n"
" lib1, lib2, lib3, lib4,\n"
")\n"
)
def test_similar_to_std_library():
"""Test to ensure modules that are named similarly to a standard library import don't end up clobbered"""
test_input = ("import datetime\n"
"\n"
"import requests\n"
"import times\n")
assert SortImports(file_contents=test_input, known_third_party=["requests", "times"]).output == test_input
def test_correctly_placed_imports():
"""Test to ensure comments stay on correct placement after being sorted"""
test_input = ("from a import b # comment for b\n"
"from a import c # comment for c\n")
assert SortImports(file_contents=test_input, force_single_line=True).output == \
("from a import b # comment for b\n"
"from a import c # comment for c\n")
assert SortImports(file_contents=test_input).output == ("from a import b # comment for b\n"
"from a import c # comment for c\n")
# Full example test from issue #143
test_input = ("from itertools import chain\n"
"\n"
"from django.test import TestCase\n"
"from model_mommy import mommy\n"
"\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_item_product\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_item_product_d"
"efinition\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_item_product_d"
"efinition_platform\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_item_product_p"
"latform\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_territory_reta"
"il_model\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_territory_reta"
"il_model_definition_platform_provider # noqa\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_item_product\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_item_product_defini"
"tion\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_item_product_defini"
"tion_platform\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_item_product_platfo"
"rm\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_territory_retail_mo"
"del\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_territory_retail_mo"
"del_definition_platform_provider # noqa\n"
"from apps.clientman.commands.download_usage_rights import create_download_usage_right\n"
"from apps.clientman.commands.download_usage_rights import delete_download_usage_right\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_item_product\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_item_product_d"
"efinition\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_item_product_d"
"efinition_platform\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_item_product_p"
"latform\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_territory_reta"
"il_model\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_territory_reta"
"il_model_definition_platform_provider # noqa\n"
"from apps.clientman.commands.download_usage_rights import get_download_rights_for_item\n"
"from apps.clientman.commands.download_usage_rights import get_right\n")
assert SortImports(file_contents=test_input, force_single_line=True, line_length=140,
known_third_party=["django", "model_mommy"]).output == test_input
def test_auto_detection():
"""Initial test to ensure isort auto-detection works correctly - will grow over time as new issues are raised."""
# Issue 157
test_input = ("import binascii\n"
"import os\n"
"\n"
"import cv2\n"
"import requests\n")
assert SortImports(file_contents=test_input, known_third_party=["cv2", "requests"]).output == test_input
# alternative solution
assert SortImports(file_contents=test_input, default_section="THIRDPARTY").output == test_input
def test_same_line_statements():
"""Ensure isort correctly handles the case where a single line contains multiple statements including an import"""
test_input = ("import pdb; import nose\n")
assert SortImports(file_contents=test_input).output == ("import pdb\n"
"\n"
"import nose\n")
test_input = ("import pdb; pdb.set_trace()\n"
"import nose; nose.run()\n")
assert SortImports(file_contents=test_input).output == test_input
def test_long_line_comments():
    """Ensure isort correctly handles comments at the end of extremely long lines"""
test_input = ("from foo.utils.fabric_stuff.live import check_clean_live, deploy_live, sync_live_envdir, "
"update_live_app, update_live_cron # noqa\n"
"from foo.utils.fabric_stuff.stage import check_clean_stage, deploy_stage, sync_stage_envdir, "
"update_stage_app, update_stage_cron # noqa\n")
assert SortImports(file_contents=test_input).output == \
("from foo.utils.fabric_stuff.live import (check_clean_live, deploy_live, # noqa\n"
" sync_live_envdir, update_live_app, update_live_cron)\n"
"from foo.utils.fabric_stuff.stage import (check_clean_stage, deploy_stage, # noqa\n"
" sync_stage_envdir, update_stage_app, update_stage_cron)\n")
def test_tab_character_in_import():
"""Ensure isort correctly handles import statements that contain a tab character"""
test_input = ("from __future__ import print_function\n"
"from __future__ import\tprint_function\n")
assert SortImports(file_contents=test_input).output == "from __future__ import print_function\n"
def test_split_position():
"""Ensure isort splits on import instead of . when possible"""
test_input = ("from p24.shared.exceptions.master.host_state_flag_unchanged import HostStateUnchangedException\n")
assert SortImports(file_contents=test_input, line_length=80).output == \
("from p24.shared.exceptions.master.host_state_flag_unchanged import \\\n"
" HostStateUnchangedException\n")
def test_place_comments():
"""Ensure manually placing imports works as expected"""
test_input = ("import sys\n"
"import os\n"
"import myproject.test\n"
"import django.settings\n"
"\n"
"# isort:imports-thirdparty\n"
"# isort:imports-firstparty\n"
"print('code')\n"
"\n"
"# isort:imports-stdlib\n")
test_output = SortImports(file_contents=test_input, known_third_party=['django']).output
assert test_output == ("\n# isort:imports-thirdparty\n"
"import django.settings\n"
"\n"
"# isort:imports-firstparty\n"
"import myproject.test\n"
"\n"
"print('code')\n"
"\n"
"# isort:imports-stdlib\n"
"import os\n"
"import sys\n")
def test_placement_control():
    """Ensure that the most specific placement control match wins"""
test_input = ("import os\n"
"import sys\n"
"from bottle import Bottle, redirect, response, run\n"
"import p24.imports._argparse as argparse\n"
"import p24.imports._subprocess as subprocess\n"
"import p24.imports._VERSION as VERSION\n"
"import p24.shared.media_wiki_syntax as syntax\n")
test_output = SortImports(file_contents=test_input,
known_first_party=['p24', 'p24.imports._VERSION'],
known_standard_library=['p24.imports'],
known_third_party=['bottle'],
default_section="THIRDPARTY").output
assert test_output == ("import os\n"
"import p24.imports._argparse as argparse\n"
"import p24.imports._subprocess as subprocess\n"
"import sys\n"
"\n"
"from bottle import Bottle, redirect, response, run\n"
"\n"
"import p24.imports._VERSION as VERSION\n"
"import p24.shared.media_wiki_syntax as syntax\n")
def test_custom_sections():
    """Ensure that custom sections and their headings are applied as configured"""
test_input = ("import os\n"
"import sys\n"
"from django.conf import settings\n"
"from bottle import Bottle, redirect, response, run\n"
"import p24.imports._argparse as argparse\n"
"from django.db import models\n"
"import p24.imports._subprocess as subprocess\n"
"import pandas as pd\n"
"import p24.imports._VERSION as VERSION\n"
"import numpy as np\n"
"import p24.shared.media_wiki_syntax as syntax\n")
test_output = SortImports(file_contents=test_input,
known_first_party=['p24', 'p24.imports._VERSION'],
import_heading_stdlib='Standard Library',
import_heading_thirdparty='Third Party',
import_heading_firstparty='First Party',
import_heading_django='Django',
import_heading_pandas='Pandas',
known_standard_library=['p24.imports'],
known_third_party=['bottle'],
known_django=['django'],
known_pandas=['pandas', 'numpy'],
default_section="THIRDPARTY",
sections=["FUTURE", "STDLIB", "DJANGO", "THIRDPARTY", "PANDAS", "FIRSTPARTY", "LOCALFOLDER"]).output
assert test_output == ("# Standard Library\n"
"import os\n"
"import p24.imports._argparse as argparse\n"
"import p24.imports._subprocess as subprocess\n"
"import sys\n"
"\n"
"# Django\n"
"from django.conf import settings\n"
"from django.db import models\n"
"\n"
"# Third Party\n"
"from bottle import Bottle, redirect, response, run\n"
"\n"
"# Pandas\n"
"import numpy as np\n"
"import pandas as pd\n"
"\n"
"# First Party\n"
"import p24.imports._VERSION as VERSION\n"
"import p24.shared.media_wiki_syntax as syntax\n")
def test_sticky_comments():
"""Test to ensure it is possible to make comments 'stick' above imports"""
test_input = ("import os\n"
"\n"
"# Used for type-hinting (ref: https://github.com/davidhalter/jedi/issues/414).\n"
"from selenium.webdriver.remote.webdriver import WebDriver # noqa\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("from django import forms\n"
"# While this couples the geographic forms to the GEOS library,\n"
"# it decouples from database (by not importing SpatialBackend).\n"
"from django.contrib.gis.geos import GEOSException, GEOSGeometry\n"
"from django.utils.translation import ugettext_lazy as _\n")
assert SortImports(file_contents=test_input).output == test_input
def test_zipimport():
"""Imports ending in "import" shouldn't be clobbered"""
test_input = "from zipimport import zipimport\n"
assert SortImports(file_contents=test_input).output == test_input
def test_from_ending():
"""Imports ending in "from" shouldn't be clobbered."""
test_input = "from foo import get_foo_from, get_foo\n"
expected_output = "from foo import get_foo, get_foo_from\n"
assert SortImports(file_contents=test_input).output == expected_output
def test_from_first():
    """Tests that the from_first setting works correctly"""
test_input = "from os import path\nimport os\n"
assert SortImports(file_contents=test_input, from_first=True).output == test_input
def test_top_comments():
"""Ensure correct behavior with top comments"""
test_input = ("# -*- encoding: utf-8 -*-\n"
"# Test comment\n"
"#\n"
"from __future__ import unicode_literals\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("# -*- coding: utf-8 -*-\n"
"from django.db import models\n"
"from django.utils.encoding import python_2_unicode_compatible\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("# Comment\n"
"import sys\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("# -*- coding\n"
"import sys\n")
assert SortImports(file_contents=test_input).output == test_input
def test_consistency():
    """Ensures consistency of handling even when dealing with non-ordered-by-type imports"""
test_input = "from sqlalchemy.dialects.postgresql import ARRAY, array\n"
assert SortImports(file_contents=test_input, order_by_type=True).output == test_input
def test_force_grid_wrap():
    """Ensures that force_grid_wrap forces from imports to be wrapped even when the line would fit."""
test_input = (
"from foo import lib6, lib7\n"
"from bar import lib2\n"
)
test_output = SortImports(
file_contents=test_input,
force_grid_wrap=True,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT
).output
assert test_output == """from bar import lib2
from foo import (
lib6,
lib7
)
"""
def test_force_grid_wrap_long():
"""Ensure that force grid wrap still happens with long line length"""
test_input = (
"from foo import lib6, lib7\n"
"from bar import lib2\n"
"from babar import something_that_is_kind_of_long"
)
test_output = SortImports(
file_contents=test_input,
force_grid_wrap=True,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=9999,
).output
assert test_output == """from babar import something_that_is_kind_of_long
from bar import lib2
from foo import (
lib6,
lib7
)
"""
def test_uses_jinja_variables():
"""Test a basic set of imports that use jinja variables"""
test_input = ("import sys\n"
"import os\n"
"import myproject.{ test }\n"
"import django.{ settings }")
test_output = SortImports(file_contents=test_input, known_third_party=['django'],
known_first_party=['myproject']).output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import django.{ settings }\n"
"\n"
"import myproject.{ test }\n")
test_input = ("import {{ cookiecutter.repo_name }}\n"
"from foo import {{ cookiecutter.bar }}\n")
assert SortImports(file_contents=test_input).output == test_input
def test_fcntl():
"""Test to ensure fcntl gets correctly recognized as stdlib import"""
test_input = ("import fcntl\n"
"import os\n"
"import sys\n")
assert SortImports(file_contents=test_input).output == test_input
def test_import_split_is_word_boundary_aware():
    """Test to ensure that isort splits words in a boundary-aware manner"""
test_input = ("from mycompany.model.size_value_array_import_func import ("
" get_size_value_array_import_func_jobs,"
")")
test_output = SortImports(file_contents=test_input,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=79).output
assert test_output == ("from mycompany.model.size_value_array_import_func import \\\n"
" get_size_value_array_import_func_jobs\n")
def test_other_file_encodings():
"""Test to ensure file encoding is respected"""
try:
tmp_dir = tempfile.mkdtemp()
for encoding in ('latin1', 'utf8'):
tmp_fname = os.path.join(tmp_dir, 'test_{0}.py'.format(encoding))
with codecs.open(tmp_fname, mode='w', encoding=encoding) as f:
file_contents = "# coding: {0}\n\ns = u'ã'\n".format(encoding)
f.write(file_contents)
assert SortImports(file_path=tmp_fname).output == file_contents
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
def test_comment_at_top_of_file():
"""Test to ensure isort correctly handles top of file comments"""
test_input = ("# Comment one\n"
"from django import forms\n"
"# Comment two\n"
"from django.contrib.gis.geos import GEOSException\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("# -*- coding: utf-8 -*-\n"
"from django.db import models\n")
assert SortImports(file_contents=test_input).output == test_input
def test_alphabetic_sorting():
    """Test to ensure forced alphabetical sorting of imports works as expected"""
test_input = ("from django.contrib.gis.geos import GEOSException\n"
"from plone.app.testing import getRoles\n"
"from plone.app.testing import ManageRoles\n"
"from plone.app.testing import setRoles\n"
"from Products.CMFPlone import utils\n"
"\n"
"import ABC\n"
"import unittest\n"
"import Zope\n")
options = {'force_single_line': True,
'force_alphabetical_sort': True, }
assert SortImports(file_contents=test_input, **options).output == test_input
test_input = ("# -*- coding: utf-8 -*-\n"
"from django.db import models\n")
assert SortImports(file_contents=test_input).output == test_input
def test_comments_not_duplicated():
"""Test to ensure comments aren't duplicated: issue 303"""
test_input = ('from flask import url_for\n'
"# Whole line comment\n"
'from service import demo # inline comment\n'
'from service import settings\n')
output = SortImports(file_contents=test_input).output
assert output.count("# Whole line comment\n") == 1
assert output.count("# inline comment\n") == 1
def test_top_of_line_comments():
"""Test to ensure top of line comments stay where they should: issue 260"""
test_input = ('# -*- coding: utf-8 -*-\n'
'from django.db import models\n'
'#import json as simplejson\n'
'from myproject.models import Servidor\n'
'\n'
'import reversion\n'
'\n'
'import logging\n')
output = SortImports(file_contents=test_input).output
assert output.startswith('# -*- coding: utf-8 -*-\n')
def test_basic_comment():
    """Test to ensure a basic comment won't crash isort"""
test_input = ('import logging\n'
'# Foo\n'
'import os\n')
assert SortImports(file_contents=test_input).output == test_input
def test_shouldnt_add_lines():
"""Ensure that isort doesn't add a blank line when a top of import comment is present, issue #316"""
test_input = ('"""Text"""\n'
'# This is a comment\n'
'import pkg_resources\n')
assert SortImports(file_contents=test_input).output == test_input
def test_sections_parsed_correct():
    """Ensure that modules for custom sections are parsed as a list from the config file and that the isort result is correct"""
tmp_conf_dir = None
conf_file_data = (
'[settings]\n'
'sections=FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER,COMMON\n'
'known_common=nose\n'
'import_heading_common=Common Library\n'
'import_heading_stdlib=Standard Library\n'
)
test_input = (
'import os\n'
'from nose import *\n'
'import nose\n'
'from os import path'
)
correct_output = (
'# Standard Library\n'
'import os\n'
'from os import path\n'
'\n'
'# Common Library\n'
'import nose\n'
'from nose import *\n'
)
try:
tmp_conf_dir = tempfile.mkdtemp()
tmp_conf_name = os.path.join(tmp_conf_dir, '.isort.cfg')
with codecs.open(tmp_conf_name, 'w') as test_config:
test_config.writelines(conf_file_data)
assert SortImports(file_contents=test_input, settings_path=tmp_conf_dir).output == correct_output
finally:
shutil.rmtree(tmp_conf_dir, ignore_errors=True)
def test_alphabetic_sorting_no_newlines():
'''Test to ensure that alphabetical sort does not erroneously introduce new lines (issue #328)'''
test_input = "import os\n"
    test_output = SortImports(file_contents=test_input, force_alphabetical_sort=True).output
assert test_input == test_output
test_input = ('from a import b\n'
'\n'
'import os\n'
'import unittest\n'
'\n'
'\n'
'print(1)\n')
    test_output = SortImports(file_contents=test_input, force_alphabetical_sort=True, lines_after_imports=2).output
assert test_input == test_output
| mit |
ashhher3/scikit-learn | sklearn/datasets/base.py | 15 | 17969 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
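# Illustrative sketch of how a Bunch behaves (hypothetical values): because
# __dict__ is bound to the dict itself, keys and attributes are interchangeable.
#
#   b = Bunch(data=np.array([[1, 2], [3, 4]]), target=np.array([0, 1]))
#   b['data'] is b.data          # True: same object via key or attribute
#   b.target_names = ['a', 'b']  # new attributes also show up as dict keys
#   'target_names' in b          # True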
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
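# Usage sketch (paths are examples only): resolution order is the explicit
# argument, then the SCIKIT_LEARN_DATA environment variable, then the default
# '~/scikit_learn_data'; a missing folder is created on the fly.
#
#   cache_dir = get_data_home()                  # e.g. /home/user/scikit_learn_data
#   cache_dir = get_data_home('/tmp/sk_cache')   # explicit folder, created if absent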
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
    description : string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
        Whether or not to load the content of the different files. If
        true, a 'data' attribute containing the text information is present
        in the data structure returned. If not, a 'filenames' attribute
        gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
    decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
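# Usage sketch, assuming a hypothetical 'txt_sentoken/' folder laid out with
# one subfolder per category as described in the docstring above:
#
#   bunch = load_files('txt_sentoken', encoding='utf-8', shuffle=True,
#                      random_state=42)
#   len(bunch.data) == len(bunch.target)     # one decoded text per label index
#   bunch.target_names[bunch.target[0]]      # category name of the first sample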
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
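# Usage sketch: the returned Bunch carries only the data matrix and the
# regression targets (no DESCR or feature names).
#
#   diabetes = load_diabetes()
#   diabetes.data.shape     # (442, 10)
#   diabetes.target.shape   # (442,)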
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
        Dictionary-like object, the interesting attributes are: 'data' and
        'target', the two multivariate datasets, with 'data' corresponding to
        the exercise and 'target' corresponding to the physiological
        measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
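# Usage sketch: both 'data' (exercise) and 'target' (physiological) are
# 20 x 3 arrays, with the CSV headers exposed as feature_names / target_names.
#
#   linnerud = load_linnerud()
#   linnerud.data.shape == linnerud.target.shape == (20, 3)
#   linnerud.feature_names    # headers read from linnerud_exercise.csv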
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
    image_name : {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
    img : 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/cross_validation.py | 7 | 72106 |
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .utils.random import choice
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
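# Sketch of a minimal concrete subclass (illustrative only): yielding test
# indices is enough, because __iter__ derives the train indices by negating
# the corresponding boolean mask.
#
#   class _FirstKOut(_PartitionIterator):
#       def __init__(self, n, k):
#           super(_FirstKOut, self).__init__(n)
#           self.k = k
#
#       def _iter_test_indices(self):
#           yield np.arange(self.k)   # a single split: first k samples form the test set
#
#   # list(_FirstKOut(5, 2)) -> [(array([2, 3, 4]), array([0, 1]))]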
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeaveOneOut` instead.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePOut` instead.
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
    Due to the high number of iterations which grows combinatorially with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.KFold` instead.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used as a validation set once while the k - 1 remaining
fold(s) form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: takes label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GroupKFold` instead.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3):
super(LabelKFold, self).__init__(len(labels), n_folds,
shuffle=False, random_state=None)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.StratifiedKFold` instead.
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds); the last one has the
complementary size.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeaveOneGroupOut` instead.
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePGroupsOut` instead.
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels, while the latter uses samples
that are all assigned the same label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ShuffleSplit` instead.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
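# --- Added illustration (not part of the original module) -------------------
# A minimal sketch of how _validate_shuffle_split resolves train/test sizes.
# The concrete numbers below are assumptions chosen only for demonstration.
def _demo_validate_shuffle_split():  # hypothetical helper, safe to remove
    # A float test_size is read as a proportion and rounded up with ceil;
    # the train size then defaults to the complement.
    n_train, n_test = _validate_shuffle_split(10, test_size=0.25,
                                               train_size=None)
    assert (n_train, n_test) == (7, 3)
    # Integer sizes are taken as absolute sample counts.
    n_train, n_test = _validate_shuffle_split(10, test_size=2, train_size=5)
    assert (n_train, n_test) == (5, 2)
    return n_train, n_test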
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
"""
# this computes a rough approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
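# --- Added illustration (not part of the original module) -------------------
# Sketch of _approximate_mode on a toy population: with 4 samples of class 0
# and 2 of class 1, drawing 3 samples allocates proportionally (2 and 1).
# The inputs are assumptions chosen only for demonstration.
def _demo_approximate_mode():  # hypothetical helper, safe to remove
    rng = check_random_state(0)
    counts = _approximate_mode(np.array([4, 2]), 3, rng)
    # No ties here, so the result is deterministic and sums to n_draws.
    assert counts.tolist() == [2, 1]
    return counts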
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.StratifiedShuffleSplit` instead.
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
for n in range(self.n_iter):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(cls_count, self.n_train, rng)
class_counts_remaining = cls_count - n_i
t_i = _approximate_mode(class_counts_remaining, self.n_test, rng)
train = []
test = []
for i, _ in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
perm_indices_class_i = np.where(
(i == self.y_indices))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.PredefinedSplit` instead.
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
"""Shuffle-Labels-Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GroupShuffleSplit` instead.
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
.. versionadded:: 0.17
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling and splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
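# --- Added illustration (not part of the original module) -------------------
# LabelShuffleSplit has no doctest above; this sketch shows the intended use
# on a toy label array (values chosen purely for demonstration).
def _demo_label_shuffle_split():  # hypothetical helper, safe to remove
    labels = np.array([1, 1, 2, 2, 3, 3])
    lss = LabelShuffleSplit(labels, n_iter=2, test_size=0.5, random_state=0)
    splits = []
    for train_index, test_index in lss:
        # By construction a label never appears on both sides of a split.
        assert set(labels[train_index]).isdisjoint(set(labels[test_index]))
        splits.append((train_index, test_index))
    return splits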
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
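# --- Added illustration (not part of the original module) -------------------
# Sketch of _index_param_value: per-sample fit parameters (e.g. sample_weight)
# are sliced with the training indices, anything else passes through. The toy
# arrays are assumptions chosen only for demonstration.
def _demo_index_param_value():  # hypothetical helper, safe to remove
    X = np.zeros((4, 2))
    train = np.array([0, 2])
    sliced = _index_param_value(X, np.array([1., 2., 3., 4.]), train)
    passed = _index_param_value(X, 5, train)  # scalar: returned unchanged
    assert sliced.tolist() == [1., 3.] and passed == 5
    return sliced, passed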
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.cross_val_predict` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
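# --- Added illustration (not part of the original module) -------------------
# Sketch of _check_is_partition on toy index arrays (demonstration only).
def _demo_check_is_partition():  # hypothetical helper, safe to remove
    assert _check_is_partition(np.array([2, 0, 1]), 3)      # reordering of range(3)
    assert not _check_is_partition(np.array([0, 0, 1]), 3)  # index 2 never hit
    return True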
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.cross_val_score` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
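# --- Added illustration (not part of the original module) -------------------
# Sketch of why _safe_split takes train_indices: for a precomputed kernel the
# test rows must be paired with the *training* columns. The estimator below is
# a made-up stand-in with the _pairwise flag, not a real scikit-learn class.
class _DemoPairwiseEstimator(object):  # hypothetical stand-in, safe to remove
    _pairwise = True
def _demo_safe_split():  # hypothetical helper, safe to remove
    K = np.arange(16).reshape(4, 4)  # pretend this is a precomputed kernel
    est = _DemoPairwiseEstimator()
    train, test = np.array([0, 1, 2]), np.array([3])
    K_train, _ = _safe_split(est, K, None, train)
    K_test, _ = _safe_split(est, K, None, test, train)
    # The train block is square; the test block has one row per test sample
    # and one column per training sample.
    assert K_train.shape == (3, 3) and K_test.shape == (1, 3)
    return K_train, K_test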
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
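# --- Added illustration (not part of the original module) -------------------
# Sketch of _shuffle with label constraints: values only move within their own
# label group. The toy arrays are assumptions chosen for demonstration.
def _demo_shuffle():  # hypothetical helper, safe to remove
    rng = check_random_state(0)
    y = np.array([10, 11, 20, 21])
    labels = np.array([0, 0, 1, 1])
    y_perm = _shuffle(y, labels, rng)
    assert set(y_perm[:2]) == set([10, 11])
    assert set(y_perm[2:]) == set([20, 21])
    return y_perm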
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.check_cv` instead.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is binary or
multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
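# --- Added illustration (not part of the original module) -------------------
# Sketch of check_cv's dispatch rule on toy data: an integer cv plus a
# classification target yields StratifiedKFold, otherwise plain KFold.
def _demo_check_cv():  # hypothetical helper, safe to remove
    X_toy = np.zeros((4, 1))
    cv_clf = check_cv(2, X=X_toy, y=np.array([0, 1, 0, 1]), classifier=True)
    cv_reg = check_cv(2, X=X_toy, y=np.array([0.1, 0.2, 0.3, 0.4]),
                      classifier=False)
    assert isinstance(cv_clf, StratifiedKFold)
    assert isinstance(cv_reg, KFold)
    return cv_clf, cv_reg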
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.permutation_test_score` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
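# --- Added illustration (not part of the original module) -------------------
# permutation_test_score has no doctest above; this sketch shows a typical
# call, assuming scikit-learn's iris dataset and SVC are available. Imports
# are kept local so the sketch does not touch the module's import graph.
def _demo_permutation_test_score():  # hypothetical helper, safe to remove
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), iris.data, iris.target,
        cv=3, n_permutations=10, random_state=0)
    # On iris the true score should comfortably beat the permuted scores,
    # giving a small p-value.
    return score, perm_scores, pvalue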
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.train_test_split` instead.
Quick utility that wraps input validation,
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
.. versionadded:: 0.17
*stratify* splitting
Returns
-------
splitting : list, length = 2 * len(arrays),
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| mit |
iamshang1/Projects | Advanced_ML/Text_Classification/xg-boost.py | 1 | 1648 | import pickle
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import StratifiedKFold
import xgboost as xgb
#load saved files
print "loading data"
vocab = np.load('embeddings.npy')  # word embeddings (loaded but not used by this tf-idf + XGBoost baseline)
with open('data.pkl', 'rb') as f:
data = pickle.load(f)
#convert each doc into a string
print "creating features and labels"
docs = []
labels = []
for key,value in data.iteritems():
docs.append(value['text'])
labels.append(value['label'])
docstrings = []
for doc in docs:
flattened = [word for line in doc for word in line]
docstring = " ".join(flattened)
docstrings.append(docstring)
#tfidf vectorization
vectorizer = TfidfVectorizer(min_df=3, stop_words='english',ngram_range=(1, 2))
X = vectorizer.fit_transform(docstrings)
#label encoder
le = LabelEncoder()
y = le.fit_transform(labels)
#kfold cross validation
splits = 10
kf = StratifiedKFold(n_splits=splits,shuffle=True,random_state=1234)
#classify using XGBoost
print "training XGBoost"
scores = []
i = 0
for train_index, test_index in kf.split(X,y):
i += 1
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
gbm = xgb.XGBClassifier(max_depth=5, learning_rate=0.1, n_estimators=300)
gbm.fit(X_train.tocsc(), y_train)
prediction = gbm.predict(X_test.tocsc())
score = float(np.sum(y_test==prediction))/y_test.shape[0]
scores.append(score)
print "XGBoost - kfold %i of %i accuracy: %.4f%%" % (i,splits,score*100)
print "XGBoost - overall accuracy: %.4f" % (np.mean(scores)*100)
| mit |
kedz/cuttsum | trec2015/sbin/l2s/crossval.py | 1 | 39809 | import cuttsum.events
import cuttsum.corpora
from cuttsum.pipeline import InputStreamResource
from mpi4py import MPI
from cuttsum.misc import enum
from cuttsum.classifiers import NuggetRegressor
import numpy as np
import pandas as pd
import random
import pyvw
from datetime import datetime
from sklearn.feature_extraction import FeatureHasher
from sklearn.metrics.pairwise import cosine_similarity
from itertools import izip
import os
import cuttsum.judgements
from cuttsum.misc import event2semsim
import math
np.random.seed(42)
matches_df = cuttsum.judgements.get_merged_dataframe()
tags = enum("READY", "WORKER_START", "WORKER_STOP")
class FeatureMapper(dict):
def __init__(self):
self.store = dict()
self._inv_store = dict()
self._idx = 0
def __getitem__(self, key):
if key not in self.store:
self.store[key] = self._idx
self._inv_store[self._idx] = key
self._idx += 1
return self.store[key]
def items(self):
return self.store.items()
def inv(self, idx):
return self._inv_store[idx]
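# --- Added illustration (not part of the original script) -------------------
# FeatureMapper hands out a stable integer id per feature name on first
# lookup; the feature names below are made up for demonstration.
def _demo_feature_mapper():  # hypothetical helper, safe to remove
    fm = FeatureMapper()
    first = fm["BASIC length"]    # new key -> id 0
    second = fm["LM gw avg lp"]   # new key -> id 1
    again = fm["BASIC length"]    # repeated key -> same id as before
    assert (first, second, again) == (0, 1, 0)
    assert fm.inv(1) == "LM gw avg lp"
    return fm.items()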
fmap = FeatureMapper()
SELECT = 1
SKIP = 2
basic_cols = ["BASIC length", #"BASIC char length",
"BASIC doc position", "BASIC all caps ratio",
"BASIC upper ratio",
# "BASIC lower ratio",
# "BASIC punc ratio",
"BASIC person ratio",
"BASIC location ratio",
"BASIC organization ratio", "BASIC date ratio",
"BASIC time ratio", "BASIC duration ratio",
"BASIC number ratio", "BASIC ordinal ratio",
"BASIC percent ratio",
"BASIC money ratio",
# "BASIC set ratio", "BASIC misc ratio"
]
query_bw_cols = [
"Q_sent_query_cov",
"Q_sent_syn_cov",
"Q_sent_hyper_cov",
"Q_sent_hypo_cov",
]
query_fw_cols = [
"Q_query_sent_cov",
"Q_syn_sent_cov",
"Q_hyper_sent_cov",
"Q_hypo_sent_cov",
]
lm_cols = ["LM domain avg lp",
"LM gw avg lp"]
sum_cols = [
"SUM_sbasic_sum",
"SUM_sbasic_amean",
# "SUM_sbasic_max",
"SUM_novelty_gmean",
"SUM_novelty_amean",
# "SUM_novelty_max",
"SUM_centrality",
"SUM_pagerank",
"SUM_sem_novelty_gmean",
"SUM_sem_novelty_amean",
"SUM_sem_centrality",
"SUM_sem_pagerank",
]
stream_cols = [
"STREAM_sbasic_sum",
"STREAM_sbasic_amean",
"STREAM_sbasic_max",
"STREAM_per_prob_sum",
"STREAM_per_prob_max",
"STREAM_per_prob_amean",
"STREAM_loc_prob_sum",
"STREAM_loc_prob_max",
"STREAM_loc_prob_amean",
"STREAM_org_prob_sum",
"STREAM_org_prob_max",
"STREAM_org_prob_amean",
"STREAM_nt_prob_sum",
"STREAM_nt_prob_max",
"STREAM_nt_prob_amean",
]
best_feats = set([
"LM gw avg lp",
"CACHE_SIM_amean",
"STREAM_sbasic_max",
"CACHE_SIM_max",
"STREAM_loc_prob_amean^df^prob",
"BASIC ordinal ratio^df^prob",
"STREAM_per_prob_max",
"BASIC ordinal ratio",
"STREAM_sbasic_max^df",
"SUM_pagerank",
"BASIC date ratio^df^prob",
"SUM_sem_centrality^prob",
"BASIC doc position^df",
"STREAM_org_prob_amean",
"STREAM_nt_prob_sum^df",
"STREAM_loc_prob_sum",
"STREAM_loc_prob_max^df",
"STREAM_nt_prob_sum^prob",
"BASIC location ratio^df",
"BASIC all caps ratio^prob",
"BASIC organization ratio^df^prob",
"SUM_sbasic_sum^df",
"STREAM_org_prob_sum^prob",
"BASIC money ratio^prob",
"CONSTANT",
"STREAM_loc_prob_max",
"STREAM_org_prob_amean^prob",
"STREAM_nt_prob_max^prob",
"SUM_sbasic_sum^df^prob",
"STREAM_nt_prob_sum",
"LM domain avg lp^df",
"BASIC number ratio^df^prob",
"CACHE_SEM_SIM_amean",
"Q_syn_sent_cov",
"BASIC percent ratio",
"BASIC time ratio^df^prob",
"BASIC date ratio^prob",
"BASIC person ratio^prob",
"STREAM_sbasic_sum^df",
"BASIC location ratio^df^prob",
"BASIC money ratio",
"BASIC duration ratio^df^prob",
"BASIC location ratio^prob",
"BASIC duration ratio^prob",
"BASIC person ratio^df",
"STREAM_sbasic_amean",
"BASIC date ratio",
"SUM_sem_centrality^df",
"BASIC time ratio^df",
"STREAM_sbasic_sum",
])
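# Summarizer is a VW learning-to-search (SearchTask) policy over a sentence stream: each
# sentence is predicted SELECT or SKIP, the oracle selects sentences carrying at least one
# unseen nugget, and the episode loss is 1 - (correct selects) / (oracle selects + predicted selects).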
class Summarizer(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw , sch, num_actions)
#sch.set_options( sch.AUTO_HAMMING_LOSS )
self._with_scores = False
self._loss = 1
self.total_loss = 0
self.log_time = False
self.use_best_feats = False
self.use_i_only = False
self.use_abs_df = False
self._scores = []
self._keep_scores = False
self._doc_condition = False
def feat_weight(self, idx):
if idx >= fmap._idx:
ridx = idx - fmap._idx
else:
ridx = idx
ex = self.example({"a": [(idx, 1)]})
w = self.vw.get_weight(ex.feature("a", 0))
return fmap.inv(ridx), idx, w
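    # Report the learned weight of every mapped feature, split into the block of weight
    # indices the code labels SELECT and the following block it labels NEXT.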
def get_feature_weights(self):
select_feats = []
for i in xrange(fmap._idx):
fname, idx, weight = self.feat_weight(i)
select_feats.append({"name": fname, "index": idx, "weight": weight})
select_feats.sort(key=lambda x: x["weight"])
select_df = pd.DataFrame(select_feats, columns=["name", "index", "weight"])
next_feats = []
for i in xrange(fmap._idx, fmap._idx * 2):
fname, idx, weight = self.feat_weight(i)
next_feats.append({"name": fname, "index": idx, "weight": weight})
next_feats.sort(key=lambda x: x["weight"])
next_df = pd.DataFrame(next_feats, columns=["name", "index", "weight"])
return select_df, next_df
def set_weights(self, weights_df):
for _, row in weights_df.iterrows():
idx = row["index"]
weight = row["weight"]
self.vw.set_weight(idx, 0, weight)
#for i, feat in enumerate(self.basic_cols()):
#fw.append(("b:" + feat, w))
#fw.append(("n:bias", self.vw.get_weight(ex.feature("n", 0))))
def set_loss(self, loss):
self._loss = loss
def make_example(self, sent, cache, cache_in, days, x, cache_latent, dfdelta, ):
if self.log_time is True:
days = np.log(2 + days)
df = sent.to_frame().transpose()
if self._doc_condition is True:
if cache is not None:
doc_condition = (cache["stream id"] == sent["stream id"]).astype("int32").sum()
dc_feat = [("NUM_PREV_SELECTED_IN_DOC_{}".format(doc_condition), 1)]
else:
dc_feat = [("NUM_PREV_SELECTED_IN_DOC_0", 1)]
else: dc_feat = []
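        # Feature namespaces: b=basic surface stats, c=summary/centrality, q=query coverage,
        # l=language-model scores, s=stream-level stats, p=nugget probability, d=doc condition,
        # g=cache similarity (filled in below), I=interactions with probability and df-delta.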
feats = {
"b": [(k, df[k].tolist()[0]) for k in basic_cols],
"c": [(k, df[k].tolist()[0]) for k in sum_cols],
"q": [(k, df[k].tolist()[0]) for k in query_fw_cols],
"l": [(k, df[k].tolist()[0]) for k in lm_cols],
"s": [(k, df[k].tolist()[0]) for k in stream_cols],
"p": [("probs",sent["probs"])],
"d": dc_feat,
#"t": [("time", days)],
# "g": ["GAIN_POS" if gain > 0 else "GAIN_ZERO"],
}
if cache is None:
feats["g"] = [("EMPTY", 1)]
else:
h = FeatureHasher(input_type="string")
X_c = h.transform(cache["lemmas stopped"].tolist())
x_i = h.transform([sent["lemmas stopped"]])
K = cosine_similarity(X_c, x_i)
k_u = K.mean()
k_max = K.max()
if k_max == 0:
feats["g"] = [("CACHE_SIM_ZERO", 1)]
else:
feats["g"] = [("CACHE_SIM_amean", k_u), ("CACHE_SIM_max", k_max)]
K_l = cosine_similarity(cache_latent, x)
k_lu = K_l.mean()
k_lmax = K_l.max()
if k_lmax == 0:
feats["g"].append(("CACHE_SEM_SIM_ZERO", 1))
else:
feats["g"].extend([("CACHE_SEM_SIM_amean", k_lu), ("CACHE_SEM_SIM_max", k_lmax)])
feats["I"] = []
# for ns in ["b", "c", "q", "l", "s", "g"]:
# for feat, val in feats[ns]:
# feats["I"].append(("{}^time^prob".format(feat), val * sent["probs"] * days))
# feats["I"].append(("{}^prob".format(feat), val * sent["probs"]))
# feats["I"].append(("{}^time".format(feat), val * days))
for ns in ["b", "c", "q", "l", "s", "g", "d"]:
for feat, val in feats[ns]:
feats["I"].append(("{}^df^prob".format(feat), val * sent["probs"] * dfdelta))
feats["I"].append(("{}^prob".format(feat), val * sent["probs"]))
feats["I"].append(("{}^df".format(feat), val * dfdelta))
ifeats = {'a': []}
if self.use_i_only:
NS = ["I"]
else:
NS = ["b", "c", "q", "l", "s", "g", "p", "d", "I"]
if self.use_best_feats:
for ns in NS:
for key, val in feats[ns]:
if key not in best_feats: continue
ifeats['a'].append((fmap[key], val))
ifeats['a'].append((fmap["CONSTANT"], 1))
else:
for ns in NS:
for key, val in feats[ns]:
ifeats['a'].append((fmap[key], val))
ifeats['a'].append((fmap["CONSTANT"], 1))
ex = self.example(ifeats)
#select_weight = sum(self.vw.get_weight(idx, 0) * val for idx, val in ifeats['a'])
#next_weight = sum(self.vw.get_weight(fmap._idx + idx , 0) * val for idx, val in ifeats['a'])
#print select_weight, next_weight
#self._scores.append({"SELECT": select_weight, "NEXT": next_weight})
# self._scores.append(ex.get_partial_prediction())
#print select_weight, next_weight, self.example(ifeats).get_label() #, self.example(ifeats).get_costsensitive_partial_prediction(1) #self.vw.get_partial_prediction(self.example(ifeats)
return ex
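    # Run one episode over an (event, stream dataframe, latent matrix, df-deltas) instance.
    # The oracle says SELECT exactly when the sentence carries at least one unseen nugget;
    # selected sentences are appended to the cache that feeds the similarity features.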
def _run(self, (event, df_stream, X_stream, dfdeltas)):
nuggets = set()
cache = None
cache_in = None
cache_latent = None
output = []
current_dfdelta_idx = 0
current_dfdelta = 0
n = 0
y_int_y_hat = 0
size_y = 0
size_y_hat = 0
loss = 0
for (_, sent), x in zip(df_stream.iterrows(), X_stream):
intts = int(sent["timestamp"])
if intts > dfdeltas[current_dfdelta_idx +1][0]:
current_dfdelta_idx += 1
current_dfdelta = dfdeltas[current_dfdelta_idx][1]
if self.use_abs_df:
current_dfdelta = abs(current_dfdelta)
days = (datetime.utcfromtimestamp(int(sent["timestamp"])) - event.start).total_seconds() / (60. * 60. * 24.)
n += 1
gain = len(sent["nuggets"] - nuggets)
if self.sch.predict_needs_example():
examples = self.make_example(sent, cache, cache_in, days, x, cache_latent, current_dfdelta)
else:
examples = None
if gain > 0:
oracle = SELECT
else:
oracle = SKIP
# Make prediction.
pred = self.sch.predict(
examples=examples,
my_tag=n,
oracle=oracle,
condition=[], # (n-1, "p"), ])
)
output.append(pred)
#if examples is not None:
if self._keep_scores:
#if self._keep_scores:
#print "HERE"
#lab = pyvw.cost_sensitive_label()
#print "HERE2"
#lab.from_example(examples)
#print "HERE3"
#for wc in lab.costs:
# print wc.partial_prediction,
#print pred
#print examples.get_costsensitive_partial_prediction(1)
#select_weight = sum(self.vw.get_weight(idx, 0) * val for idx, val in ifeats['a'])
#next_weight = sum(self.vw.get_weight(fmap._idx + idx , 0) * val for idx, val in ifeats['a'])
#self._scores.append({"SELECT": select_weight, "NEXT": next_weight})
self._scores.append(examples.get_partial_prediction())
#if examples is not None:
# print self._scores[-1], examples.get_partial_prediction(), "SELECT" if oracle == SELECT else "NEXT", "PRED: SELECT" if pred == SELECT else "PRED: NEXT"
#print examples.get_simplelabel_label(), examples.get_multiclass_label(), oracle, 1 / (1 + math.exp(-examples.get_partial_prediction())), pred
#print pyvw.cost_sensitive_label().from_example(examples)
# if self._keep_scores:
# ascore = self._scores[-1]
# print ascore, ascore["SELECT"] >= ascore["NEXT"] if pred == SELECT else ascore["SELECT"] <= ascore["NEXT"], ascore["SELECT"] + ascore["NEXT"]
#print select_weight, next_weight, self.example(ifeats).get_label() #, self.example(ifeats).get_costsensitive_partial_prediction(1) #self.vw.get_partial_prediction(self.example(ifeats)
if pred != oracle:
if oracle == SELECT:
loss += self._loss
else:
loss += 1
if pred == SELECT and oracle == SELECT:
y_int_y_hat += 1
size_y += 1
size_y_hat += 1
elif pred == SELECT and oracle == SKIP:
size_y_hat += 1
elif pred == SKIP and oracle == SELECT:
size_y += 1
#if self._with_scores is True:
# print "examining:", sent["pretty text"]
if pred == SELECT:
nuggets.update(sent["nuggets"])
if cache is None:
cache = sent.to_frame().transpose()
cache_latent = x
else:
cache = pd.concat([cache, sent.to_frame().transpose()])
cache_latent = np.vstack([cache_latent, x])
# else:
# if cache_in is None:
# cache_in = sent.to_frame().transpose()
# else:
# cache_in = pd.concat([cache_in, sent.to_frame().transpose()])
Z = size_y + size_y_hat
if Z == 0: Z = 1
loss = 1 - float(y_int_y_hat) / Z
self.sch.loss(loss)
self.total_loss += loss
return output
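# MPI master: hands out leave-one-event-out (training events, test event) jobs to workers
# and streams their per-job score, weight, and summary frames into shared TSV files.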
def start_manager(event_ids, output_dir):
events = [e for e in cuttsum.events.get_events()
if e.query_num in event_ids]
jobs = [(events[0:i] + events[i+1:], test_event)
for i, test_event in enumerate(events)]
comm = MPI.COMM_WORLD #.Accept(port, info, 0)
status = MPI.Status() # get MPI status object
if not os.path.exists(output_dir):
os.makedirs(output_dir)
w_path = os.path.join(output_dir, "weights.tsv")
s_path = os.path.join(output_dir, "scores.tsv")
t_path = os.path.join(output_dir, "summary.tsv")
n_workers = comm.size - 1
first_write = True
with open(w_path, "w") as w_f, open(s_path, "w") as s_f, \
open(t_path, "w") as t_f:
while n_workers > 0:
data = comm.recv(
source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
source = status.Get_source()
tag = status.Get_tag()
print "STATUS", tag, "SOURCE", source
if tag == tags.READY:
if len(jobs) > 0:
job = jobs.pop(0)
comm.send(job, dest=source, tag=tags.WORKER_START)
else:
comm.send(None, dest=source, tag=tags.WORKER_STOP)
n_workers -= 1
if data is not None:
scores_df, weights_df, summary_df = data
scores_df.to_csv(s_f, sep="\t", index=False, header=first_write)
s_f.flush()
weights_df.to_csv(w_f, sep="\t", index=False, header=first_write)
w_f.flush()
summary_df.to_csv(t_f, sep="\t", index=False, header=first_write)
t_f.flush()
first_write = False
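# Map each event type onto one of four semantic-similarity models
# (accident, natural disaster, social unrest, attack-type events).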
def get_all_semsim():
accident_semsim = event2semsim("accident")
natdis_semsim = event2semsim("earthquake")
social_semsim = event2semsim("protest")
terror_semsim = event2semsim("shooting")
return {
"accident": accident_semsim,
"earthquake": natdis_semsim,
"storm": natdis_semsim,
"impact event": natdis_semsim,
"shooting": terror_semsim,
"hostage": terror_semsim,
"conflict": terror_semsim,
"bombing": terror_semsim,
"protest": social_semsim,
"riot": social_semsim,
}
def get_dfdeltas():
with open("doc_freqs.tsv", "r") as f:
df = pd.read_csv(f, sep="\t")
def get(event):
df_e = df[df["event"] == event.query_id]
mylist = [[0,0]] + zip(df_e["hour"].tolist(), df_e["df delta"].tolist())
return mylist
return get
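# MPI worker loop: request jobs from the master, run one full cross-validation fold per job,
# and return the resulting frames alongside the next READY message.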
def start_worker(sample_size, samples_per_event, gold_probs, iters, l2, log_time,
use_best_feats, use_i_only, use_abs_df):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    status = MPI.Status() # get MPI status object
job_results = None
semsims = get_all_semsim()
dfdeltas = get_dfdeltas()
while True:
comm.send(job_results, dest=0, tag=tags.READY)
data = comm.recv(
source=0, tag=MPI.ANY_TAG, status=status)
source = status.Get_source()
tag = status.Get_tag()
if tag == tags.WORKER_START:
training_events, test_event = data
print "JOBBING", test_event.fs_name()
job_results = do_work(
training_events, test_event,
sample_size, samples_per_event, gold_probs, iters, l2, log_time, semsims, dfdeltas,
use_best_feats, use_i_only, use_abs_df)
if tag == tags.WORKER_STOP:
break
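# Legacy MPI fold: train on subsamples of the training events, re-score the held-out event
# after every iteration, and return per-iteration score, weight, and summary frames.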
def do_work(training_events, test_event, sample_size, samples_per_event,
gold_probs, iters, l2, log_time, semsims, dfdeltas,
use_best_feats, use_i_only, use_abs_df):
training_streams = []
summary = []
for event in training_events:
df = get_input_stream(event, gold_probs)
training_streams.append((event, df))
test_df = get_input_stream(test_event, gold_probs)
test_X_l = semsims[test_event.type].transform(
test_df["stems"].apply(lambda x: ' '.join(x)).tolist())
test_stream = (test_event, test_df, test_X_l, dfdeltas(test_event))
vw = pyvw.vw(
("--l2 {} --search 2 --search_task hook --ring_size 1024 " + \
"--search_no_caching --noconstant --quiet").format(l2))
task = vw.init_search_task(Summarizer)
task.use_best_feats = use_best_feats
task.use_i_only = use_i_only
task.use_abs_df = use_abs_df
print "use best?", task.use_best_feats
print "use i only?", task.use_i_only
print "use abs df?", task.use_abs_df
task.log_time = log_time
all_scores = []
all_weights = []
instances = []
for sample in xrange(samples_per_event):
for event, stream in training_streams:
while 1:
sample_stream = ds(stream, sample_size=sample_size)
if (sample_stream["nuggets"].apply(len) > 0).any():
break
X_l = semsims[event.type].transform(
sample_stream["stems"].apply(lambda x: ' '.join(x)).tolist())
instances.append((event, sample_stream, X_l, dfdeltas(event)))
for n_iter in xrange(1, iters + 1):
task.total_loss = 0
#instances = [(event, ds(stream, sample_size=sample_size))
# for event, stream in training_streams
# for sample in xrange(samples_per_event)]
random.shuffle(instances)
for i, inst in enumerate(instances):
print "{}.{}.{}/{}".format(
test_event.fs_name(), n_iter, i, len(instances))
task.learn([inst])
print "{}.{}.p".format(
test_event.fs_name(), n_iter)
train_egain = 0
train_comp = 0
train_f1 = 0
train_loss = 0
for i, inst in enumerate(instances):
egain, comp, f1, loss, train_sum = predict(task, inst, n_iter)
train_egain += egain
train_comp += comp
train_f1 += f1
train_loss += loss
train_egain = train_egain / float(len(instances))
train_comp = train_comp / float(len(instances))
train_f1 = train_f1 / float(len(instances))
train_loss = train_loss / float(len(instances))
print "{} {} train loss {}".format(test_event.query_id, n_iter, train_loss)
pred = task.predict(test_stream)
select_df, next_df = task.get_feature_weights()
select_df["class"] = "SELECT"
select_df["iter"] = n_iter
next_df["class"] = "NEXT"
next_df["iter"] = n_iter
all_weights.append(select_df)
all_weights.append(next_df)
pred = ["SELECT" if p == SELECT else "SKIP" for p in pred]
all_nuggets = set()
for nuggets in test_stream[1]["nuggets"].tolist():
all_nuggets.update(nuggets)
loss = 0
y_int_y_hat = 0
size_y = 0
size_y_hat = 0
nuggets = set()
for action, (_, sent) in izip(pred, test_stream[1].iterrows()):
gain = len(sent["nuggets"] - nuggets)
if action == "SELECT":
if gain == 0:
loss += 1
summary.append({
"event": test_event.query_id,
"iter": n_iter,
"update id": sent["update id"],
"timestamp": sent["timestamp"],
"gain": gain,
"nuggets": ",".join(sent["nuggets"]),
"update text": sent["pretty text"]
})
nuggets.update(sent["nuggets"])
else:
if gain > 0:
loss += 1
if gain > 0:
oracle = "SELECT"
else:
oracle = "SKIP"
if action == "SELECT" and oracle == "SELECT":
y_int_y_hat += 1
size_y += 1
size_y_hat += 1
elif action == "SELECT" and oracle == "SKIP":
size_y_hat += 1
elif action == "SKIP" and oracle == "SELECT":
size_y += 1
if size_y_hat == 0:
print test_event
print (test_stream[1]["nuggets"].apply(len) > 0).any()
        if size_y + size_y_hat == 0:
            loss = 1
        else:
            loss = 1 - float(y_int_y_hat) / (size_y + size_y_hat)
if len(nuggets) > 0:
egain = len(nuggets) / sum([1.0 if a == "SELECT" else 0.0 for a in pred])
else:
egain = 0
comp = len(nuggets) / float(len(all_nuggets))
all_scores.append({"iter": n_iter, "Comp.": comp,
"E[gain]": egain, "Loss": loss,
"Avg. Train Loss": train_loss,
"Avg. Train E[gain]": train_egain,
"Avg. Train Comp.": train_comp,
"Avg. Train F1": train_f1,
})
print "{}.{}.p E[gain]={:0.6f} Comp.={:0.6f} Train Loss={:0.6f}".format(
test_event.fs_name(), n_iter, egain, comp, train_loss)
scores_df = pd.DataFrame(all_scores, columns=["iter", "E[gain]", "Comp.", "Loss", "Avg. Train Loss", "Avg. Train E[gain]", "Avg. Train Comp.", "Avg. Train F1"])
weights_df = pd.concat(all_weights)
weights_df["event"] = test_event.query_id
scores_df["event"] = test_event.query_id
summary_df = pd.DataFrame(
summary,
columns=["iter", "event", "update id", "timestamp", "gain",
"update text", "nuggets"])
return scores_df, weights_df, summary_df
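# Load the deduplicated article stream for an event, attach gold or regressed nugget
# probabilities, drop nuggets whose first known match comes after a sentence's timestamp,
# and keep only documents the dedup resource marks as matched (optionally first 5 sentences).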
def get_input_stream(event, gold_probs, extractor="goose", thresh=.8, delay=None, topk=20,
use_2015F=False, truncate=False):
max_nuggets = 3
corpus = cuttsum.corpora.get_raw_corpus(event)
if use_2015F is True and event.query_num > 25:
corpus = cuttsum.corpora.FilteredTS2015()
print event, corpus
res = InputStreamResource()
df = pd.concat(
res.get_dataframes(event, corpus, extractor, thresh, delay, topk))
selector = (df["n conf"] == 1) & (df["nugget probs"].apply(len) == 0)
df.loc[selector, "nugget probs"] = df.loc[selector, "nuggets"].apply(lambda x: {n:1 for n in x})
df["true probs"] = df["nugget probs"].apply(lambda x: [val for key, val in x.items()] +[0])
df["true probs"] = df["true probs"].apply(lambda x: np.max(x))
df.loc[(df["n conf"] == 1) & (df["nuggets"].apply(len) == 0), "true probs"] = 0
if gold_probs is True:
df["probs"] = df["true probs"]
else:
df["probs"] = NuggetRegressor().predict(event, df)
df["nuggets"] = df["nugget probs"].apply(
lambda x: set([key for key, val in x.items() if val > .9]))
nid2time = {}
nids = set(matches_df[matches_df["query id"] == event.query_id]["nugget id"].tolist())
for nid in nids:
ts = matches_df[matches_df["nugget id"] == nid]["update id"].apply(lambda x: int(x.split("-")[0])).tolist()
ts.sort()
nid2time[nid] = ts[0]
fltr_nuggets = []
for name, row in df.iterrows():
fltr_nuggets.append(
set([nug for nug in row["nuggets"] if nid2time[nug] <= row["timestamp"]]))
#print df[["nuggets", "timestamp"]].apply(lambda y: print y[0]) # datetime.utcfromtimestamp(int(y["timestamp"])))
#print nids
df["nuggets"] = fltr_nuggets
df["nuggets"] = df["nuggets"].apply(lambda x: x if len(x) <= max_nuggets else set([]))
from cuttsum.pipeline import DedupedArticlesResource
ded = DedupedArticlesResource()
stats_df = ded.get_stats_df(event, corpus, extractor, thresh)
stats_df["stream ids"] = stats_df["stream ids"].apply(lambda x: set(eval(x)))
sid2match = {}
for _, row in stats_df.iterrows():
for sid in row["stream ids"]:
sid2match[sid] = row["match"]
all_ts = []
all_docs = []
new_docs = []
for (sid, ts), doc in df.groupby(["stream id", "timestamp"]):
if truncate is True:
doc = doc.iloc[0:5]
# print sub_doc
if len(all_ts) > 0:
assert ts >= all_ts[-1]
all_ts.append(ts)
if sid2match[sid] is True:
new_docs.append(doc)
all_docs.append(doc)
df = pd.concat(new_docs)
print len(all_docs), len(new_docs)
return df
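# Uniform random downsample of a stream dataframe that preserves temporal order.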
def ds(df, sample_size=100):
I = np.arange(len(df))
np.random.shuffle(I)
I = I[:sample_size]
I = np.sort(I)
return df.iloc[I]
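# Score the current policy on one instance: expected gain (distinct nuggets covered per
# selected update), comprehensiveness, their F1, the set-difference loss, and the summary rows.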
def predict(task, event_stream, n_iter):
# task._keep_scores = True
pred = task.predict(event_stream)
pred = ["SELECT" if p == SELECT else "SKIP" for p in pred]
# scores = task._scores
# for score, action in zip(scores, pred):
# sel = math.exp(- ( score["SELECT"]))
# nex = math.exp(- score["NEXT"])
# Z = sel + nex
# p_sel = 1. / (1. + sel)
# p_sel = sel / Z
# p_nex = 1. / (1. + nex) #
# p_nex = nex / Z
# print p_sel, p_nex, action
#p_nex, action
all_nuggets = set()
for nuggets in event_stream[1]["nuggets"].tolist():
all_nuggets.update(nuggets)
loss = 0
y_int_y_hat = 0
size_y = 0
size_y_hat = 0
summary = []
nuggets = set()
for action, (_, sent) in izip(pred, event_stream[1].iterrows()):
gain = len(sent["nuggets"] - nuggets)
if action == "SELECT":
summary.append({
"event": event_stream[0].query_id,
"iter": n_iter,
"update id": sent["update id"],
"timestamp": sent["timestamp"],
"gain": gain,
"nuggets": ",".join(sent["nuggets"]),
"update text": sent["pretty text"]
})
nuggets.update(sent["nuggets"])
if gain > 0:
oracle = "SELECT"
else:
oracle = "SKIP"
if action == "SELECT" and oracle == "SELECT":
y_int_y_hat += 1
size_y += 1
size_y_hat += 1
elif action == "SELECT" and oracle == "SKIP":
size_y_hat += 1
elif action == "SKIP" and oracle == "SELECT":
size_y += 1
if size_y + size_y_hat == 0:
loss = 1
else:
loss = 1 - float(y_int_y_hat) / (size_y + size_y_hat)
if len(nuggets) > 0:
egain = len(nuggets) / sum([1.0 if a == "SELECT" else 0.0 for a in pred])
else:
egain = 0
comp = len(nuggets) / float(len(all_nuggets))
f1 = 2 * ( egain * comp) / (egain + comp) if egain + comp > 0 else 0
return egain, comp, f1, loss, summary
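# NOTE: this second do_work definition shadows the MPI-based one above; main() calls this
# version. It trains on the pre-built subsample instances, picks the best dev-set iteration
# separately for F1, Loss, E[gain], and Comp., and writes submission/summary/weight/score files.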
def do_work(train_instances, dev_instances, test_instances, sample_size, samples_per_event,
gold_probs, iters, l2, log_time, semsims, dfdeltas,
use_best_feats, use_i_only, use_abs_df, doc_condition, output_dir):
vw = pyvw.vw(
("-l .5 --l2 {} --search 2 --search_task hook --ring_size 1024 " + \
"--search_no_caching --noconstant --quiet").format(l2))
task = vw.init_search_task(Summarizer)
task.use_best_feats = use_best_feats
task.use_i_only = use_i_only
task.use_abs_df = use_abs_df
task._doc_condition = doc_condition
print "use best?", task.use_best_feats
print "use i only?", task.use_i_only
print "use abs df?", task.use_abs_df
print "use doc condition?", task._doc_condition
all_scores = []
all_weights = []
for n_iter in xrange(1, iters + 1):
task.total_loss = 0
random.shuffle(train_instances)
print "iter", n_iter
task.learn(train_instances)
for i, inst in enumerate(dev_instances):
egain, comp, f1, loss, _ = predict(task, inst, n_iter)
print egain, comp, f1, loss
all_scores.append(
{"iter": n_iter, "E[gain]": egain, "Comp.": comp, "F1": f1,
"Loss": loss}
)
df = pd.DataFrame(all_scores)
df_u = df.groupby("iter").mean().reset_index(drop=True)
print df_u
select_df, next_df = task.get_feature_weights()
select_df["class"] = "SELECT"
select_df["iter"] = n_iter
next_df["class"] = "NEXT"
next_df["iter"] = n_iter
all_weights.append(select_df)
all_weights.append(next_df)
best_f1_iter = df_u["F1"].argmax() + 1
best_egain_iter = df_u["E[gain]"].argmax() + 1
best_comp_iter = df_u["Comp."].argmax() + 1
best_loss_iter = df_u["Loss"].argmin() + 1
weights_df = pd.concat(all_weights)
all_summaries = []
# all_scores = []
F1_weights = weights_df[weights_df["iter"] == best_f1_iter]
loss_weights = weights_df[weights_df["iter"] == best_loss_iter]
egain_weights = weights_df[weights_df["iter"] == best_egain_iter]
comp_weights = weights_df[weights_df["iter"] == best_comp_iter]
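    # Reload the weights from the best dev iteration for the given metric and emit
    # TREC-style update rows (event, team, run, stream id, sentence id, timestamp, score).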
def get_summaries(weights, run):
print "Best", run
task.set_weights(weights)
for test_instance in test_instances:
event = test_instance[0]
df = test_instance[1]
print event
task._keep_scores = True
task._scores = []
predictions = task.predict(test_instance)
assert len(predictions) == len(task._scores)
for action, (_, row), ascore in zip(predictions, df.iterrows(), task._scores):
if action == SELECT:
# assert ascore["SELECT"] <= ascore["NEXT"]
print "{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
event.query_num, "CUNLP", run,
"-".join(row["update id"].split("-")[0:2]),
row["update id"].split("-")[2],
row["timestamp"], ascore)
all_summaries.append(
{"event": event.query_num,
"team": "CUNLP",
"run": run,
"stream id": "-".join(row["update id"].split("-")[0:2]),
"sentence id": row["update id"].split("-")[2],
"timestamp": row["timestamp"],
"confidence": row["probs"],
"partial": ascore,
"text": row["sent text"],
"pretty text": row["pretty text"]
})
get_summaries(F1_weights, "L2S.F1")
get_summaries(loss_weights, "L2S.Loss")
get_summaries(egain_weights, "L2S.E[gain]")
get_summaries(comp_weights, "L2S.Comp.")
df = pd.DataFrame(all_summaries,
columns=["event", "team", "run", "stream id", "sentence id",
"timestamp", "confidence", "partial", "pretty text", "text"])
submission_path = os.path.join(output_dir, "submission.tsv")
summary_path = os.path.join(output_dir, "summaries.tsv")
f1_weights_path = os.path.join(output_dir, "weights.f1.tsv")
loss_weights_path = os.path.join(output_dir, "weights.loss.tsv")
egain_weights_path = os.path.join(output_dir, "weights.egain.tsv")
comp_weights_path = os.path.join(output_dir, "weights.comp.tsv")
scores_path = os.path.join(output_dir, "scores.tsv")
no_text = ["event", "team", "run", "stream id", "sentence id",
"timestamp", "confidence", "partial"]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
df["confidence"] = df["confidence"].apply(lambda x: max(x, 0))
with open(submission_path, "w") as f:
df[no_text].to_csv(f, index=False, header=False, sep="\t")
with open(summary_path, "w") as f:
df.to_csv(f, index=False, sep="\t")
with open(f1_weights_path, "w") as f:
F1_weights.to_csv(f, index=False, sep="\t")
with open(loss_weights_path, "w") as f:
loss_weights.to_csv(f, index=False, sep="\t")
with open(egain_weights_path, "w") as f:
egain_weights.to_csv(f, index=False, sep="\t")
with open(comp_weights_path, "w") as f:
comp_weights.to_csv(f, index=False, sep="\t")
with open(scores_path, "w") as f:
df_u.to_csv(f, sep="\t", index=False)
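# Draw fixed-size random subsamples from each training stream, rejecting any sample
# that contains no nugget-bearing sentence.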
def make_subsamples(streams, samples_per_stream, sample_size, dfdeltas, semsims):
instances = []
for sample in xrange(samples_per_stream):
for event, stream in streams:
while 1:
sample_stream = ds(stream, sample_size=sample_size)
if (sample_stream["nuggets"].apply(len) > 0).any():
break
X_l = semsims[event.type].transform(
sample_stream["stems"].apply(lambda x: ' '.join(x)).tolist())
instances.append((event, sample_stream, X_l, dfdeltas(event)))
return instances
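# Hold out one test event, use events 19, 23, 27, 34, and 35 as the dev set for model
# selection, train on subsamples of the remaining events (24 and 7 are always excluded),
# and run do_work end to end on the held-out test stream.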
def main(test_event, sample_size, samples_per_event, gold_probs, iters, l2,
log_time, use_best_feats, use_i_only, use_abs_df, output_dir,
use_2015F, truncate, doc_condition):
semsims = get_all_semsim()
dfdeltas = get_dfdeltas()
dev_ids = [19, 23, 27, 34, 35]
streams = []
for event in cuttsum.events.get_events():
if event.query_num in set([24, 7, test_event] + dev_ids): continue
#or event.query_num > 25: continue
df = get_input_stream(
event, gold_probs, use_2015F=use_2015F, truncate=truncate)
streams.append((event, df))
dev_instances = []
for event in cuttsum.events.get_events():
if event.query_num not in dev_ids: continue
df = get_input_stream(
event, gold_probs, use_2015F=use_2015F, truncate=truncate)
X_l = semsims[event.type].transform(
df["stems"].apply(lambda x: ' '.join(x)).tolist())
dev_instances.append((event, df, X_l, dfdeltas(event)))
test_streams = []
for event in cuttsum.events.get_events():
if event.query_num != test_event: continue
df = get_input_stream(
event, gold_probs, use_2015F=use_2015F, truncate=truncate)
X_l = semsims[event.type].transform(
df["stems"].apply(lambda x: ' '.join(x)).tolist())
test_streams.append((event, df, X_l, dfdeltas(event)))
train_instances = make_subsamples(streams, samples_per_event,
sample_size, dfdeltas, semsims)
#dev_instances = make_subsamples(streams, samples_per_event,
# sample_size, dfdeltas, semsims)
job_results = do_work(
train_instances, dev_instances, test_streams,
sample_size, samples_per_event, gold_probs, iters, l2, log_time,
semsims, dfdeltas, use_best_feats, use_i_only, use_abs_df,
doc_condition, output_dir)
if __name__ == u"__main__":
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument(u"--event-ids", type=int, nargs=u"+",
# help=u"event ids to select.")
parser.add_argument(u"--sample-size", type=int,
default=100,
help=u"downsample size for each training instance.")
parser.add_argument(
u"--samples-per-event", type=int,default=10,
help=u"number of training instances to make from each event.")
parser.add_argument(
u"--gold-probs", type=bool, default=False,
help=u"Use gold nugget probability feature.")
parser.add_argument(u"--iters", type=int,
default=10,
help=u"Training iters")
parser.add_argument(u"--output-dir", type=str,
required=True, help="directory to write results.")
parser.add_argument(u"--l2", type=float,
default=0, help="l2 weight")
parser.add_argument(
u"--best-feats", action="store_true", default=False,
help=u"Use best features")
parser.add_argument(
u"--i-only", action="store_true", default=False,
help=u"Use interactions only")
parser.add_argument(
u"--abs-df", action="store_true", default=False,
help=u"Use absolute value of df deltas.")
parser.add_argument(
u"--filter", action="store_true", default=False,
help=u"Use 2015F corpus.")
parser.add_argument(
u"--truncate", action="store_true", default=False,
help=u"Use first 5 sentences per doc.")
parser.add_argument(
u"--doc-condition", action="store_true", default=False,
help=u"Condition on number of selects in current document")
parser.add_argument(
u"--log-time", action="store_true", default=False,
help=u"Use log(t) feature")
parser.add_argument(
u"--test-event", type=int, required=True)
args = parser.parse_args()
main(args.test_event, args.sample_size, args.samples_per_event,
args.gold_probs, args.iters, args.l2, args.log_time,
args.best_feats, args.i_only, args.abs_df, args.output_dir,
args.filter, args.truncate, args.doc_condition)
| apache-2.0 |
txominpelu/airflow | setup.py | 2 | 2145 | from setuptools import setup, find_packages
import sys
# Kept manually in sync with airflow.__version__
version = '1.3.0'
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'pyhs2>=0.6.0',
]
mysql = ['mysql-python>=1.2.5']
postgres = ['psycopg2>=2.6']
optional = ['librabbitmq>=1.6.1']
samba = ['pysmbclient>=0.1.3']
druid = ['pydruid>=0.2.1']
s3 = ['boto>=2.36.0']
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.13.0']
hdfs = ['snakebite>=2.4.13']
slack = ['slackclient>=0.15']
all_dbs = postgres + mysql + hive + mssql + hdfs
devel = all_dbs + doc + samba + s3 + ['nose'] + slack
setup(
name='airflow',
description='Programmatically author, schedule and monitor data pipelines',
version=version,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'celery>=3.1.17',
'chartkick>=0.4.2',
'dill>=0.2.2',
'flask>=0.10.1',
'flask-admin>=1.0.9',
'flask-cache>=0.13.1',
'flask-login>=0.2.11',
'flower>=0.7.3',
'future>=0.15.0',
'jinja2>=2.7.3',
'markdown>=2.5.2',
'pandas>=0.15.2',
'pygments>=2.0.1',
'python-dateutil>=2.3',
'requests>=2.5.1',
'setproctitle>=1.1.8',
'sqlalchemy>=0.9.8',
'statsd>=3.0.1',
'thrift>=0.9.2',
'tornado>=4.0.2',
],
extras_require={
'all': devel + optional,
'all_dbs': all_dbs,
'devel': devel,
'doc': doc,
'druid': druid,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'mssql': mssql,
'mysql': mysql,
'postgres': postgres,
's3': s3,
'samba': samba,
'slack': slack,
},
author='Maxime Beauchemin',
author_email='[email protected]',
url='https://github.com/airbnb/airflow',
download_url=(
'https://github.com/airbnb/airflow/tarball/' + version),
)
| apache-2.0 |
wilsonianb/nacl_contracts | buildbot/buildbot_pnacl.py | 2 | 3776 | #!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from buildbot_lib import (
BuildContext, BuildStatus, Command, ParseStandardCommandLine,
RemoveSconsBuildDirectories, RemoveGypBuildDirectories, RunBuild, SCons,
Step )
def SetupGypDefines(context, extra_vars=[]):
context.SetEnv('GYP_DEFINES', ' '.join(context['gyp_vars'] + extra_vars))
def SetupLinuxEnvironment(context):
SetupGypDefines(context, ['target_arch=' + context['gyp_arch']])
def BuildScriptX86(status, context):
# Clean out build directories.
with Step('clobber', status):
RemoveSconsBuildDirectories()
RemoveGypBuildDirectories()
# Unlike their arm counterparts we do not run trusted tests on x86 bots.
# Trusted tests get plenty of coverage by other bots, e.g. nacl-gcc bots.
# We make the assumption here that there are no "exotic tests" which
# are trusted in nature but are somehow depedent on the untrusted TC.
flags_build = ['skip_trusted_tests=1', 'do_not_run_tests=1']
flags_run = ['skip_trusted_tests=1']
smoke_tests = ['small_tests', 'medium_tests']
with Step('build_all', status):
SCons(context, parallel=True, args=flags_build)
# Normal pexe-mode tests
with Step('smoke_tests', status, halt_on_fail=False):
SCons(context, parallel=True, args=flags_run + smoke_tests)
# Large tests cannot be run in parallel
with Step('large_tests', status, halt_on_fail=False):
SCons(context, parallel=False, args=flags_run + ['large_tests'])
# non-pexe-mode tests. Build everything to make sure it all builds in nonpexe
# mode, but just run the nonpexe_tests
with Step('build_nonpexe', status):
SCons(context, parallel=True, args=flags_build + ['pnacl_generate_pexe=0'])
with Step('nonpexe_tests', status, halt_on_fail=False):
SCons(context, parallel=True,
args=flags_run + ['pnacl_generate_pexe=0', 'nonpexe_tests'])
irt_mode = context['default_scons_mode'] + ['nacl_irt_test']
smoke_tests_irt = ['small_tests_irt', 'medium_tests_irt']
# Run some tests with the IRT
with Step('smoke_tests_irt', status, halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + smoke_tests_irt)
# Test sandboxed translation
with Step('smoke_tests_sandboxed_translator', status):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + ['use_sandboxed_translator=1'] + smoke_tests_irt)
with Step('smoke_tests_sandboxed_fast', status):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + ['use_sandboxed_translator=1', 'translate_fast=1']
+ smoke_tests_irt)
# Translator memory consumption regression test
with Step('large_code_test', status):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + ['use_sandboxed_translator=1', 'large_code'])
# Test Non-SFI Mode.
with Step('nonsfi_tests', status):
# The only architectures that the PNaCl toolchain supports Non-SFI
# versions of are currently x86-32 and ARM, and ARM testing is covered
# by buildbot_pnacl.sh rather than this Python script.
if context['default_scons_platform'] == 'x86-32':
# TODO(mseaborn): Enable more tests here when they pass.
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + ['nonsfi_nacl=1', 'run_hello_world_test_irt'])
def Main():
context = BuildContext()
status = BuildStatus(context)
ParseStandardCommandLine(context)
if context.Linux():
SetupLinuxEnvironment(context)
else:
raise Exception('Unsupported platform')
RunBuild(BuildScriptX86, status)
if __name__ == '__main__':
Main()
| bsd-3-clause |
QuLogic/cartopy | lib/cartopy/tests/mpl/test_axes.py | 2 | 5117 | # Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
from unittest import mock
from matplotlib.testing.decorators import cleanup
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import numpy as np
import pytest
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import InterProjectionTransform, GeoAxes
from cartopy.tests.mpl import ImageTesting
from cartopy.tests.mpl.test_caching import CallCounter
class TestNoSpherical:
def setup_method(self):
self.ax = plt.axes(projection=ccrs.PlateCarree())
self.data = np.arange(12).reshape((3, 4))
def teardown_method(self):
plt.clf()
plt.close()
def test_contour(self):
with pytest.raises(ValueError):
self.ax.contour(self.data, transform=ccrs.Geodetic())
def test_contourf(self):
with pytest.raises(ValueError):
self.ax.contourf(self.data, transform=ccrs.Geodetic())
def test_pcolor(self):
with pytest.raises(ValueError):
self.ax.pcolor(self.data, transform=ccrs.Geodetic())
def test_pcolormesh(self):
with pytest.raises(ValueError):
self.ax.pcolormesh(self.data, transform=ccrs.Geodetic())
def test_transform_PlateCarree_shortcut():
src = ccrs.PlateCarree(central_longitude=0)
target = ccrs.PlateCarree(central_longitude=180)
# of the 3 paths, 2 of them cannot be short-cutted.
pth1 = mpath.Path([[0.5, 0], [10, 10]])
pth2 = mpath.Path([[0.5, 91], [10, 10]])
pth3 = mpath.Path([[-0.5, 0], [10, 10]])
trans = InterProjectionTransform(src, target)
counter = CallCounter(target, 'project_geometry')
with counter:
trans.transform_path(pth1)
# pth1 should allow a short-cut.
assert counter.count == 0
with counter:
trans.transform_path(pth2)
assert counter.count == 1
with counter:
trans.transform_path(pth3)
assert counter.count == 2
class Test_InterProjectionTransform:
def pc_2_pc(self):
return InterProjectionTransform(
ccrs.PlateCarree(), ccrs.PlateCarree())
def pc_2_rob(self):
return InterProjectionTransform(ccrs.PlateCarree(), ccrs.Robinson())
def rob_2_rob_shifted(self):
return InterProjectionTransform(
ccrs.Robinson(), ccrs.Robinson(central_longitude=0))
def test_eq(self):
assert self.pc_2_pc() == self.pc_2_pc()
assert self.pc_2_rob() == self.pc_2_rob()
assert self.rob_2_rob_shifted() == self.rob_2_rob_shifted()
assert not self.pc_2_rob() == self.rob_2_rob_shifted()
assert not self.pc_2_pc() == 'not a transform obj'
def test_ne(self):
assert not self.pc_2_pc() != self.pc_2_pc()
print(self.pc_2_pc() != self.pc_2_rob())
assert self.pc_2_pc() != self.pc_2_rob()
class Test_Axes_add_geometries:
def teardown_method(self):
plt.close()
@mock.patch('cartopy.mpl.geoaxes.GeoAxes.add_feature')
@mock.patch('cartopy.feature.ShapelyFeature')
def test_styler_kwarg(self, ShapelyFeature, add_feature_method):
ax = GeoAxes(plt.figure(), [0, 0, 1, 1],
map_projection=ccrs.Robinson())
ax.add_geometries(mock.sentinel.geometries, mock.sentinel.crs,
styler=mock.sentinel.styler, wibble='wobble')
ShapelyFeature.assert_called_once_with(
mock.sentinel.geometries, mock.sentinel.crs, wibble='wobble')
add_feature_method.assert_called_once_with(
ShapelyFeature(), styler=mock.sentinel.styler)
@cleanup
def test_geoaxes_subplot():
ax = plt.subplot(1, 1, 1, projection=ccrs.PlateCarree())
assert str(ax.__class__) == "<class 'cartopy.mpl.geoaxes.GeoAxesSubplot'>"
@ImageTesting(['geoaxes_subslice'])
def test_geoaxes_no_subslice():
"""Test that we do not trigger matplotlib's line subslice optimization."""
# This behavior caused lines with > 1000 points and
# sorted data to disappear
fig, axes = plt.subplots(1, 2, subplot_kw={'projection': ccrs.Mercator()})
for ax, num_points in zip(axes, [1000, 1001]):
lats = np.linspace(35, 37, num_points)
lons = np.linspace(-117, -115, num_points)
ax.plot(lons, lats, transform=ccrs.PlateCarree())
@ImageTesting(['geoaxes_set_boundary_clipping'])
def test_geoaxes_set_boundary_clipping():
"""Test that setting the boundary works properly for clipping #1620."""
lon, lat = np.meshgrid(np.linspace(-180., 180., 361),
np.linspace(-90., -60., 31))
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1, projection=ccrs.SouthPolarStereo())
# Limit the map to -60 degrees latitude and below.
ax1.set_extent([-180, 180, -90, -60], ccrs.PlateCarree())
ax1.gridlines()
ax1.contourf(lon, lat, lat, transform=ccrs.PlateCarree())
ax1.set_boundary(mpath.Path.circle(center=(0.5, 0.5), radius=0.5),
transform=ax1.transAxes)
| lgpl-3.0 |
GregMJP/PCWG | turbine.py | 1 | 22085 | import math
import interpolators
import scipy.interpolate
import numpy as np
import scipy as sp
from scipy import stats
import sys
import pandas as pd
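# PowerCurve wraps a binned power curve: it interpolates power and turbulence as functions
# of hub wind speed and, when turbulence renormalisation is enabled, derives a zero-turbulence
# curve so power can be corrected between reference and target turbulence intensities.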
class PowerCurve:
def __init__(self, powerCurveLevels, referenceDensity, rotorGeometry, powerCol, turbCol, wsCol = None,
countCol = None, fixedTurbulence = None, ratedPower = None,turbulenceRenormalisation=True,
name = 'Undefined', interpolationMode = 'Cubic', required = False):
self.actualPower = powerCol #strings defining column names
self.inputHubWindSpeed = wsCol
self.hubTurbulence = turbCol
self.dataCount = countCol
self.name = name
self.interpolationMode = interpolationMode
self.required = required
if (self.hubTurbulence is not None) and fixedTurbulence != None:
raise Exception("Cannot specify both turbulence levels and fixed turbulence")
self.availablePower = AvailablePower(rotorGeometry.area, referenceDensity)
self.powerCurveLevels = powerCurveLevels
self.referenceDensity = referenceDensity
self.rotorGeometry = rotorGeometry
has_pc = len(self.powerCurveLevels.index) != 0
self.firstWindSpeed = min(self.powerCurveLevels.index) if has_pc else None
self.cutInWindSpeed = self.calculateCutInWindSpeed(powerCurveLevels) if has_pc else None
self.cutOutWindSpeed = self.calculateCutOutWindSpeed(powerCurveLevels) if has_pc else None
if self.inputHubWindSpeed is None:
ws_data = None
else:
ws_data = powerCurveLevels[self.inputHubWindSpeed]
print "calculating power function"
self.powerFunction = self.createPowerFunction(powerCurveLevels[self.actualPower], ws_data) if has_pc else None
print "power function calculated"
self.ratedPower = self.getRatedPower(ratedPower, powerCurveLevels[self.actualPower]) if has_pc else None
if 'Data Count' in self.powerCurveLevels.columns:
self.hours = self.powerCurveLevels['Data Count'].sum()*1.0/6.0
else:
self.hours = 0.0
self.turbulenceFunction = self.createTurbulenceFunction(powerCurveLevels[self.hubTurbulence], ws_data) if has_pc else None
if (turbulenceRenormalisation and has_pc):
print "Calculating zero turbulence curve for {0} Power Curve".format(self.name)
try:
self.calcZeroTurbulencePowerCurve()
print "Calculation of zero turbulence curve for {0} Power Curve successful".format(self.name)
except Exception as error:
err_msg ="Calculation of zero turbulence curve for {0} Power Curve unsuccessful: {1}".format(self.name, error)
print self.required
if not self.required:
print err_msg
else:
raise Exception(err_msg)
def calcZeroTurbulencePowerCurve(self):
keys = sorted(self.powerCurveLevels[self.actualPower].keys())
integrationRange = IntegrationRange(0.0, 100.0, 0.1)
self.zeroTurbulencePowerCurve = ZeroTurbulencePowerCurve(keys, self.getArray(self.powerCurveLevels[self.actualPower], keys), self.getArray(self.powerCurveLevels[self.hubTurbulence], keys), integrationRange, self.availablePower)
self.simulatedPower = SimulatedPower(self.zeroTurbulencePowerCurve, integrationRange)
def getRatedPower(self, ratedPower, powerCurveLevels):
if ratedPower == None:
return powerCurveLevels.max()
else:
return ratedPower
def getThresholdWindSpeed(self):
return float(interpolators.LinearPowerCurveInterpolator(self.powerCurveLevels[self.actualPower].as_matrix(), list(self.powerCurveLevels[self.actualPower].index), self.ratedPower)(0.85*self.ratedPower) * 1.5)
def getTurbulenceLevels(self, powerCurveLevels, turbulenceLevels, fixedTurbulence):
if fixedTurbulence != None:
turbulenceLevels = pd.Series(index = powerCurveLevels.index)
for level in powerCurveLevels.index:
turbulenceLevels[level] = fixedTurbulence
else:
turbulenceLevels = turbulenceLevels
return turbulenceLevels
def getArray(self, dictionary, keys):
array = []
for key in keys:
array.append(dictionary[key])
return array
def createTurbulenceFunction(self, y_data, x_data):
if x_data is None:
x_data = pd.Series(y_data.index, index = y_data.index)
x, y = [], []
for i in y_data.index:
if i in x_data.index:
x.append(x_data[i])
else:
x.append(i)
y.append(y_data[i])
return interpolators.LinearTurbulenceInterpolator(x, y)
def createPowerFunction(self, y_data, x_data):
if x_data is None:
x_data = pd.Series(y_data.index, index = y_data.index)
x, y = [], []
for i in y_data.index:
if i in x_data.index and not np.isnan(x_data[i]):
x.append(x_data[i])
else:
x.append(i)
y.append(y_data[i])
print i, x[-1], y[-1]
if self.interpolationMode == 'Linear':
return interpolators.LinearPowerCurveInterpolator(x, y, self.cutOutWindSpeed)
elif self.interpolationMode == 'Cubic':
return interpolators.CubicPowerCurveInterpolator(x, y, self.cutOutWindSpeed)
elif self.interpolationMode == 'Marmander':
return interpolators.MarmanderPowerCurveInterpolator(x, y, self.cutOutWindSpeed)
else:
raise Exception('Unknown interpolation mode: %s' % self.interpolationMode)
def power(self, windSpeed, turbulence = None, extraTurbCorrection = False):
referencePower = self.powerFunction(windSpeed)
if turbulence == None:
power = referencePower
else:
referenceTurbulence = self.referenceTurbulence(windSpeed)
power = referencePower + self.simulatedPower.power(windSpeed, turbulence) - self.simulatedPower.power(windSpeed, referenceTurbulence)
if extraTurbCorrection: power *= self.calculateExtraTurbulenceCorrection(windSpeed, turbulence, referenceTurbulence)
power = max([0.0, power])
power = min([self.ratedPower, power])
return power
def calculateExtraTurbulenceCorrection(self, windSpeed, turbulence, referenceTurbulence):
saddle = 9.0
xprime = saddle - windSpeed
tprime = (referenceTurbulence - turbulence) / referenceTurbulence
if xprime < 0.0 or tprime < 0.0: return 1.0
a = -0.02 * math.tanh(2.0 * tprime)
b = -0.03 * (math.exp(1.5 * tprime) - 1.0)
loss = a * xprime + b
return 1 + loss
def referenceTurbulence(self, windSpeed):
if windSpeed < self.firstWindSpeed:
return self.turbulenceFunction(self.firstWindSpeed)
elif windSpeed > self.cutOutWindSpeed:
return self.turbulenceFunction(self.cutOutWindSpeed)
else:
return self.turbulenceFunction(windSpeed)
def calculateCutInWindSpeed(self, powerCurveLevels):
return min(self.nonZeroLevels(powerCurveLevels))
def calculateCutOutWindSpeed(self, powerCurveLevels):
return max(self.nonZeroLevels(powerCurveLevels))
def nonZeroLevels(self, powerCurveLevels):
levels = []
for windSpeed in self.powerCurveLevels.index:
if self.powerCurveLevels[self.actualPower][windSpeed] > 0.0:
levels.append(windSpeed)
return levels
def __str__(self):
value = "Wind Speed\tPower\n"
for windSpeed in self.powerCurveLevels:
value += "%0.2f\t%0.2f\n" % (windSpeed, self.power(windSpeed))
return value
class RotorGeometry:
def __init__(self, diameter, hubHeight):
self.diameter = diameter
self.radius = diameter / 2
self.area = math.pi * self.radius ** 2
self.hubHeight = hubHeight
self.lowerTip = self.hubHeight - self.radius
self.upperTip = self.hubHeight + self.radius
def withinRotor(self, height):
return height > self.lowerTip and height < self.upperTip
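# NOTE: NormDist is used by InterpolatedNormDist and DictionaryNormDist below but is not
# defined or imported in this file; a standard Gaussian pdf implementation is assumed here.
class NormDist:
    def probability(self, windSpeed, windSpeedMean, windSpeedStandardDeviation):
        # Gaussian probability density of windSpeed given the mean and standard deviation.
        oneOverStandardDeviation = 1.0 / windSpeedStandardDeviation
        z = (windSpeed - windSpeedMean) * oneOverStandardDeviation
        return oneOverStandardDeviation * math.exp(-0.5 * z * z) / math.sqrt(2.0 * math.pi)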
class InterpolatedNormDist:
def __init__(self):
#speed optimisation
self.xstep = 0.05
self.xend = 5.0
self.xstart = -self.xend
self.steps = int((self.xend - self.xstart) / self.xstep) + 1
x = np.linspace(self.xstart, self.xend, self.steps)
y = []
normDist = NormDist()
for i in range(len(x)):
y.append(normDist.probability(x[i], 0.0, 1.0))
self.f = scipy.interpolate.interp1d(x, y, bounds_error = False, fill_value = 0.0)
def probability(self, windSpeed, windSpeedMean, windSpeedStandardDeviation):
oneOverStandardDeviation = 1.0 / windSpeedStandardDeviation
standardDeviationsFromMean = oneOverStandardDeviation * (windSpeed - windSpeedMean)
return self.f(standardDeviationsFromMean) * oneOverStandardDeviation
class DictionaryNormDist:
def __init__(self):
#speed optimisation
self.decimalPlaces = 2
self.xstep = 0.1 ** self.decimalPlaces
self.xend = 5.0
self.xstart = -self.xend
x = np.arange(self.xstart, self.xend + self.xstep, self.xstep)
self.dictionary = {}
normDist = NormDist()
for i in range(len(x)):
self.dictionary[self.key(x[i])] = normDist.probability(x[i], 0.0, 1.0)
def probability(self, windSpeed, windSpeedMean, windSpeedStandardDeviation):
oneOverStandardDeviation = self.oneOver(windSpeedStandardDeviation)
standardDeviationsFromMean = self.standardDeviationsFromMean(windSpeed, windSpeedMean, oneOverStandardDeviation)
if self.inDictionary(standardDeviationsFromMean):
return self.lookUpDictionary(standardDeviationsFromMean) * oneOverStandardDeviation
else:
return 0.0
def oneOver(self, value):
return 1.0 / value
def standardDeviationsFromMean(self, value, mean, oneOverStandardDeviation):
return oneOverStandardDeviation * (value - mean)
def inDictionary(self, value):
if value < self.xstart: return False
if value > self.xend: return False
return True
def lookUpDictionary(self, value):
return self.dictionary[self.key(value)]
def key(self, value):
return round(value, self.decimalPlaces)
class IntegrationProbabilities:
def __init__(self, windSpeeds, windSpeedStep):
        # speed-optimised normal distribution
self.windSpeeds = windSpeeds
self.a = windSpeedStep / math.sqrt(2.0 * math.pi)
def probabilities(self, windSpeedMean, windSpeedStdDev):
if windSpeedStdDev == 0:
return np.nan
oneOverStandardDeviation = 1.0 / windSpeedStdDev
oneOverStandardDeviationSq = oneOverStandardDeviation * oneOverStandardDeviation
b = self.a * oneOverStandardDeviation
c = -0.5 * oneOverStandardDeviationSq
windSpeedMinusMeans = (self.windSpeeds - windSpeedMean)
windSpeedMinusMeanSq = windSpeedMinusMeans * windSpeedMinusMeans
d = c * windSpeedMinusMeanSq
return b * np.exp(d)
class IntegrationRange:
def __init__(self, minimumWindSpeed, maximumWindSpeed, windSpeedStep):
self.minimumWindSpeed = minimumWindSpeed
self.maximumWindSpeed = maximumWindSpeed
self.windSpeedStep = windSpeedStep
self.windSpeeds = np.arange(minimumWindSpeed, maximumWindSpeed, windSpeedStep)
self.integrationProbabilities = IntegrationProbabilities(self.windSpeeds, self.windSpeedStep)
def probabilities(self, windSpeedMean, windSpeedStdDev):
return self.integrationProbabilities.probabilities(windSpeedMean, windSpeedStdDev)
class AvailablePower:
def __init__(self, area, density):
self.area = area
self.density = density
def power(self, windSpeed):
return 0.5 * self.density * self.area * windSpeed * windSpeed * windSpeed / 1000.0
def powerCoefficient(self, windSpeed, actualPower):
return actualPower / self.power(windSpeed)
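# Final zero-turbulence curve: the initial guess corrected by the gap between the reference
# powers and the powers re-simulated at the reference turbulence levels.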
class ZeroTurbulencePowerCurve:
def __init__(self, referenceWindSpeeds, referencePowers, referenceTurbulences, integrationRange, availablePower):
self.integrationRange = integrationRange
self.initialZeroTurbulencePowerCurve = InitialZeroTurbulencePowerCurve(referenceWindSpeeds, referencePowers, referenceTurbulences, integrationRange, availablePower)
simulatedReferencePowerCurve = SimulatedPowerCurve(referenceWindSpeeds, self.initialZeroTurbulencePowerCurve, referenceTurbulences, integrationRange)
self.windSpeeds = referenceWindSpeeds
self.powers = []
for i in range(len(self.windSpeeds)):
power = referencePowers[i] - simulatedReferencePowerCurve.powers[i] + self.initialZeroTurbulencePowerCurve.powers[i]
self.powers.append(power)
#print "%f %f" % (self.windSpeeds[i], self.powers[i])
self.powerFunction = scipy.interpolate.interp1d(self.windSpeeds, self.powers)
self.minWindSpeed = min(self.windSpeeds)
self.maxWindSpeed = max(self.windSpeeds)
self.maxPower = max(self.powers)
self.dfPowerLevels = pd.DataFrame(self.powers, index = self.windSpeeds, columns = ['Power'])
def power(self, windSpeed):
if windSpeed <= self.minWindSpeed:
return 0.0
elif windSpeed >= self.maxWindSpeed:
return self.maxPower
else:
return self.powerFunction(windSpeed)
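# Initial zero-turbulence curve: iterate on rated power, cut-in speed, and peak Cp until the
# candidate curve, blurred with the reference turbulence, reproduces the reference curve's stats.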
class InitialZeroTurbulencePowerCurve:
def __init__(self, referenceWindSpeeds, referencePowers, referenceTurbulences, integrationRange, availablePower):
self.maxIterations = 5
self.integrationRange = integrationRange
self.availablePower = availablePower
self.referenceWindSpeeds = referenceWindSpeeds
self.referencePowers = referencePowers
self.referenceTurbulences = referenceTurbulences
self.referencePowerCurveStats = IterationPowerCurveStats(referenceWindSpeeds, referencePowers, availablePower)
#print "%f %f %f" % (self.referencePowerCurveStats.ratedPower, self.referencePowerCurveStats.cutInWindSpeed, self.referencePowerCurveStats.cpMax)
self.selectedStats = self.solve(self.referencePowerCurveStats)
selectedIteration = InitialZeroTurbulencePowerCurveIteration(referenceWindSpeeds,
self.availablePower,
self.selectedStats.ratedPower,
self.selectedStats.cutInWindSpeed,
self.selectedStats.cpMax)
self.ratedWindSpeed = selectedIteration.ratedWindSpeed
self.windSpeeds = selectedIteration.windSpeeds
self.powers = selectedIteration.powers
self.power = selectedIteration.power
def solve(self, previousIterationStats, iterationCount = 1):
if iterationCount > self.maxIterations: raise Exception("Failed to solve initial zero turbulence curve in permitted number of iterations")
iterationZeroTurbCurve = InitialZeroTurbulencePowerCurveIteration(self.integrationRange.windSpeeds,
self.availablePower,
previousIterationStats.ratedPower,
previousIterationStats.cutInWindSpeed,
previousIterationStats.cpMax)
iterationSimulatedCurve = SimulatedPowerCurve(self.referenceWindSpeeds, iterationZeroTurbCurve, self.referenceTurbulences, self.integrationRange)
iterationSimulatedCurveStats = IterationPowerCurveStats(iterationSimulatedCurve.windSpeeds, iterationSimulatedCurve.powers, self.availablePower)
convergenceCheck = IterationPowerCurveConvergenceCheck(self.referencePowerCurveStats, iterationSimulatedCurveStats)
#print "%f %f %f" % (iterationSimulatedCurveStats.ratedPower, iterationSimulatedCurveStats.cutInWindSpeed, iterationSimulatedCurveStats.cpMax)
#print "%s %s %s" % (convergenceCheck.ratedPowerConverged, convergenceCheck.cutInConverged, convergenceCheck.cpMaxConverged)
if convergenceCheck.isConverged:
return previousIterationStats
else:
return self.solve(IncrementedPowerCurveStats(previousIterationStats, convergenceCheck), iterationCount + 1)
class IterationPowerCurveConvergenceCheck:
def __init__(self, referenceStats, iterationStats):
        self.thresholdPowerDiff = referenceStats.ratedPower * 0.001
        self.thresholdCutInWindSpeedDiff = 0.5
        self.thresholdCpMaxDiff = 0.01
self.ratedPowerDiff = iterationStats.ratedPower - referenceStats.ratedPower
self.cutInDiff = iterationStats.cutInWindSpeed - referenceStats.cutInWindSpeed
self.cpMaxDiff = iterationStats.cpMax - referenceStats.cpMax
        self.ratedPowerConverged = abs(self.ratedPowerDiff) < self.thresholdPowerDiff
        self.cutInConverged = abs(self.cutInDiff) <= self.thresholdCutInWindSpeedDiff
        self.cpMaxConverged = abs(self.cpMaxDiff) <= self.thresholdCpMaxDiff
self.isConverged = self.ratedPowerConverged and self.cutInConverged and self.cpMaxConverged
class IncrementedPowerCurveStats:
def __init__(self, previousIterationStats, convergenceCheck):
if convergenceCheck.ratedPowerConverged:
self.ratedPower = previousIterationStats.ratedPower
else:
self.ratedPower = previousIterationStats.ratedPower - convergenceCheck.ratedPowerDiff
if convergenceCheck.cutInConverged:
self.cutInWindSpeed = previousIterationStats.cutInWindSpeed
else:
self.cutInWindSpeed = previousIterationStats.cutInWindSpeed - convergenceCheck.cutInDiff
if convergenceCheck.cpMaxConverged:
self.cpMax = previousIterationStats.cpMax
else:
self.cpMax = previousIterationStats.cpMax - convergenceCheck.cpMaxDiff
class InitialZeroTurbulencePowerCurveIteration:
def __init__(self, windSpeeds, availablePower, ratedPower, cutInWindSpeed, cpMax):
self.windSpeeds = windSpeeds
self.powers = []
self.ratedWindSpeed = ((2.0 * ratedPower * 1000.0)/(availablePower.density * cpMax * availablePower.area)) ** (1.0 / 3.0)
self.ratedPower = ratedPower
self.cutInWindSpeed = cutInWindSpeed
self.cpMax = cpMax
self.availablePower = availablePower
for windSpeed in self.windSpeeds:
self.powers.append(self.power(windSpeed))
def power(self, windSpeed):
if windSpeed > self.cutInWindSpeed:
if windSpeed < self.ratedWindSpeed:
return self.availablePower.power(windSpeed) * self.cpMax
else:
return self.ratedPower
else:
return 0.0
class IterationPowerCurveStats:
def __init__(self, windSpeeds, powers, availablePower):
self.ratedPower = max(powers)
thresholdPower = self.ratedPower * 0.001
operatingWindSpeeds = []
cps = []
for i in range(len(windSpeeds)):
windSpeed = windSpeeds[i]
power = powers[i]
cp = availablePower.powerCoefficient(windSpeed, power)
cps.append(availablePower.powerCoefficient(windSpeed, power))
if power >= thresholdPower: operatingWindSpeeds.append(windSpeed)
self.cpMax = max(cps)
if len(operatingWindSpeeds) > 0:
self.cutInWindSpeed = min(operatingWindSpeeds)
else:
self.cutInWindSpeed = 0.0
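# Expected power at a given mean wind speed and turbulence intensity, computed by integrating
# the zero-turbulence curve against a Gaussian wind-speed distribution.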
class SimulatedPower:
def __init__(self, zeroTurbulencePowerCurve, integrationRange):
self.zeroTurbulencePowerCurve = zeroTurbulencePowerCurve
self.integrationRange = integrationRange
integrationPowers = []
for windSpeed in np.nditer(self.integrationRange.windSpeeds):
integrationPowers.append(self.zeroTurbulencePowerCurve.power(windSpeed))
self.integrationPowers = np.array(integrationPowers)
def power(self, windSpeed, turbulence):
standardDeviation = windSpeed * turbulence
integrationProbabilities = self.integrationRange.probabilities(windSpeed, standardDeviation)
return np.sum(integrationProbabilities * self.integrationPowers) / np.sum(integrationProbabilities)
class SimulatedPowerCurve:
def __init__(self, windSpeeds, zeroTurbulencePowerCurve, turbulences, integrationRange):
simulatedPower = SimulatedPower(zeroTurbulencePowerCurve, integrationRange)
self.windSpeeds = windSpeeds
self.turbulences = turbulences
self.powers = []
for i in range(len(windSpeeds)):
windSpeed = windSpeeds[i]
turbulence = turbulences[i]
power = simulatedPower.power(windSpeed, turbulence)
self.powers.append(power)
| mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/core/api.py | 7 | 1837 |
# pylint: disable=W0614,W0401,W0611
# flake8: noqa
import numpy as np
from pandas.core.algorithms import factorize, match, unique, value_counts
from pandas.types.missing import isnull, notnull
from pandas.core.categorical import Categorical
from pandas.core.groupby import Grouper
from pandas.formats.format import set_eng_float_format
from pandas.core.index import (Index, CategoricalIndex, Int64Index,
RangeIndex, Float64Index, MultiIndex)
from pandas.core.series import Series, TimeSeries
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel, WidePanel
from pandas.core.panel4d import Panel4D
from pandas.core.groupby import groupby
from pandas.core.reshape import (pivot_simple as pivot, get_dummies,
lreshape, wide_to_long)
from pandas.core.indexing import IndexSlice
from pandas.tseries.offsets import DateOffset
from pandas.tseries.tools import to_datetime
from pandas.tseries.index import (DatetimeIndex, Timestamp,
date_range, bdate_range)
from pandas.tseries.tdi import TimedeltaIndex, Timedelta
from pandas.tseries.period import Period, PeriodIndex
# see gh-14094.
from pandas.util.depr_module import _DeprecatedModule
_removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay',
'customBusinessMonthEnd', 'customBusinessMonthBegin',
'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin',
'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd',
'byearEnd', 'week']
datetools = _DeprecatedModule(deprmod='pandas.core.datetools',
removals=_removals)
from pandas.core.config import (get_option, set_option, reset_option,
describe_option, option_context, options)
| apache-2.0 |
vibhatha/mean | App_54/scripts/svmalgo.py | 11 | 5316 | __author__ = 'Vbabey'
import numpy as np
def identify(input):
X_input =[]
#print('Input :'+input)
values=str.split(input, ",")
#print(values)
if(len(values)==5):
array=list(map(float, values))
#print(array)
X_input.append(array)
#print(X_input)
X = np.array([[59.13, 63.00, 232.60, 0.27, 0.94],
[58.62, 62.33, 232.45, 0.27, 0.94],[57.77, 61.66, 232.64, 0.27, 0.94],[59.02, 62.94, 232.65, 0.27, 0.94],[58.14, 62.02, 232.49, 0.27, 0.94],
[58.99, 62.90, 232.54, 0.27, 0.94],[59.19, 62.94, 232.73, 0.27, 0.94],[57.91, 61.84, 232.78, 0.27, 0.94],[59.41, 62.95, 232.77, 0.27, 0.94],
[58.84, 62.53, 232.61, 0.27, 0.94],[58.62, 62.24, 232.39, 0.27, 0.94],[59.25, 63.21, 233.36, 0.27, 0.94],[59.49, 63.30, 232.62, 0.27, 0.94],
[59.34, 62.86, 232.64, 0.27, 0.94],[58.86, 62.66, 232.74, 0.27, 0.94],[59.62, 63.71, 233.30, 0.27, 0.94],[58.39, 62.23, 232.45, 0.27, 0.94],
[58.90, 62.56, 232.53, 0.27, 0.94],[58.97, 62.90, 229.02, 0.27, 0.94],[60.03, 63.94, 230.12, 0.28, 0.94],[58.74, 62.74, 230.19, 0.27, 0.94],
[58.73, 62.92, 230.02, 0.27, 0.93],[59.07, 63.09, 229.94, 0.27, 0.94],[57.94, 61.80, 229.91, 0.27, 0.94],[59.38, 63.22, 229.82, 0.28, 0.94],
[58.28, 62.18, 229.84, 0.27, 0.94],[58.99, 62.42, 229.91, 0.27, 0.95],[59.48, 63.44, 229.86, 0.28, 0.94],[58.52, 62.29, 228.85, 0.27, 0.94],[59.07, 63.36, 229.82, 0.28, 0.93],[57.74, 61.61, 230.03, 0.27, 0.94],[59.74, 63.50, 230.07, 0.28, 0.94],[58.66, 62.67, 228.90, 0.27, 0.94],[59.60, 63.51, 229.79, 0.28, 0.94],[58.42, 62.43, 229.92, 0.27, 0.94],
[59.57, 63.48, 230.15, 0.28, 0.94],[113.38, 118.28, 229.07, 0.52, 0.96],[114.26, 118.80, 228.95, 0.52, 0.96],[113.65, 118.62, 229.01, 0.52, 0.96],
[114.37, 119.17, 228.95, 0.52, 0.96],[114.47, 119.28, 228.93, 0.52, 0.96],[114.30, 119.33, 228.80, 0.52, 0.96],[114.63, 119.48, 228.91, 0.52, 0.96],
[113.52, 118.08, 228.83, 0.52, 0.96],[114.90, 119.56, 228.92, 0.52, 0.96],[115.31, 120.04, 229.75, 0.52, 0.96],[113.55, 118.28, 228.85, 0.52, 0.96],
[114.73, 119.32, 228.64, 0.52, 0.96],[114.90, 119.60, 228.75, 0.52, 0.96],[115.74, 120.39, 229.55, 0.52, 0.96],[113.98, 118.88, 228.90, 0.52, 0.96],
[114.75, 119.25, 228.88, 0.52, 0.96],[114.29, 119.02, 228.97, 0.52, 0.96],[114.41, 119.17, 228.92, 0.52, 0.96],[172.94, 179.17, 230.67, 0.78, 0.97],
[172.12, 177.94, 230.67, 0.77, 0.97],[171.54, 177.20, 230.47, 0.77, 0.97],[171.69, 177.39, 230.50, 0.77, 0.97],[172.16, 177.83, 230.40, 0.77, 0.97],
[172.87, 178.36, 230.47, 0.77, 0.97],[171.71, 177.43, 230.45, 0.77, 0.97],[171.94, 177.60, 230.50, 0.77, 0.97],[171.82, 177.36, 230.46, 0.77, 0.97],
[172.10, 177.78, 230.51, 0.77, 0.97],[172.35, 178.07, 230.46, 0.77, 0.97],[172.07, 177.62, 230.57, 0.77, 0.97],[172.48, 178.13, 230.45, 0.77, 0.97],
[171.91, 177.54, 230.52, 0.77, 0.97],[172.64, 178.02, 230.40, 0.77, 0.97],[172.37, 178.00, 230.53, 0.77, 0.97],[172.18, 177.64, 230.30, 0.77, 0.97],
[172.02, 177.58, 230.47, 0.77, 0.97],[223.25, 229.93, 229.77, 1.00, 0.97],[223.37, 229.94, 229.85, 1.00, 0.97],[222.35, 229.00, 229.80, 1.00, 0.97],
[221.94, 228.78, 229.62, 1.00, 0.97],[221.68, 228.36, 229.59, 0.99, 0.97],[221.87, 228.55, 229.86, 0.99, 0.97],[222.08, 228.79, 229.58, 1.00, 0.97],
[223.70, 230.29, 229.94, 1.00, 0.97],[222.94, 229.59, 229.70, 1.00, 0.97],[223.41, 229.81, 229.90, 1.00, 0.97],[222.17, 229.00, 229.88, 1.00, 0.97],
[223.46, 229.91, 229.86, 1.00, 0.97],[221.85, 228.37, 229.75, 0.99, 0.97],[223.26, 229.89, 229.83, 1.00, 0.97],[223.35, 229.67, 229.92, 1.00, 0.97],
[222.78, 229.50, 229.92, 1.00, 0.97],[222.19, 229.08, 229.72, 1.00, 0.97],[223.67, 230.58, 229.93, 1.00, 0.97]])
y = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4])
from sklearn.svm import SVC
from sklearn.svm import NuSVC
clf = NuSVC(probability=True)
clf.fit(X, y)
#print("Device 4 Type Data Set: ")
#print(clf.predict([[221.41, 227.81, 229.60, 1.00, 0.95]]))
#print("Device 2 Type Data Set: ")
#print(clf.predict([[112.74, 118.39, 227.55, 0.55, 0.93]]))
#print("Device 1 Type Data Set: ")
#print(clf.predict([[56.28, 64.18, 227.84, 0.29, 0.96]]))
#print("Device 1 Type Data Set: ")
#print(clf.predict([[59.77, 63.66, 231.34, 0.24, 0.92]]))
#print("Device 3 Type Data Set: ")
#print(clf.predict([[171.37, 178.10, 230.23, 0.76, 0.97]]))
#print("Device X Type Data Set: ")
#print(clf.predict([[571.37, 478.10, 330.23, 0.76, 0.97]]))
X_input = np.asarray(X_input)
print(clf.predict(X_input))
X1=[[571.37, 478.10, 330.23, 0.76, 0.97]]
y1=[2]
#X_test=np.array([[571.37, 478.10, 330.23, 0.76, 0.97],[221.41, 227.81, 229.60, 1.00, 0.95],[112.74, 118.39, 227.55, 0.55, 0.93],[56.28, 64.18, 227.84, 0.29, 0.96],[171.37, 178.10, 230.23, 0.76, 0.97]])
#y_test=np.array([5,4,2,1,3])
#score1=clf.score(X_test,y_test)
#print("Score : ",score1)
#pre =clf.predict_proba(X_test)
#print(pre)
identify('58.62, 62.33, 232.45, 0.27, 0.94')
#v = clf.decision_function(X)
#print("dec",v)
| mit |
ChinaQuants/zipline | tests/utils/test_factory.py | 34 | 2175 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import pandas as pd
import pytz
import numpy as np
from zipline.utils.factory import (load_from_yahoo,
load_bars_from_yahoo)
class TestFactory(TestCase):
def test_load_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=stocks, start=start, end=end)
assert data.index[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.index[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.columns
np.testing.assert_raises(
AssertionError, load_from_yahoo, stocks=stocks,
start=end, end=start
)
def test_load_bars_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_bars_from_yahoo(stocks=stocks, start=start, end=end)
assert data.major_axis[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.major_axis[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.items
for ohlc in ['open', 'high', 'low', 'close', 'volume', 'price']:
assert ohlc in data.minor_axis
np.testing.assert_raises(
AssertionError, load_bars_from_yahoo, stocks=stocks,
start=end, end=start
)
| apache-2.0 |
RachitKansal/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
jpzk/evopy | evopy/examples/experiments/sum_constraints_cmaes_cmaesrrsvc/histograms.py | 1 | 3999 | # Using the magic encoding
# -*- coding: utf-8 -*-
'''
This file is part of evopy.
Copyright 2012 - 2013, Jendrik Poloczek
evopy is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
evopy is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
evopy. If not, see <http://www.gnu.org/licenses/>.
'''
from sys import path
path.append("../../../..")
from itertools import chain
from copy import deepcopy
from numpy import matrix, log10, array, linspace, sqrt, pi, exp
from pickle import load
from evopy.strategies.ori_dses_svc_repair import ORIDSESSVCR
from evopy.strategies.ori_dses_svc import ORIDSESSVC
from evopy.strategies.ori_dses import ORIDSES
from evopy.simulators.simulator import Simulator
from evopy.problems.sphere_problem_origin_r1 import SphereProblemOriginR1
from evopy.problems.sphere_problem_origin_r2 import SphereProblemOriginR2
from evopy.problems.schwefels_problem_26 import SchwefelsProblem26
from evopy.problems.tr_problem import TRProblem
from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel
from sklearn.cross_validation import KFold
from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore
from evopy.operators.scaling.scaling_dummy import ScalingDummy
from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear
from evopy.operators.termination.or_combinator import ORCombinator
from evopy.operators.termination.accuracy import Accuracy
from evopy.operators.termination.generations import Generations
from evopy.operators.termination.convergence import Convergence
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from pylab import hist, plot
from setup import *
cfcsf = file("output/sum_cfcs_file.save", "r")
cfcs = load(cfcsf)
def gauss(u):
return (1.0 / sqrt(2 * pi)) * exp((-(1.0/2.0) * (u**2)))
def nadaraya(x, data, labels, h):
labels = [0] + labels.tolist() + [0]
data = data.tolist()
data = [data[0] - (data[1] - data[0])] + data
print data, labels
bottom = sum(map(lambda sample : (1/h)*gauss((x - sample)/h), data))
top = sum(map(lambda sample, label : label * (1/h)* gauss((x - sample)/h), data, labels))
if(bottom == 0.0):
return 0
return float(top)/float(bottom)
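# nadaraya() above is the Nadaraya-Watson kernel regression estimator
#   m_h(x) = sum_i K_h(x - x_i) * y_i / sum_i K_h(x - x_i)
# with Gaussian kernel K_h(u) = (1/h) * gauss(u / h). The padding attaches each
# histogram bin count to its left edge and adds zero-valued anchor points one
# bin-width before the first edge and at the last edge, so the smoothed curve
# drops to zero outside the histogram range. The bandwidth h used in the loop
# below follows Silverman's rule of thumb, h = 1.06 * std(x) * n**(-1/5).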
for problem in problems:
figure_hist = plt.figure(figsize=(8,6), dpi=10, facecolor="w", edgecolor="k")
logit = lambda value, optimum : log10(value - optimum)
opt = problem().optimum_fitness()
    plt.xlabel('kumulierte Restriktionsaufrufe')  # German: "cumulative constraint function calls"
    plt.ylabel('absolute H' + u'ä' + 'ufigkeit')  # German: "absolute frequency"
x1 = cfcs[problem][optimizers[problem][0]]
x2 = cfcs[problem][optimizers[problem][1]]
    # Debugging breakpoint (disabled); uncomment to inspect x1/x2 interactively:
    # import pdb; pdb.set_trace()
minimum = min(x1 + x2)
maximum = max(x1 + x2)
plt.xlim([0, 20000])
pdfs1, bins1, patches1 = hist(x1, normed=False, alpha=0.5,\
histtype='step', edgecolor="g", bins = range(0, 20000+1000, 1000))
h = 1.06 * array(x1).std() * (len(x1)**(-1.0/5.0))
x = linspace(0, 20000, 100)
y = map(lambda x : nadaraya(x, bins1, pdfs1, h), x)
plot(x,y, linestyle="--", color="g")
pdfs2, bins2, patches2 = hist(x2, normed=False, alpha=0.5,\
histtype='step', edgecolor="#004779", bins = range(0, 20000+1000, 1000))
h = 1.06 * array(x2).std() * (len(x2)**(-1.0/5.0))
x = linspace(0, 20000, 100)
y = map(lambda x : nadaraya(x, bins2, pdfs2, h), x)
plot(x,y, linestyle="-", color="#004779")
pp = PdfPages("output/%s.pdf" % str(problem).replace('.', '-'))
plt.savefig(pp, format='pdf')
pp.close()
plt.clf()
| gpl-3.0 |
dongjoon-hyun/spark | python/pyspark/sql/group.py | 23 | 10681 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.group_ops import PandasGroupedOpsMixin
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
__all__ = ["GroupedData"]
def dfapi(f):
def _api(self):
name = f.__name__
jdf = getattr(self._jgd, name)()
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
def df_varargs_api(f):
def _api(self, *cols):
name = f.__name__
jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
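# Both decorators above dispatch by method name to the underlying JVM
# GroupedData object (self._jgd) and wrap the returned Java DataFrame back
# into a Python DataFrame; they let the aggregation methods below be declared
# with a docstring only, while the actual work happens on the JVM side.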
class GroupedData(PandasGroupedOpsMixin):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. versionadded:: 1.3
"""
def __init__(self, jgd, df):
self._jgd = jgd
self._df = df
self.sql_ctx = df.sql_ctx
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. versionadded:: 1.3.0
Parameters
----------
exprs : dict
a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
Notes
-----
Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
Examples
--------
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name='Alice', count(1)=1), Row(name='Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name='Alice', min(age)=2), Row(name='Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name='Alice', min_udf(age)=2), Row(name='Bob', min_udf(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
@dfapi
def count(self):
"""Counts the number of records for each group.
.. versionadded:: 1.3.0
Examples
--------
>>> sorted(df.groupBy(df.age).count().collect())
[Row(age=2, count=1), Row(age=5, count=1)]
"""
@df_varargs_api
def mean(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().mean('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
def avg(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().avg('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
def max(self, *cols):
"""Computes the max value for each numeric columns for each group.
.. versionadded:: 1.3.0
Examples
--------
>>> df.groupBy().max('age').collect()
[Row(max(age)=5)]
>>> df3.groupBy().max('age', 'height').collect()
[Row(max(age)=5, max(height)=85)]
"""
@df_varargs_api
def min(self, *cols):
"""Computes the min value for each numeric column for each group.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().min('age').collect()
[Row(min(age)=2)]
>>> df3.groupBy().min('age', 'height').collect()
[Row(min(age)=2, min(height)=80)]
"""
@df_varargs_api
def sum(self, *cols):
"""Compute the sum for each numeric columns for each group.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().sum('age').collect()
[Row(sum(age)=7)]
>>> df3.groupBy().sum('age', 'height').collect()
[Row(sum(age)=7, sum(height)=165)]
"""
def pivot(self, pivot_col, values=None):
"""
Pivots a column of the current :class:`DataFrame` and perform the specified aggregation.
There are two versions of pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
.. versionadded:: 1.6.0
Parameters
----------
pivot_col : str
Name of the column to pivot.
values :
List of values that will be translated to columns in the output DataFrame.
Examples
--------
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
>>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self._df)
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.group
globs = pyspark.sql.group.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.group tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
Row(name='Bob', age=5, height=85)]).toDF()
globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
Row(course="Java", year=2012, earnings=20000),
Row(course="dotNET", year=2012, earnings=5000),
Row(course="dotNET", year=2013, earnings=48000),
Row(course="Java", year=2013, earnings=30000)]).toDF()
globs['df5'] = sc.parallelize([
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.group, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
ericfourrier/decam | decam/utils.py | 1 | 6202 | # -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Create toolbox functions to use for the different pieces of code ot the package
"""
import warnings
from numpy.random import normal
from numpy.random import choice
import time
import pandas as pd
import numpy as np
def removena_numpy(array):
return array[~(np.isnan(array))]
def common_cols(df1,df2):
""" Return the intersection of commun columns name """
return list(set(df1.columns) & set(df2.columns))
def bootstrap_ci(x,n = 300 ,ci = 0.95):
"""
    this is a function depending on numpy to compute a bootstrap percentile
    confidence interval for the mean of a numpy array
Arguments
---------
x : a numpy ndarray
n : the number of boostrap samples
ci : the percentage confidence (float) interval in ]0,1[
Return
-------
a tuple (ci_inf,ci_up)
"""
low_per = 100*(1 - ci)/2
high_per = 100*ci + low_per
x = removena_numpy(x)
if not len(x):
return (np.nan,np.nan)
bootstrap_samples = choice(a = x,size = (len(x),n),replace = True).mean(axis = 0)
return np.percentile(bootstrap_samples,[low_per,high_per])
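def _bootstrap_ci_example():
    """ Illustrative usage sketch (not part of the original module): the sample
    values below are made up for demonstration only """
    sample = np.array([1.2, 3.4, 2.2, np.nan, 2.9, 3.1, 2.5])
    # NaN is dropped internally by removena_numpy before resampling
    ci_low, ci_up = bootstrap_ci(sample, n=500, ci=0.95)
    return ci_low, ci_up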
def clock(func):
""" decorator to measure the duration of each test of the unittest suite,
this is extensible for any kind of functions it will just add a print """
def clocked(*args):
t0 = time.time()
result = func(*args)
elapsed = (time.time() - t0) * 1000 # in ms
print('elapsed : [{0:0.3f}ms]'.format(elapsed))
return result
return clocked
def create_test_df():
""" Creating a test pandas DataFrame for the unittest suite """
test_df = pd.DataFrame({'id' : [i for i in range(1,1001)],'member_id': [10*i for i in range(1,1001)]})
test_df['na_col'] = np.nan
test_df['id_na'] = test_df.id
test_df.loc[1:3,'id_na'] = np.nan
test_df['constant_col'] = 'constant'
test_df['constant_col_num'] = 0
test_df['character_factor'] = [choice(list('ABCDEFG')) for _ in range(1000)]
test_df['num_factor'] = [choice([1,2,3,4]) for _ in range(1000)]
test_df['nearzerovar_variable'] = 'most_common_value'
test_df.loc[0,'nearzerovar_variable'] = 'one_value'
test_df['binary_variable'] = [choice([0,1]) for _ in range(1000)]
test_df['character_variable'] = [str(i) for i in range(1000)]
test_df['duplicated_column'] = test_df.id
test_df['many_missing_70'] = [1]*300 + [np.nan] * 700
test_df['character_variable_fillna'] = ['A']*300 + ['B']*200 + ['C']*200 +[np.nan]*300
test_df['numeric_variable_fillna'] = [1]*400 + [3]*400 + [np.nan]*200
test_df['num_variable'] = 100
test_df['outlier'] = normal(size = 1000)
test_df.loc[[1,10,100],'outlier'] = [10,5,10]
return test_df
def get_test_df_complete():
""" get the full test dataset from Lending Club open source database,
the purpose of this fuction is to be used in a demo ipython notebook """
import requests
from zipfile import ZipFile
    from io import BytesIO
zip_to_download = "https://resources.lendingclub.com/LoanStats3b.csv.zip"
r = requests.get(zip_to_download)
    zipfile = ZipFile(BytesIO(r.content))  # r.content is bytes, so it needs a BytesIO buffer
file_csv = zipfile.namelist()[0]
# we are using the c parser for speed
df = pd.read_csv(zipfile.open(file_csv), skiprows =[0], na_values = ['n/a','N/A',''],
parse_dates = ['issue_d','last_pymnt_d','next_pymnt_d','last_credit_pull_d'] )
zipfile.close()
df = df[:-2]
nb_row = float(len(df.index))
df['na_col'] = np.nan
df['constant_col'] = 'constant'
df['duplicated_column'] = df.id
df['many_missing_70'] = np.nan
df.loc[1:int(0.3*nb_row),'many_missing_70'] = 1
df['bad'] = 1
index_good = df['loan_status'].isin(['Fully Paid', 'Current','In Grace Period'])
df.loc[index_good,'bad'] = 0
return df
def psi(bench,target,group,print_df = True):
""" This function return the Population Stability Index, quantifying if the
distribution is stable between two states.
This statistic make sense and works is only working for numeric variables
for bench and target.
Params:
- bench is a numpy array with the reference variable.
- target is a numpy array of the new variable.
- group is the number of group you want consider.
"""
labels_q = np.percentile(bench,[(100.0/group)*i for i in range(group + 1)],interpolation = "nearest")
    # This is the right approach when you do not have a lot of unique values
ben_pct = (pd.cut(bench,bins = np.unique(labels_q),include_lowest = True).value_counts())/len(bench)
target_pct = (pd.cut(target,bins = np.unique(labels_q),include_lowest = True).value_counts())/len(target)
target_pct = target_pct.sort_index()# sort the index
ben_pct = ben_pct.sort_index() # sort the index
psi = sum((target_pct - ben_pct)*np.log(target_pct/ben_pct))
# Print results for better understanding
if print_df:
results = pd.DataFrame({'ben_pct': ben_pct.values,
'target_pct': target_pct.values},
index = ben_pct.index)
return {'data':results,'statistic': psi}
return psi
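def _psi_example():
    """ Illustrative usage sketch (not part of the original module): compares a
    benchmark sample against a slightly shifted target sample; data are synthetic """
    bench = normal(loc=0.0, scale=1.0, size=1000)
    target = normal(loc=0.2, scale=1.0, size=1000)
    # returns the PSI statistic only, since print_df is False
    return psi(bench, target, group=10, print_df=False)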
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [method for method in dir(object) if callable(getattr(object, method))]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print("\n".join(["%s %s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]))
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.'''
def new_func(*args, **kwargs):
warnings.warn("Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning)
return func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
| mit |
zzcclp/spark | python/pyspark/pandas/missing/series.py | 16 | 5929 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pyspark.pandas.missing import unsupported_function, unsupported_property, common
def _unsupported_function(method_name, deprecated=False, reason=""):
return unsupported_function(
class_name="pd.Series", method_name=method_name, deprecated=deprecated, reason=reason
)
def _unsupported_property(property_name, deprecated=False, reason=""):
return unsupported_property(
class_name="pd.Series", property_name=property_name, deprecated=deprecated, reason=reason
)
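# The two helpers above build placeholder attributes for pandas Series APIs
# that pandas-on-Spark does not implement; accessing or calling one raises an
# informative "not supported" error (and mentions the deprecation reason when
# the member is deprecated upstream) instead of failing silently.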
class MissingPandasLikeSeries(object):
# Functions
asfreq = _unsupported_function("asfreq")
autocorr = _unsupported_function("autocorr")
combine = _unsupported_function("combine")
convert_dtypes = _unsupported_function("convert_dtypes")
cov = _unsupported_function("cov")
ewm = _unsupported_function("ewm")
infer_objects = _unsupported_function("infer_objects")
interpolate = _unsupported_function("interpolate")
reorder_levels = _unsupported_function("reorder_levels")
resample = _unsupported_function("resample")
searchsorted = _unsupported_function("searchsorted")
set_axis = _unsupported_function("set_axis")
slice_shift = _unsupported_function("slice_shift")
to_hdf = _unsupported_function("to_hdf")
to_period = _unsupported_function("to_period")
to_sql = _unsupported_function("to_sql")
to_timestamp = _unsupported_function("to_timestamp")
tshift = _unsupported_function("tshift")
tz_convert = _unsupported_function("tz_convert")
tz_localize = _unsupported_function("tz_localize")
view = _unsupported_function("view")
# Deprecated functions
convert_objects = _unsupported_function("convert_objects", deprecated=True)
nonzero = _unsupported_function("nonzero", deprecated=True)
reindex_axis = _unsupported_function("reindex_axis", deprecated=True)
select = _unsupported_function("select", deprecated=True)
get_values = _unsupported_function("get_values", deprecated=True)
# Properties we won't support.
array = common.array(_unsupported_property)
duplicated = common.duplicated(_unsupported_property)
nbytes = _unsupported_property(
"nbytes",
reason="'nbytes' requires to compute whole dataset. You can calculate manually it, "
"with its 'itemsize', by explicitly executing its count. Use Spark's web UI "
"to monitor disk and memory usage of your application in general.",
)
# Functions we won't support.
memory_usage = common.memory_usage(_unsupported_function)
to_pickle = common.to_pickle(_unsupported_function)
to_xarray = common.to_xarray(_unsupported_function)
__iter__ = common.__iter__(_unsupported_function)
ravel = _unsupported_function(
"ravel",
reason="If you want to collect your flattened underlying data as an NumPy array, "
"use 'to_numpy().ravel()' instead.",
)
if LooseVersion(pd.__version__) < LooseVersion("1.0"):
# Deprecated properties
blocks = _unsupported_property("blocks", deprecated=True)
ftypes = _unsupported_property("ftypes", deprecated=True)
ftype = _unsupported_property("ftype", deprecated=True)
is_copy = _unsupported_property("is_copy", deprecated=True)
ix = _unsupported_property("ix", deprecated=True)
asobject = _unsupported_property("asobject", deprecated=True)
strides = _unsupported_property("strides", deprecated=True)
imag = _unsupported_property("imag", deprecated=True)
itemsize = _unsupported_property("itemsize", deprecated=True)
data = _unsupported_property("data", deprecated=True)
base = _unsupported_property("base", deprecated=True)
flags = _unsupported_property("flags", deprecated=True)
# Deprecated functions
as_blocks = _unsupported_function("as_blocks", deprecated=True)
as_matrix = _unsupported_function("as_matrix", deprecated=True)
clip_lower = _unsupported_function("clip_lower", deprecated=True)
clip_upper = _unsupported_function("clip_upper", deprecated=True)
compress = _unsupported_function("compress", deprecated=True)
get_ftype_counts = _unsupported_function("get_ftype_counts", deprecated=True)
get_value = _unsupported_function("get_value", deprecated=True)
set_value = _unsupported_function("set_value", deprecated=True)
valid = _unsupported_function("valid", deprecated=True)
to_dense = _unsupported_function("to_dense", deprecated=True)
to_sparse = _unsupported_function("to_sparse", deprecated=True)
to_msgpack = _unsupported_function("to_msgpack", deprecated=True)
compound = _unsupported_function("compound", deprecated=True)
put = _unsupported_function("put", deprecated=True)
ptp = _unsupported_function("ptp", deprecated=True)
# Functions we won't support.
real = _unsupported_property(
"real",
reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead.",
)
| apache-2.0 |
plissonf/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying numbers of Monte Carlo samples (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) will not necessarily be classified
into the region it lies in, since it does not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for the timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# the prediction intervals
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval built from
# the 5% and 95% quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
enricopal/STEM | src/Serializer.py | 1 | 10103 | import os
import numpy as np
import pandas as pd
import subprocess
import optparse
from collections import Counter
from sklearn.svm import SVC
from sklearn import cross_validation
from sklearn import grid_search
import stacking_create_training_set
from sklearn.externals import joblib
import xml.etree.ElementTree as ET
import time
# Trains the STEM model and creates a pickle object that can be loaded later
class Serializer:
def __init__(self, file_name, gold_standard_name, N, a):
self.file_name = file_name
self.gold_standard_name = gold_standard_name
self.N = N
self.a = a
def serialize_stem_duke(self):
start_time = time.time()
print 'Starting the entity matching process'
file_name = self.file_name #define the variables
gold_standard_name = self.gold_standard_name
N = int(self.N)
a = float(self.a)
#open files for writing
path_to_file = gold_standard_name
path_to_file = path_to_file.split('/gs/')
path_to_file = path_to_file[0]+'/'
output_file_raw = open(path_to_file+'ensemble_duke_output_raw_n%d.txt' %N,'w')
path_to_config_file = file_name.split('/')
path_to_config_list = path_to_config_file[0:-1] #the last element is the name of the file, I just want the path
#turn the list into a string by iterating and summing
path_to_config = ''
for i in path_to_config_list:
path_to_config += i
path_to_config += '/'
#output_file = open('ensemble_duke_stacking_output_T2_n%d.txt' %N,'w')
gold_standard_read = open(gold_standard_name,'rU')
#iterate for each tweaked configuration
#read actual threshold
tree = ET.parse(file_name)
root = tree.getroot()
for thresh in root.iter('threshold'):
central_thresh = float(thresh.text) #central value of the threshold
thresholds = np.linspace(central_thresh - a/2, central_thresh + a/2, N)
for threshold in thresholds:
for thresh in root.iter('threshold'):
thresh.text = str(threshold)
thresh.set('updated','yes')
path_to_config_and_name = path_to_config+'duke.xml'
tree.write(path_to_config_and_name) #generate a new modified configuration file
java_command = ["java","-Xmx5000m", "-cp", "../lib/Duke/duke-core/target/*:../lib/Duke/duke-dist/target/*:../lib/Duke/duke-es/target/*:../lib/Duke/duke-json/target/*:../lib/Duke/duke-lucene/target/*:../lib/Duke/duke-mapdb/target/*:../lib/Duke/duke-mongodb/target/*:../lib/Duke/duke-server/target/*:../lib/Duke/lucene_jar/*", "no.priv.garshol.duke.Duke", "--showmatches","--batchsize=100000", "--threads=4", "%s" %path_to_config_and_name]
output_file_raw.write(subprocess.check_output(java_command)) #call duke on the copy.xml file and write the raw output on file
output_file_raw.write('\n')
output_file_raw.write('End of run\n')
print 'End of run\n'
os.system('rm %s' %path_to_config_and_name) #remove the new modified configuration file
output_file_raw.close()
#create the training set, named training_set_T1_n%d.csv
crt_training = stacking_create_training_set.stacking_create_training_set(path_to_file+'ensemble_duke_output_raw_n%d.txt' %N,path_to_file+'training_set_n%d.csv' %N, N)
crt_training.stacking_create_training_set_duke(gold_standard_name)
#stacking_create_training_set(path_to_file+'ensemble_duke_output_raw_n%d.txt' %N,path_to_file+'training_set_n%d.csv' %N, gold_standard_name, N)
#read it and make machine learning on it
data = pd.read_csv(path_to_file+'training_set_n%d.csv' %N)
X = data.values[:,2:(N+2)] #x variables
y = np.array(data['y']) #class variables
#fit an SVM with rbf kernel
clf = SVC( kernel = 'rbf',cache_size = 1000)
#parameters = [{'kernel' : ['rbf'],'gamma' : np.logspace(-9,3,30),'C': np.logspace(-2,10,30)}, {'kernel' : ['linear'], 'C': np.logspace(-2,10,30)}]
parameters = {'gamma' : np.logspace(-9,3,30),'C': np.logspace(-2,10,30)}
gs_rbf = grid_search.GridSearchCV(clf,param_grid=parameters,cv = 4)
gs_rbf.fit(X,y)
clf = gs_rbf.best_estimator_
project_name = path_to_config_list[-1]
joblib.dump(clf, '../models/%s/svm_model_duke_N%d_a%.1f.pkl' %(project_name,N,a))
print("--- %s seconds ---" % (time.time() - start_time))
def serialize_stem_silk(self):
start_time = time.time()
file_name = self.file_name #define the variables
gold_standard_name = self.gold_standard_name
N = int(self.N)
a = float(self.a)
path_to_file = gold_standard_name #data/your_experiment/gs/gs.csv
path_to_file = path_to_file.split('/gs/')
path_to_file = path_to_file[0]+'/' #data/your_experiment/
path_to_config_file = file_name.split('/')
path_to_config_list = path_to_config_file[0:-1] #the last element is the name of the file, I just want the path, config/your_experiment/config.xml
#turn the list into a string by iterating and summing
path_to_config = ''
for i in path_to_config_list:
path_to_config += i
path_to_config += '/'
#open files for writing
output_file_raw = open(path_to_file+'ensemble_silk_output_raw_n%d.txt' %N,'w')
#output_file = open('ensemble_duke_stacking_output_T2_n%d.txt' %N,'w')
gold_standard_read = open(gold_standard_name,'rU')
#iterate for each tweaked configuration
#read actual threshold
tree = ET.parse(file_name)
root = tree.getroot()
for thresh in root.iter('Output'):
central_thresh = float(thresh.attrib['minConfidence']) #central value of the threshold
#parsing the silk xml config file to find the name of the output file
for k in root.iter('Output'):
for b in k.iter('Param'):
if b.attrib['name'] == 'file':
output_file_name = b.attrib['value']
thresholds = np.linspace(central_thresh - a/2, central_thresh + a/2, N) #list of thresholds
for threshold in thresholds:
for thresh in root.iter('Output'):
thresh.attrib['minConfidence'] = str(threshold)
print thresh.attrib['minConfidence']
path_to_config_and_name = path_to_config+'silk.xml' #dconfig/your_experiment/silk.xml
tree.write(path_to_config_and_name) #write the modified xml to file
java_command = "java -Xmx5000m -DconfigFile=%s -Dthreads=4 -jar ../lib/Silk/silk.jar" %path_to_config_and_name
os.system(java_command)
silk_output_name = path_to_config+output_file_name #config/your_experiment/links.nt
#open output file
silk_output = open(silk_output_name,'rU')
for i in silk_output.readlines():
output_file_raw.write(i)
silk_output.close()
output_file_raw.write('End of run\n')
print "End of run\n"
os.system('rm %s' %path_to_config_and_name) #remove the new modified configuration file
output_file_raw.close()
#create the training set, named training_set_T1_n%d.csv
crt_training = stacking_create_training_set.stacking_create_training_set(path_to_file+'ensemble_silk_output_raw_n%d.txt' %N,path_to_file+'training_set_silk_n%d.csv' %N, N)
crt_training.stacking_create_training_set_silk(gold_standard_name)
#read it and make machine learning on it
data = pd.read_csv(path_to_file+'training_set_silk_n%d.csv' %N)
X = data.values[:,2:(N+2)] #x variables
y = np.array(data['y']) #class variables
#fit an SVM with rbf kernel
clf = SVC( kernel = 'rbf',cache_size = 1000)
parameters = {'gamma' : np.logspace(-9,3,30),'C': np.logspace(-2,10,30)}
gs_rbf = grid_search.GridSearchCV(clf,param_grid=parameters,cv = 4)
gs_rbf.fit(X,y)
clf = gs_rbf.best_estimator_
joblib.dump(clf, 'svm_model_silk_N%d_a%f.pkl' %(N,a))
print("--- %s seconds ---" % (time.time() - start_time))
if __name__ == '__main__':
#defining the options of the script
#INPUTS: -i duke_config.xml, -N number_of_configurations, -a amplitude_of_perturbation, -g gold_standard_name
parser = optparse.OptionParser()
parser.add_option('-i','--input', dest = 'file_name', help = 'file_name')
parser.add_option('-N','--number', dest = 'N', help = 'number of classifiers',type = int)
parser.add_option('-a','--amplitude', dest = 'a', help = 'amplitude of perturbation',type = float)
parser.add_option('-g','--gold', dest = 'gold_standard_name', help = 'gold_standard_name')
parser.add_option('-s', '--software', dest = 'software_name', help = 'software name')
(options, args) = parser.parse_args()
if options.file_name is None:
options.file_name = raw_input('Enter file name:')
if options.N is None:
options.N = raw_input('Enter number of classifiers:')
if options.a is None:
options.a = 0.05 #default to 0.05
if options.gold_standard_name is None:
options.gold_standard_name = raw_input('Enter gold standard file name:')
if options.software_name is None:
options.software_name = raw_input('Enter software name, silk or duke:')
file_name = options.file_name #define the variables
gold_standard_name = options.gold_standard_name
N = int(options.N)
a = float(options.a)
software_name = options.software_name
stem_serialize = Serializer(file_name,gold_standard_name,N,a)
if software_name == 'silk':
stem_serialize.serialize_stem_silk()
else:
stem_serialize.serialize_stem_duke()
| apache-2.0 |
scikit-hep/root_numpy | benchmarks/bench_tree2array.py | 3 | 4664 | from __future__ import print_function
from rootpy.io import TemporaryFile
import rootpy
from root_numpy import array2tree, tree2array
import numpy as np
import uuid
import random
import string
import timeit
import pickle
import platform
import matplotlib.pyplot as plt
import os
with open('hardware.pkl', 'r') as pkl:
info = pickle.load(pkl)
# construct system hardware information string
hardware = '{cpu}\nStorage: {hdd}\nROOT-{root}\nPython-{python}\nNumPy-{numpy}'.format(
cpu=info['CPU'], hdd=info['HDD'],
root=rootpy.ROOT_VERSION, python=platform.python_version(),
numpy=np.__version__)
rfile = TemporaryFile()
def randomword(length):
return ''.join(random.choice(string.lowercase) for i in range(length))
def make_tree(entries, branches=1, dtype=np.double):
dtype = np.dtype([(randomword(20), dtype) for idx in range(branches)])
array = np.zeros(entries, dtype=dtype)
return array2tree(array, name=uuid.uuid4().hex)
# warm up
print("warming up... ", end="")
for i in range(30):
tree = make_tree(100, branches=1)
branchname = tree.GetListOfBranches()[0].GetName()
tree2array(tree)
tree.Draw(branchname, "", "goff")
print("done\n")
# time vs entries
num_entries = np.logspace(1, 7, 20, dtype=np.int)
root_numpy_times = []
root_times = []
print("{0:>10} {1:<10} {2:<10}".format("entries", "root_numpy", "ROOT"))
for entries in num_entries:
print("{0:>10}".format(entries), end="")
if entries < 1e3:
iterations = 200
elif entries < 1e5:
iterations = 20
else:
iterations = 4
tree = make_tree(entries, branches=1)
branchname = tree.GetListOfBranches()[0].GetName()
root_numpy_times.append(
min(timeit.Timer('tree2array(tree)',
setup='from root_numpy import tree2array; from __main__ import tree').repeat(3, iterations)) / iterations)
root_times.append(
min(timeit.Timer('draw("{0}", "", "goff")'.format(branchname),
setup='from __main__ import tree; draw = tree.Draw').repeat(3, iterations)) / iterations)
print(" {0:10.5f}".format(root_numpy_times[-1]), end="")
print(" {0:10.5f}".format(root_times[-1]))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3.5))
ax1.plot(num_entries, root_numpy_times, '-o', label='root_numpy.tree2array()', linewidth=1.5)
ax1.plot(num_entries, root_times, '--o', label='ROOT.TTree.Draw()', linewidth=1.5)
ax1.set_xscale("log", nonposx='clip')
ax1.set_yscale("log", nonposx='clip')
ax1.legend(loc=(0.03, 0.7), frameon=False, fontsize=10)
ax1.set_ylabel('time [s]')
ax1.set_xlabel('number of entries')
ax1.text(0.03, 0.97, 'tree contains a single branch',
verticalalignment='top', horizontalalignment='left',
transform=ax1.transAxes, fontsize=12)
# time vs branches
num_branches = np.linspace(1, 10, 10, dtype=np.int)
root_numpy_times = []
root_times = []
iterations = 10
print("\n{0:>10} {1:<10} {2:<10}".format("branches", "root_numpy", "ROOT"))
for branches in num_branches:
print("{0:>10}".format(branches), end="")
tree = make_tree(1000000, branches=branches)
branchnames = [branch.GetName() for branch in tree.GetListOfBranches()]
branchname = ':'.join(branchnames)
root_numpy_times.append(
min(timeit.Timer('tree2array(tree)',
setup='from root_numpy import tree2array; from __main__ import tree').repeat(3, iterations)) / iterations)
opt = 'candle' if branches > 1 else ''
root_times.append(
min(timeit.Timer('draw("{0}", "", "goff {1}")'.format(branchname, opt),
setup='from __main__ import tree; draw = tree.Draw').repeat(3, iterations)) / iterations)
print(" {0:10.5f}".format(root_numpy_times[-1]), end="")
print(" {0:10.5f}".format(root_times[-1]))
ax2.plot(num_branches, root_numpy_times, '-o', label='root_numpy.tree2array()', linewidth=1.5)
ax2.plot(num_branches, root_times, '--o', label='ROOT.TTree.Draw()', linewidth=1.5)
#ax2.legend(loc='lower right', frameon=False, fontsize=12)
ax2.set_ylabel('time [s]')
ax2.set_xlabel('number of branches')
ax2.text(0.03, 0.97, 'tree contains 1M entries per branch',
verticalalignment='top', horizontalalignment='left',
transform=ax2.transAxes, fontsize=12)
ax2.text(0.03, 0.87, hardware,
verticalalignment='top', horizontalalignment='left',
transform=ax2.transAxes, fontsize=10)
fig.tight_layout()
fname = 'bench_tree2array_{0}.{1}'
ipng = 0
while os.path.exists(fname.format(ipng, 'png')):
ipng += 1
fig.savefig(fname.format(ipng, 'png'), transparent=True)
fig.savefig(fname.format(ipng, 'pdf'), transparent=True)
| bsd-3-clause |
B3AU/waveTree | examples/ensemble/plot_gradient_boosting_quantile.py | 14 | 2087 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import pylab as pl
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# the prediction intervals
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval based on
# the two quantile models
fig = pl.figure()
pl.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'b.', markersize=10, label=u'Observations')
pl.plot(xx, y_pred, 'r-', label=u'Prediction')
pl.plot(xx, y_upper, 'k-')
pl.plot(xx, y_lower, 'k-')
pl.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
        alpha=.5, fc='b', ec='None', label='90% prediction interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
kchodorow/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 18 | 3978 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
joshbohde/scikit-learn | examples/svm/plot_svm_anova.py | 2 | 1977 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
################################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality regime
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2*np.random.random((n_samples, 200))))
################################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have an full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC())])
################################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
pl.errorbar(percentiles, score_means, np.array(score_stds))
pl.title(
'Performance of the SVM-Anova varying the percentile of features selected')
pl.xlabel('Percentile')
pl.ylabel('Prediction rate')
pl.axis('tight')
pl.show()
| bsd-3-clause |
Habasari/sms-tools | software/transformations_interface/hpsTransformations_function.py | 23 | 6610 | # function call to the transformation functions of relevance for the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
def analysis(inputFile='../../sounds/sax-phrase-short.wav', window='blackman', M=601, N=1024, t=-100,
minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01, stocf=0.1):
"""
Analyze a sound with the harmonic plus stochastic model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size
N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks
minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics
minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound
f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
stocf: decimation factor used for the stochastic approximation
returns inputFile: input file name; fs: sampling rate of input file,
hfreq, hmag: harmonic frequencies, magnitude; mYst: stochastic residual
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# compute the harmonic plus stochastic model of the whole sound
hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
# synthesize the harmonic plus stochastic model without original phases
y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, np.array([]), mYst, Ns, H, fs)
# write output sound
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModel.wav'
UF.wavwrite(y,fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 15000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot spectrogram stochastic compoment
plt.subplot(3,1,2)
numFrames = int(mYst[:,0].size)
sizeEnv = int(mYst[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram
if (hfreq.shape[1] > 0):
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show(block=False)
return inputFile, fs, hfreq, hmag, mYst
def transformation_synthesis(inputFile, fs, hfreq, hmag, mYst, freqScaling = np.array([0, 1.2, 2.01, 1.2, 2.679, .7, 3.146, .7]),
freqStretching = np.array([0, 1, 2.01, 1, 2.679, 1.5, 3.146, 1.5]), timbrePreservation = 1,
timeScaling = np.array([0, 0, 2.138, 2.138-1.0, 3.146, 3.146])):
"""
transform the analysis values returned by the analysis function and synthesize the sound
inputFile: name of input file
fs: sampling rate of input file
hfreq, hmag: harmonic frequencies and magnitudes
mYst: stochastic residual
freqScaling: frequency scaling factors, in time-value pairs (value of 1 no scaling)
freqStretching: frequency stretching factors, in time-value pairs (value of 1 no stretching)
timbrePreservation: 1 preserves original timbre, 0 it does not
timeScaling: time scaling factors, in time-value pairs
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# frequency scaling of the harmonics
hfreqt, hmagt = HT.harmonicFreqScaling(hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs)
# time scaling the sound
yhfreq, yhmag, ystocEnv = HPST.hpsTimeScale(hfreqt, hmagt, mYst, timeScaling)
# synthesis from the trasformed hps representation
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)
# write output sound
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModelTransformation.wav'
UF.wavwrite(y,fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 6))
# frequency range to plot
maxplotfreq = 15000.0
# plot spectrogram of transformed stochastic compoment
plt.subplot(2,1,1)
numFrames = int(ystocEnv[:,0].size)
sizeEnv = int(ystocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot transformed harmonic on top of stochastic spectrogram
if (yhfreq.shape[1] > 0):
harms = yhfreq*np.less(yhfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram')
# plot the output sound
plt.subplot(2,1,2)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
# analysis
inputFile, fs, hfreq, hmag, mYst = analysis()
# transformation and synthesis
transformation_synthesis(inputFile, fs, hfreq, hmag, mYst)
plt.show()
| agpl-3.0 |
google/timesketch | timesketch/lib/analyzers/browser_timeframe.py | 1 | 11171 | """Sketch analyzer plugin for browser timeframe."""
from __future__ import unicode_literals
import collections
import pandas as pd
from timesketch.lib import emojis
from timesketch.lib.analyzers import interface
from timesketch.lib.analyzers import manager
from timesketch.lib.analyzers import utils
def get_list_of_consecutive_sequences(hour_list):
"""Returns a list of runs from a list of numbers.
Args:
hour_list: a list of integers.
Returns:
A list of tuples, where each tuple indicates the first
and last record of a consecutive run of numbers, eg
list 1, 2, 3, 7, 8, 9 would produce the output
of (1,3), (7, 9).
"""
runs = []
if not hour_list:
return runs
start = hour_list[0]
now = start
for hour in hour_list[1:]:
if hour == (now + 1):
now = hour
continue
runs.append((start, now))
start = hour
now = start
if not runs:
runs = [(hour_list[0], hour_list[-1])]
last_start, last_end = runs[-1]
if (start != last_start) and (last_end != now):
runs.append((start, now))
return runs
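# Illustrative behaviour, traced from the implementation above (shown as a
# sketch rather than executable doctests):
#
#   get_list_of_consecutive_sequences([1, 2, 3, 7, 8, 9])  ->  [(1, 3), (7, 9)]
#   get_list_of_consecutive_sequences([4])                 ->  [(4, 4)]
#   get_list_of_consecutive_sequences([])                  ->  []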
def fix_gap_in_list(hour_list):
"""Returns a list with gaps in it fixed.
Args:
hour_list: a list of integers in a sequence, potentially
with holes in the sequence.
Returns:
A list that consists of the input numbers with single
integer gaps filled. The list should not have more than
two runs. Therefore if there are more than two runs after
all gaps have been filled the "extra" runs will be dropped.
"""
if not hour_list:
return hour_list
runs = get_list_of_consecutive_sequences(hour_list)
len_runs = len(runs)
for i in range(0, len_runs - 1):
_, upper = runs[i]
next_lower, _ = runs[i+1]
if (upper + 1) == (next_lower - 1):
hour_list.append(upper + 1)
hours = sorted(hour_list)
runs = get_list_of_consecutive_sequences(hour_list)
if len(runs) <= 2:
return hours
if len_runs < len(runs):
return fix_gap_in_list(hours)
# Now we need to remove runs, we only need the first and last.
run_start = runs[0]
run_end = runs[-1]
hours = list(range(0, run_start[1] + 1))
hours.extend(range(run_end[0], run_end[1] + 1))
return sorted(hours)
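# Illustrative behaviour, traced from the implementation above:
#
#   fix_gap_in_list([1, 2, 4, 5])         ->  [1, 2, 3, 4, 5]
#   fix_gap_in_list([0, 1, 3, 4, 9, 10])  ->  [0, 1, 2, 3, 4, 9, 10]
#
# In the second case two runs remain, which is the maximum the function keeps.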
def get_active_hours(frame):
"""Return a list of the hours with the most activity within a frame.
Args:
frame: a pandas DataFrame object that contains a datetime column.
Returns:
A tuple that contains three items:
1. list of hours where the most activity within the DataFrame
occurs.
2. the threshold used to determine what is considered to be
an active hour.
3. a DataFrame object containing the aggregation.
"""
frame_count = frame[['datetime', 'hour']].groupby(
'hour', as_index=False).count()
frame_count['count'] = frame_count['datetime']
del frame_count['datetime']
stats = frame_count['count'].describe()
# Define few different options for the threshold value of what constitutes
# an active hour. Then we choose the method that has the highest active
# hour count, as long as it is between 3 and 12 hours.
thresholds = {
stats['75%'] - stats['mean']: 0,
stats['75%']: 0,
stats['50%']: 0,
stats['25%']: 0
}
for threshold in thresholds:
threshold_filter = frame_count['count'] >= threshold
hours = list(frame_count[threshold_filter].hour.values)
hours = sorted(hours)
hour_len = len(hours)
if 3 <= hour_len <= 12:
thresholds[threshold] = hour_len
threshold_counter = collections.Counter(thresholds)
threshold, _ = threshold_counter.most_common(1)[0]
threshold_filter = frame_count['count'] >= threshold
hours = list(frame_count[threshold_filter].hour.values)
hours = sorted(hours)
runs = get_list_of_consecutive_sequences(hours)
# There should either be a single run or at most two.
number_runs = len(runs)
if number_runs == 1:
return hours, threshold, frame_count
if number_runs == 2 and runs[0][0] == 0:
# Two runs, first one starts at hour zero.
return hours, threshold, frame_count
return fix_gap_in_list(hours), threshold, frame_count
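# Illustrative usage (assumes a pandas DataFrame shaped like the one built in
# BrowserTimeframeSketchPlugin.run() below, i.e. with 'datetime' and 'hour'
# columns; variable names are placeholders):
#
#   frame = pd.DataFrame({'datetime': event_times, 'hour': hours_of_day})
#   active_hours, threshold, counts = get_active_hours(frame)
#   # e.g. active_hours == [8, 9, ..., 17] for a nine-to-five browsing pattern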
class BrowserTimeframeSketchPlugin(interface.BaseAnalyzer):
"""Analyzer for BrowserTimeframe."""
NAME = 'browser_timeframe'
DISPLAY_NAME = 'Browser timeframe'
    DESCRIPTION = ('Determine user activity hours by finding the frequency of '
                   'browsing events')
DEPENDENCIES = frozenset()
def run(self):
"""Entry point for the analyzer.
Returns:
String with summary of the analyzer result
"""
# TODO: Once we can identify user generated events this should be
# updated to include all user generated events instead of focusing
# solely on browser events.
query = 'source_short:"WEBHIST" OR source:"WEBHIST"'
return_fields = ['datetime', 'timestamp', 'url', 'tag', '__ts_emojis']
data_frame = self.event_pandas(
query_string=query, return_fields=return_fields)
if not data_frame.shape[0]:
return 'No browser events discovered.'
sleeping_emoji = emojis.get_emoji('SLEEPING_FACE')
# This query filters out all timestamps that have a zero timestamp as
# well as those that occur after 2038-01-01, this may need to be
# changed in the future.
data_frame['timestamp'] = pd.to_numeric(data_frame.timestamp)
data_frame = data_frame[
(data_frame.timestamp > 0) & (
data_frame.timestamp < 2145916800000000)]
data_frame['datetime'] = pd.to_datetime(
data_frame.timestamp / 1e6, utc=True, unit='s')
data_frame['hour'] = pd.to_numeric(
data_frame.datetime.dt.strftime('%H'))
total_count = data_frame.shape[0]
activity_hours, threshold, aggregation = get_active_hours(data_frame)
if not activity_hours:
return 'Did not discover any activity hours.'
hour_count = dict(aggregation.values.tolist())
data_frame_outside = data_frame[~data_frame.hour.isin(activity_hours)]
for event in utils.get_events_from_data_frame(
data_frame_outside, self.datastore):
event.add_tags(['outside-active-hours'])
hour = event.source.get('hour')
this_hour_count = hour_count.get(hour)
event.add_attributes(
{'activity_summary': (
'Number of events for this hour ({0:d}): {1:d}, with the '
'threshold value: {2:0.2f}').format(
hour, this_hour_count, threshold),
'hour_count': this_hour_count})
event.add_emojis([sleeping_emoji])
event.commit()
tagged_events, _ = data_frame_outside.shape
if tagged_events:
story = self.sketch.add_story('{0:s} - {1:s}'.format(
utils.BROWSER_STORY_TITLE, self.timeline_name))
story.add_text(
utils.BROWSER_STORY_HEADER, skip_if_exists=True)
# Find some statistics about the run time of the analyzer.
percent = (tagged_events / total_count) * 100.0
last_hour = activity_hours[0]
end = 0
for hour in activity_hours[1:]:
if hour != last_hour + 1:
end = hour
break
last_hour = hour
if not end:
first = activity_hours[0]
last = activity_hours[-1]
else:
first = end
index = activity_hours.index(end)
last = activity_hours[index - 1]
story.add_text(
'## Browser Timeframe Analyzer\n\nThe browser timeframe '
'analyzer discovered {0:d} browser events that occurred '
'outside of the typical browsing window of this browser '
'history ({1:s}), or around {2:0.2f}% of the {3:d} total '
'events.\n\nThe analyzer determines the activity hours by '
'finding the frequency of browsing events per hour, and then '
'discovering the longest block of most active hours before '
'proceeding with flagging all events outside of that time '
'period. This information can be used by other analyzers '
'or by manually looking for other activity within the '
'inactive time period to find unusual actions.\n\n'
'The hours considered to be active hours are the hours '
'between {4:02d} and {5:02d} (hours in UTC) and the '
'threshold used to determine if an hour was considered to be '
'active was: {6:0.2f}.'.format(
tagged_events, self.timeline_name, percent, total_count,
first, last, threshold))
group = self.sketch.add_aggregation_group(
name='Browser Activity Per Hour',
description='Created by the browser timeframe analyzer')
group.set_layered()
params = {
'data': aggregation.to_dict(orient='records'),
'title': 'Browser Activity Per Hour ({0:s})'.format(
self.timeline_name),
'field': 'hour',
'order_field': 'hour',
}
agg_obj = self.sketch.add_aggregation(
name='Browser Activity Per Hour ({0:s})'.format(
self.timeline_name), agg_name='manual_feed',
agg_params=params, chart_type='barchart',
description='Created by the browser timeframe analyzer',
label='informational')
group.add_aggregation(agg_obj)
lines = [{'hour': x, 'count': threshold} for x in range(0, 24)]
params = {
'data': lines,
'title': 'Browser Timeframe Threshold ({0:s})'.format(
self.timeline_name),
'field': 'hour',
'order_field': 'hour',
'chart_color': 'red',
}
agg_line = self.sketch.add_aggregation(
name='Browser Activity Per Hour ({0:s})'.format(
self.timeline_name), agg_name='manual_feed',
agg_params=params, chart_type='linechart',
description='Created by the browser timeframe analyzer',
label='informational')
group.add_aggregation(agg_line)
story.add_aggregation_group(group)
return (
'Tagged {0:d} out of {1:d} events as outside of normal '
'active hours.').format(tagged_events, total_count)
manager.AnalysisManager.register_analyzer(BrowserTimeframeSketchPlugin)
| apache-2.0 |
spallavolu/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
TeamHG-Memex/eli5 | tests/test_lime.py | 1 | 6702 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
from sklearn.feature_extraction.text import HashingVectorizer, CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import make_pipeline
import pytest
from eli5.lime import TextExplainer
from eli5.formatters import format_as_text
from .utils import format_as_all, check_targets_scores
def test_lime_explain_probabilistic(newsgroups_train):
docs, y, target_names = newsgroups_train
try:
vec = HashingVectorizer(alternate_sign=False)
except TypeError:
# sklearn < 0.19
vec = HashingVectorizer(non_negative=True)
clf = MultinomialNB()
X = vec.fit_transform(docs)
clf.fit(X, y)
print(clf.score(X, y))
pipe = make_pipeline(vec, clf)
doc = docs[0]
te = TextExplainer(random_state=42)
te.fit(doc, pipe.predict_proba)
print(te.metrics_)
assert te.metrics_['score'] > 0.7
assert te.metrics_['mean_KL_divergence'] < 0.1
res = te.explain_prediction(top=20, target_names=target_names)
expl = format_as_text(res)
print(expl)
assert 'file' in expl
def test_lime_flat_neighbourhood(newsgroups_train):
docs, y, target_names = newsgroups_train
doc = docs[0]
@_apply_to_list
def predict_proba(doc):
""" This function predicts non-zero probabilities only for 3 labels """
proba_graphics = [0, 1.0, 0, 0]
proba_other = [0.9, 0, 0.1, 0]
return proba_graphics if 'file' in doc else proba_other
te = TextExplainer(expand_factor=None, random_state=42)
te.fit(doc, predict_proba)
print(te.metrics_)
print(te.clf_.classes_, target_names)
res = te.explain_prediction(top=20, target_names=target_names)
for expl in format_as_all(res, te.clf_):
assert 'file' in expl
assert "comp.graphics" in expl
@pytest.mark.parametrize(['token_pattern'],
[[None], ['.']])
def test_text_explainer_char_based(token_pattern):
text = "Hello, world!"
predict_proba = substring_presence_predict_proba('lo')
te = TextExplainer(char_based=True, token_pattern=token_pattern)
te.fit(text, predict_proba)
print(te.metrics_)
assert te.metrics_['score'] > 0.95
assert te.metrics_['mean_KL_divergence'] < 0.1
res = te.explain_prediction()
format_as_all(res, te.clf_)
check_targets_scores(res)
assert res.targets[0].feature_weights.pos[0].feature == 'lo'
# another way to look at results (not that useful for char ngrams)
res = te.explain_weights()
assert res.targets[0].feature_weights.pos[0].feature == 'lo'
def test_text_explainer_position_dependent():
text = "foo bar baz egg spam bar baz egg spam ham"
@_apply_to_list
def predict_proba(doc):
tokens = doc.split()
# 'bar' is only important in the beginning of the document,
# not in the end
return [0, 1] if len(tokens) >= 2 and tokens[1] == 'bar' else [1, 0]
# bag of words model is not powerful enough to explain predict_proba above
te = TextExplainer(random_state=42, vec=CountVectorizer())
te.fit(text, predict_proba)
print(te.metrics_)
assert te.metrics_['score'] < 0.9
assert te.metrics_['mean_KL_divergence'] > 0.3
# position_dependent=True can make it work
te = TextExplainer(position_dependent=True, random_state=42)
te.fit(text, predict_proba)
print(te.metrics_)
assert te.metrics_['score'] > 0.95
assert te.metrics_['mean_KL_divergence'] < 0.3
expl = te.explain_prediction()
format_as_all(expl, te.clf_)
# it is also possible to almost make it work using a custom vectorizer
vec = CountVectorizer(ngram_range=(1, 2))
te = TextExplainer(vec=vec, random_state=42)
te.fit(text, predict_proba)
print(te.metrics_)
assert te.metrics_['score'] > 0.95
assert te.metrics_['mean_KL_divergence'] < 0.3
expl = te.explain_prediction()
format_as_all(expl, te.clf_)
# custom vectorizers are not supported when position_dependent is True
with pytest.raises(ValueError):
te = TextExplainer(position_dependent=True, vec=HashingVectorizer())
def test_text_explainer_custom_classifier():
text = "foo-bar baz egg-spam"
predict_proba = substring_presence_predict_proba('bar')
# use decision tree to explain the prediction
te = TextExplainer(clf=DecisionTreeClassifier(max_depth=2))
te.fit(text, predict_proba)
print(te.metrics_)
assert te.metrics_['score'] > 0.99
assert te.metrics_['mean_KL_divergence'] < 0.01
expl = te.explain_prediction()
format_as_all(expl, te.clf_)
# with explain_weights we can get a nice tree representation
expl = te.explain_weights()
print(expl.decision_tree.tree)
assert expl.decision_tree.tree.feature_name == "bar"
format_as_all(expl, te.clf_)
def test_text_explainer_token_pattern():
text = "foo-bar baz egg-spam"
predict_proba = substring_presence_predict_proba('bar')
# a different token_pattern
te = TextExplainer(token_pattern=r'(?u)\b[-\w]+\b')
te.fit(text, predict_proba)
print(te.metrics_)
assert te.metrics_['score'] > 0.95
assert te.metrics_['mean_KL_divergence'] < 0.1
expl = te.explain_prediction()
format_as_all(expl, te.clf_)
assert expl.targets[0].feature_weights.pos[0].feature == 'foo-bar'
def test_text_explainer_show_methods():
pytest.importorskip('IPython')
from IPython.display import HTML
text = "Hello, world!"
@_apply_to_list
def predict_proba(doc):
return [0.0, 1.0] if 'lo' in doc else [1.0, 0.0]
te = TextExplainer()
te.fit(text, predict_proba)
pred_expl = te.show_prediction()
assert isinstance(pred_expl, HTML)
assert 'lo' in pred_expl.data
weight_expl = te.show_weights()
assert isinstance(weight_expl, HTML)
assert 'lo' in weight_expl.data
def test_text_explainer_rbf_sigma():
text = 'foo bar baz egg spam'
predict_proba = substring_presence_predict_proba('bar')
te1 = TextExplainer().fit(text, predict_proba)
te2 = TextExplainer(rbf_sigma=0.1).fit(text, predict_proba)
te3 = TextExplainer(rbf_sigma=1.0).fit(text, predict_proba)
assert te1.similarity_.sum() < te3.similarity_.sum()
assert te1.similarity_.sum() > te2.similarity_.sum()
def substring_presence_predict_proba(substring):
@_apply_to_list
def predict_proba(doc):
return [0, 1] if substring in doc else [1, 0]
return predict_proba
def _apply_to_list(func):
def wrapper(docs):
return np.array([func(doc) for doc in docs])
return wrapper
| mit |
OshynSong/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
madjelan/scikit-learn | sklearn/externals/joblib/__init__.py | 86 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: efficiently persisting
  arbitrary objects containing large data is hard. Using
  joblib's caching mechanism avoids hand-written persistence and
  implicitly links the file on disk to the execution context of
  the original Python object. As a result, joblib's persistence is
  good for resuming an application's status or a computational job, e.g.
  after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
jpautom/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
diogo149/BooMLet | boomlet/utils/estimators.py | 1 | 3085 | import math
from copy import deepcopy
import numpy as np
from sklearn.cross_validation import ShuffleSplit
from sklearn.preprocessing import LabelBinarizer
def flexible_int(size, in_val=None):
""" allows for flexible input as a size
"""
if in_val is None:
return size
elif isinstance(in_val, (float, int)):
if isinstance(in_val, float):
assert abs(in_val) <= 1.0, in_val
in_val = int(round(in_val * size))
if in_val < 0:
in_val += size # setting negative values as amount not taken
return max(0, min(size, in_val))
elif isinstance(in_val, str):
if in_val == "sqrt":
return int(round(math.sqrt(size)))
elif in_val == "log2":
return int(round(math.log(size) / math.log(2)))
elif in_val == "auto":
return size
raise Exception("Improper flexible_int input: {}".format(in_val))
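# Illustrative values, traced from the branches above:
#
#   flexible_int(100, None)    ->  100   # default: the full size
#   flexible_int(100, 0.25)    ->   25   # float: fraction of size
#   flexible_int(100, -10)     ->   90   # negative int: amount left out
#   flexible_int(100, 'sqrt')  ->   10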
def fit_predict(clf, X, y, X_test):
tmp_clf = deepcopy(clf)
tmp_clf.fit(X, y)
return tmp_clf.predict(X_test)
def quick_cv(clf,
X,
y,
score_func,
n_iter=3,
test_size=0.1,
random_state=None):
""" returns the cross validation """
cv = ShuffleSplit(y.shape[0],
n_iter=n_iter,
test_size=test_size,
random_state=random_state,
)
scores = []
for train, test in cv:
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
preds = fit_predict(clf, X_train, y_train, X_test)
scores.append(score_func(y_test, preds))
return sum(scores) / float(len(scores))
def quick_score(clf,
X,
y,
score_func,
X_valid=None,
y_valid=None,
n_iter=3,
test_size=0.1,
random_state=None):
"""scores on a validation set, if available, else with cross validation"""
if X_valid is not None:
return score_func(y_valid, fit_predict(clf, X, y, X_valid))
else:
return quick_cv(clf, X, y, score_func, n_iter, test_size, random_state)
def gaussian_kernel_median_trick(X, sample_size='sqrt'):
"""
From: http://www.machinedlearnings.com/2013/08/cosplay.html
estimate a kernel bandwidth using the "median trick"
this is a standard Gaussian kernel technique
"""
sample_size = flexible_int(X.shape[0], sample_size)
perm = np.random.permutation(np.arange(X.shape[0]))
sample = X[perm[:sample_size]]
norms = np.sum(sample ** 2, axis=1).reshape(-1, 1)
tmp = np.dot(norms, np.ones((1, sample_size)))
dist = tmp + tmp.T - 2 * np.dot(sample, sample.T)
scale = 1 / np.sqrt(np.median(dist))
return scale
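# The returned value is roughly 1 / median_{i,j} ||x_i - x_j|| over the sample.
# One possible use with an RBF kernel (an assumption, not prescribed by this
# module):
#
#   scale = gaussian_kernel_median_trick(X)
#   K = np.exp(-(scale ** 2) * dist_squared)  # dist_squared = pairwise ||x - y||^2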
def binarizer_from_classifier(clf):
"""
returns a LabelBinarizer with the position of each class
corresponding to that of an input classifier
"""
lb = LabelBinarizer()
lb.multilabel_ = False
lb.classes_ = clf.classes_
return lb
| gpl-3.0 |
cxxgtxy/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 44 | 8747 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments, self.scores = self.make_random_points(
self.true_centers, self.num_points)
self.true_score = np.add.reduce(self.scores)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
means = [
np.mean(
points[assignments == center], axis=0)
for center in xrange(num_centers)
]
covs = [
np.cov(points[assignments == center].T)
for center in xrange(num_centers)
]
scores = []
for r in xrange(num_points):
scores.append(
np.sqrt(
np.dot(
np.dot(points[r, :] - means[assignments[r]],
np.linalg.inv(covs[assignments[r]])), points[r, :] -
means[assignments[r]])))
return (points, assignments, scores)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertGreater(score1, score2)
self.assertNear(self.true_score, score2, self.true_score * 0.15)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments, true_offsets = (
self.make_random_points(clusters, num_points))
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
# Test score
score = gmm.score(input_fn=self.input_fn(points=points,
batch_size=num_points), steps=1)
self.assertNear(score, np.sum(true_offsets), 4.05)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
if __name__ == '__main__':
test.main()
| apache-2.0 |
rampasek/seizure-prediction | pkl2mat.py | 1 | 1759 | import time
import argparse
import scipy
import scipy.io
import numpy as np
import math
from scipy import signal
from scipy.fftpack import fft, fftshift
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import sklearn
import sklearn.preprocessing
import functools
import cPickle
import gzip
def parseMatFile(filename):
"""
Read and parse input .mat file
Returns numpy array "data" and dict "seg" with segment info
data: [16x239766 double]
data_length_sec: 600
sampling_frequency: 399.6098
channels: {1x16 cell}
sequence: 1
"""
mat = scipy.io.loadmat(filename)
seg = {}
seg['name'] = [s for s in mat.keys() if "segment" in s][0]
mat = mat[seg['name']][0][0]
data = mat[0]
seg['data_length_sec'] = mat[1][0][0]
seg['sampling_frequency'] = mat[2][0][0]
seg['channels'] = [str(x[0]) for x in mat[3][0]]
#seg['sequence'] = mat[4][0][0]
return data, seg
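# Editor's sketch (not part of the original script): typical use of
# parseMatFile; the file name below is a hypothetical placeholder.
#
#     data, seg = parseMatFile("some_segment_0001.mat")
#     print(seg['sampling_frequency'], seg['channels'])
#     print(data.shape)   # e.g. (16, 239766), matching the docstring above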
def main():
#parse command line arguments
parser = argparse.ArgumentParser(description='Convert pickle files to matlab files')
parser.add_argument('input', type=str, nargs=1, help='input pkl file')
args = parser.parse_args()
in_fname = args.input[0]
#data, seg = parseMatFile(in_fname)
out_fname = in_fname[:in_fname.find(".pkl")] + ".mat"
#print out_fname
dat = {}
file = open(in_fname, "r")
    dat['features'] = cPickle.load(file)
file.close()
scipy.io.savemat(out_fname, dat)
#with open(out_fname, 'wb') as fp:
# cPickle.dump(features, fp)
# cPickle.dump(label, fp)
if __name__ == "__main__":
#import doctest
#doctest.testmod()
main()
| gpl-2.0 |
ngoix/OCRF | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
kieselai/Math-CV | python-code/Classifier.py | 1 | 1375 | """
Anthony Kiesel
CS 5600
Generic SVM Classifier that persists through pickle files
"""
from datetime import datetime
from sklearn import svm
from DataManager import DataManager
from IPython import embed
class SVMClassifier():
"""
Generic SVM Classifier that persists through pickle files
"""
def __init__(self, export_file, training_set, gamma=1e-8):
self.gamma = gamma
self.export_file = export_file
self.training_set = training_set
self.svm = None
def load_classifier(self):
""" Load a saved classifier from a pickle file """
self.svm = DataManager.import_pickle_file(self.export_file, self.train_new_classifier)
def train_new_classifier(self):
""" Train a new classifier from training data """
print("STARTING TRAINING: {}".format(datetime.now().time()))
data = self.training_set.get_training_data()
labels = self.training_set.get_training_labels()
classifier = svm.SVC(gamma=self.gamma, kernel='linear')
classifier.fit(data, labels)
print("FINISHED: {}".format(datetime.now().time()))
return classifier
def get_classifier(self):
""" Returns the classifier if it has already been loaded, otherwise the classifier is loaded. """
if self.svm is None:
self.load_classifier()
return self.svm
def predict(self, data_sample):
""" Returns a prediction from a trained classifier """
return self.svm.predict(data_sample)
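# Editor's sketch (not part of the original module): "training_set" is assumed
# to be any object exposing get_training_data() and get_training_labels(), as
# used by train_new_classifier() above; the file name and sample are placeholders.
#
#     clf = SVMClassifier("svm_model.pkl", training_set)
#     model = clf.get_classifier()        # unpickles a saved model or trains a new one
#     label = clf.predict(data_sample)    # data_sample shaped as expected by sklearn's SVC.predict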
| apache-2.0 |
btabibian/scikit-learn | examples/linear_model/plot_sgd_iris.py | 6 | 2205 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
# we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired, edgecolor='black', s=20)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
MadsJensen/RP_scripts | perm_test_classifier.py | 1 | 1878 | import numpy as np
import bct
from sklearn.externals import joblib
from my_settings import *
from sklearn.ensemble import AdaBoostClassifier
from sklearn.cross_validation import (StratifiedShuffleSplit, cross_val_score,
permutation_test_score)
from sklearn.grid_search import GridSearchCV
subjects = ["0008", "0009", "0010", "0012", "0014", "0015", "0016",
"0017", "0018", "0019", "0020", "0021", "0022"]
cls_all = []
pln_all = []
scores_all = np.empty([4, 10])
results_all = {}
for subject in subjects:
cls = np.load(source_folder + "graph_data/%s_classic_pow_pln.npy" %
subject).item()
pln = np.load(source_folder + "graph_data/%s_plan_pow_pln.npy" %
subject).item()
cls_all.append(cls)
pln_all.append(pln)
for k, band in enumerate(bands.keys()):
data_cls = []
for j in range(len(cls_all)):
tmp = cls_all[j][band]
data_cls.append(np.asarray([bct.centrality.pagerank_centrality(
g, d=0.85) for g in tmp]).mean(axis=0))
data_pln = []
for j in range(len(pln_all)):
tmp = pln_all[j][band]
data_pln.append(np.asarray([bct.centrality.pagerank_centrality(
g, d=0.85) for g in tmp]).mean(axis=0))
data_cls = np.asarray(data_cls)
data_pln = np.asarray(data_pln)
X = np.vstack([data_cls, data_pln])
y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])
cv = StratifiedShuffleSplit(y, test_size=0.1)
model = joblib.load(source_folder +
"graph_data/sk_models/pagerank_ada_%s.plk" % band)
score, perm_scores, pval = permutation_test_score(
model, X, y, cv=cv, n_permutations=2000, n_jobs=4)
result = {"score": score,
"perm_scores": perm_scores,
"pval": pval}
results_all[band] = result
| bsd-3-clause |
gfyoung/pandas | pandas/tests/reshape/merge/test_join.py | 1 | 31085 | import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
merge,
)
import pandas._testing as tm
from pandas.tests.reshape.merge.test_merge import NGROUPS, N, get_test_data
a_ = np.array
class TestJoin:
def setup_method(self, method):
# aggregate multiple columns
self.df = DataFrame(
{
"key1": get_test_data(),
"key2": get_test_data(),
"data1": np.random.randn(N),
"data2": np.random.randn(N),
}
)
# exclude a couple keys for fun
self.df = self.df[self.df["key2"] > 1]
self.df2 = DataFrame(
{
"key1": get_test_data(n=N // 5),
"key2": get_test_data(ngroups=NGROUPS // 2, n=N // 5),
"value": np.random.randn(N // 5),
}
)
index, data = tm.getMixedTypeDict()
self.target = DataFrame(data, index=index)
# Join on string value
self.source = DataFrame(
{"MergedA": data["A"], "MergedD": data["D"]}, index=data["C"]
)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on="key2")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="left")
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="left")
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on="key2", how="right")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="right")
joined_both = merge(self.df, self.df2, how="right")
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="right")
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on="key2", how="outer")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="outer")
joined_both = merge(self.df, self.df2, how="outer")
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="outer")
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on="key2", how="inner")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="inner")
joined_both = merge(self.df, self.df2, how="inner")
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="inner")
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on="key2", suffixes=(".foo", ".bar"))
assert "key1.foo" in joined
assert "key1.bar" in joined
def test_handle_overlap_arbitrary_key(self):
joined = merge(
self.df,
self.df2,
left_on="key2",
right_on="key1",
suffixes=(".foo", ".bar"),
)
assert "key1.foo" in joined
assert "key2.bar" in joined
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on="C")
tm.assert_series_equal(merged["MergedA"], target["A"], check_names=False)
tm.assert_series_equal(merged["MergedD"], target["D"], check_names=False)
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
joined = df.join(df2, on="key")
expected = DataFrame(
{"key": ["a", "a", "b", "b", "c"], "value": [0, 0, 1, 1, 2]}
)
tm.assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=["a", "b", "c"], columns=["one"])
df_b = DataFrame([["foo"], ["bar"]], index=[1, 2], columns=["two"])
df_c = DataFrame([[1], [2]], index=[1, 2], columns=["three"])
joined = df_a.join(df_b, on="one")
joined = joined.join(df_c, on="one")
assert np.isnan(joined["two"]["c"])
assert np.isnan(joined["three"]["c"])
        # merge column not present
with pytest.raises(KeyError, match="^'E'$"):
target.join(source, on="E")
# overlap
source_copy = source.copy()
source_copy["A"] = 0
msg = (
"You are trying to merge on float64 and object columns. If "
"you wish to proceed you should use pd.concat"
)
with pytest.raises(ValueError, match=msg):
target.join(source_copy, on="A")
def test_join_on_fails_with_different_right_index(self):
df = DataFrame(
{"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)}
)
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)},
index=tm.makeCustomIndex(10, 2),
)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, left_on="a", right_index=True)
def test_join_on_fails_with_different_left_index(self):
df = DataFrame(
{"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)},
index=tm.makeCustomIndex(3, 2),
)
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)}
)
msg = r'len\(right_on\) must equal the number of levels in the index of "left"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on="b", left_index=True)
def test_join_on_fails_with_different_column_counts(self):
df = DataFrame(
{"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)}
)
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)},
index=tm.makeCustomIndex(10, 2),
)
msg = r"len\(right_on\) must equal len\(left_on\)"
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on="a", left_on=["a", "b"])
@pytest.mark.parametrize("wrong_type", [2, "str", None, np.array([0, 1])])
def test_join_on_fails_with_wrong_object_type(self, wrong_type):
# GH12081 - original issue
# GH21220 - merging of Series and DataFrame is now allowed
# Edited test to remove the Series object from test parameters
df = DataFrame({"a": [1, 1]})
msg = (
"Can only merge Series or DataFrame objects, "
f"a {type(wrong_type)} was passed"
)
with pytest.raises(TypeError, match=msg):
merge(wrong_type, df, left_on="a", right_on="a")
with pytest.raises(TypeError, match=msg):
merge(df, wrong_type, left_on="a", right_on="a")
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on="C")
del expected["C"]
join_col = self.target.pop("C")
result = self.target.join(self.source, on=join_col)
tm.assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on="C")
for col in self.source:
assert col in merged
assert merged[col].isna().all()
merged2 = self.target.join(self.source.reindex([]), on="C", how="inner")
tm.assert_index_equal(merged2.columns, merged.columns)
assert len(merged2) == 0
def test_join_on_inner(self):
df = DataFrame({"key": ["a", "a", "d", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1]}, index=["a", "b"])
joined = df.join(df2, on="key", how="inner")
expected = df.join(df2, on="key")
expected = expected[expected["value"].notna()]
tm.assert_series_equal(joined["key"], expected["key"])
tm.assert_series_equal(joined["value"], expected["value"], check_dtype=False)
tm.assert_index_equal(joined.index, expected.index)
def test_join_on_singlekey_list(self):
df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
# corner cases
joined = df.join(df2, on=["key"])
expected = df.join(df2, on="key")
tm.assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source["MergedA"], on="C")
expected = self.target.join(self.source[["MergedA"]], on="C")
tm.assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({"a": [1, 1]})
ds = Series([2], index=[1], name="b")
result = df.join(ds, on="a")
expected = DataFrame({"a": [1, 1], "b": [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self, join_type):
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1["bool"] = True
df1["string"] = "foo"
df2 = DataFrame(index=np.arange(5, 15))
df2["int"] = 1
df2["float"] = 1.0
joined = df1.join(df2, how=join_type)
expected = _join_by_hand(df1, df2, how=join_type)
tm.assert_frame_equal(joined, expected)
joined = df2.join(df1, how=join_type)
expected = _join_by_hand(df2, df1, how=join_type)
tm.assert_frame_equal(joined, expected)
def test_join_index_mixed_overlap(self):
df1 = DataFrame(
{"A": 1.0, "B": 2, "C": "foo", "D": True},
index=np.arange(10),
columns=["A", "B", "C", "D"],
)
assert df1["B"].dtype == np.int64
assert df1["D"].dtype == np.bool_
df2 = DataFrame(
{"A": 1.0, "B": 2, "C": "foo", "D": True},
index=np.arange(0, 10, 2),
columns=["A", "B", "C", "D"],
)
# overlap
joined = df1.join(df2, lsuffix="_one", rsuffix="_two")
expected_columns = [
"A_one",
"B_one",
"C_one",
"D_one",
"A_two",
"B_two",
"C_two",
"D_two",
]
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
tm.assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=["A"]), how="outer")
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(np.random.randn(30, 2), columns=["a", "b"])
c = Series(np.random.randn(30))
a["c"] = c
d = DataFrame(np.random.randn(30, 1), columns=["q"])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays(
[["a", "a", "a", "b", "b", "b"], [1, 2, 3, 1, 2, 3]],
names=["first", "second"],
)
index2 = MultiIndex.from_arrays(
[["b", "b", "b", "c", "c", "c"], [1, 2, 3, 1, 2, 3]],
names=["first", "second"],
)
df1 = DataFrame(data=np.random.randn(6), index=index1, columns=["var X"])
df2 = DataFrame(data=np.random.randn(6), index=index2, columns=["var Y"])
df1 = df1.sort_index(level=0)
df2 = df2.sort_index(level=0)
joined = df1.join(df2, how="outer")
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
tm.assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
df1 = df1.sort_index(level=1)
df2 = df2.sort_index(level=1)
joined = df1.join(df2, how="outer").sort_index(level=0)
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
tm.assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
def test_join_inner_multiindex(self):
key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"]
key2 = [
"two",
"one",
"three",
"one",
"two",
"one",
"two",
"two",
"three",
"one",
]
data = np.random.randn(len(key1))
data = DataFrame({"key1": key1, "key2": key2, "data": data})
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
to_join = DataFrame(
np.random.randn(10, 3), index=index, columns=["j_one", "j_two", "j_three"]
)
joined = data.join(to_join, on=["key1", "key2"], how="inner")
expected = merge(
data,
to_join.reset_index(),
left_on=["key1", "key2"],
right_on=["first", "second"],
how="inner",
sort=False,
)
expected2 = merge(
to_join,
data,
right_on=["key1", "key2"],
left_index=True,
how="inner",
sort=False,
)
tm.assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(
to_join,
data,
right_on=["key1", "key2"],
left_index=True,
how="inner",
sort=False,
)
expected = expected.drop(["first", "second"], axis=1)
expected.index = joined.index
assert joined.index.is_monotonic
tm.assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.loc[:, expected.columns])
def test_join_hierarchical_mixed(self):
# GH 2024
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "c"])
new_df = df.groupby(["a"]).agg({"b": [np.mean, np.sum]})
other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"])
other_df.set_index("a", inplace=True)
# GH 9455, 12219
with tm.assert_produces_warning(UserWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
assert ("b", "mean") in result
assert "b" in result
def test_join_float64_float32(self):
a = DataFrame(np.random.randn(10, 2), columns=["a", "b"], dtype=np.float64)
b = DataFrame(np.random.randn(10, 1), columns=["c"], dtype=np.float32)
joined = a.join(b)
assert joined.dtypes["a"] == "float64"
assert joined.dtypes["b"] == "float64"
assert joined.dtypes["c"] == "float32"
a = np.random.randint(0, 5, 100).astype("int64")
b = np.random.random(100).astype("float64")
c = np.random.random(100).astype("float32")
df = DataFrame({"a": a, "b": b, "c": c})
xpdf = DataFrame({"a": a, "b": b, "c": c})
s = DataFrame(np.random.random(5).astype("float32"), columns=["md"])
rs = df.merge(s, left_on="a", right_index=True)
assert rs.dtypes["a"] == "int64"
assert rs.dtypes["b"] == "float64"
assert rs.dtypes["c"] == "float32"
assert rs.dtypes["md"] == "float32"
xp = xpdf.merge(s, left_on="a", right_index=True)
tm.assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how="outer")
df_partially_merged = merge(df1, df2, on=["a", "b"], how="outer")
expected = merge(df_partially_merged, df3, on=["a", "b"], how="outer")
result = result.reset_index()
expected = expected[result.columns]
expected["a"] = expected.a.astype("int64")
expected["b"] = expected.b.astype("int64")
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how="inner")
df_partially_merged = merge(df1, df2, on=["a", "b"], how="inner")
expected = merge(df_partially_merged, df3, on=["a", "b"], how="inner")
result = result.reset_index()
tm.assert_frame_equal(result, expected.loc[:, result.columns])
# GH 11519
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.randn(8),
"D": np.random.randn(8),
}
)
s = Series(
np.repeat(np.arange(8), 2), index=np.repeat(np.arange(8), 2), name="TEST"
)
inner = df.join(s, how="inner")
outer = df.join(s, how="outer")
left = df.join(s, how="left")
right = df.join(s, how="right")
tm.assert_frame_equal(inner, outer)
tm.assert_frame_equal(inner, left)
tm.assert_frame_equal(inner, right)
def test_join_sort(self):
left = DataFrame({"key": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]})
right = DataFrame({"value2": ["a", "b", "c"]}, index=["bar", "baz", "foo"])
joined = left.join(right, on="key", sort=True)
expected = DataFrame(
{
"key": ["bar", "baz", "foo", "foo"],
"value": [2, 3, 1, 4],
"value2": ["a", "b", "c", "c"],
},
index=[1, 2, 0, 3],
)
tm.assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on="key", sort=False)
tm.assert_index_equal(joined.index, Index(range(4)), exact=True)
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
df1 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 3, "a"])
df2 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 3, 3, 4])
result = df1.join(df2)
expected = DataFrame(
{"a": [1, 2, 3, 3, 4], "b": [5, np.nan, 6, 7, np.nan]},
index=[1, 2, 3, 3, "a"],
)
tm.assert_frame_equal(result, expected)
df3 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 2, "a"])
df4 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 2, 3, 4])
result = df3.join(df4)
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [5, 6, 6, np.nan]}, index=[1, 2, 2, "a"]
)
tm.assert_frame_equal(result, expected)
def test_join_non_unique_period_index(self):
# GH #16871
index = pd.period_range("2016-01-01", periods=16, freq="M")
df = DataFrame(list(range(len(index))), index=index, columns=["pnum"])
df2 = concat([df, df])
result = df.join(df2, how="inner", rsuffix="_df2")
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=["pnum", "pnum_df2"],
index=df2.sort_index().index,
)
tm.assert_frame_equal(result, expected)
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6), columns=["a", "b", "c", "d", "e", "f"])
df.insert(0, "id", 0)
df.insert(5, "dt", "foo")
grouped = df.groupby("id")
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix="_right")
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list("abcdef"))
df_list = [df[["a", "b"]], df[["c", "d"]], df[["e", "f"]]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[["a", "b"]][:-2], df[["c", "d"]][2:], df[["e", "f"]][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how="outer")
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how="inner")
_check_diff_index(df_list, joined, df.index[2:8])
msg = "Joining multiple DataFrames only supported for joining on index"
with pytest.raises(ValueError, match=msg):
df_list[0].join(df_list[1:], on="a")
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=["A", "B", "C", "D"])
df["key"] = ["foo", "bar"] * 4
df1 = df.loc[:, ["A", "B"]]
df2 = df.loc[:, ["C", "D"]]
df3 = df.loc[:, ["key"]]
result = df1.join([df2, df3])
tm.assert_frame_equal(result, df)
def test_join_dups(self):
# joining dups
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
expected = concat([df, df], axis=1)
result = df.join(df, rsuffix="_2")
result.columns = expected.columns
tm.assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer"
)
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ["x_x", "y_x", "x_y", "y_y", "x_x", "y_x", "x_y", "y_y"]
tm.assert_frame_equal(dta, expected)
def test_join_multi_to_multi(self, join_type):
# GH 20475
leftindex = MultiIndex.from_product(
[list("abc"), list("xy"), [1, 2]], names=["abc", "xy", "num"]
)
left = DataFrame({"v1": range(12)}, index=leftindex)
rightindex = MultiIndex.from_product(
[list("abc"), list("xy")], names=["abc", "xy"]
)
right = DataFrame({"v2": [100 * i for i in range(1, 7)]}, index=rightindex)
result = left.join(right, on=["abc", "xy"], how=join_type)
expected = (
left.reset_index()
.merge(right.reset_index(), on=["abc", "xy"], how=join_type)
.set_index(["abc", "xy", "num"])
)
tm.assert_frame_equal(expected, result)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
left.join(right, on="xy", how=join_type)
with pytest.raises(ValueError, match=msg):
right.join(left, on=["abc", "xy"], how=join_type)
def test_join_on_tz_aware_datetimeindex(self):
# GH 23931, 26335
df1 = DataFrame(
{
"date": pd.date_range(
start="2018-01-01", periods=5, tz="America/Chicago"
),
"vals": list("abcde"),
}
)
df2 = DataFrame(
{
"date": pd.date_range(
start="2018-01-03", periods=5, tz="America/Chicago"
),
"vals_2": list("tuvwx"),
}
)
result = df1.join(df2.set_index("date"), on="date")
expected = df1.copy()
expected["vals_2"] = Series([np.nan] * 2 + list("tuv"), dtype=object)
tm.assert_frame_equal(result, expected)
def test_join_datetime_string(self):
# GH 5647
dfa = DataFrame(
[
["2012-08-02", "L", 10],
["2012-08-02", "J", 15],
["2013-04-06", "L", 20],
["2013-04-06", "J", 25],
],
columns=["x", "y", "a"],
)
dfa["x"] = pd.to_datetime(dfa["x"])
dfb = DataFrame(
[["2012-08-02", "J", 1], ["2013-04-06", "L", 2]],
columns=["x", "y", "z"],
index=[2, 4],
)
dfb["x"] = pd.to_datetime(dfb["x"])
result = dfb.join(dfa.set_index(["x", "y"]), on=["x", "y"])
expected = DataFrame(
[
[Timestamp("2012-08-02 00:00:00"), "J", 1, 15],
[Timestamp("2013-04-06 00:00:00"), "L", 2, 20],
],
index=[2, 4],
columns=["x", "y", "z", "a"],
)
tm.assert_frame_equal(result, expected)
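# Editor's note: the helpers below validate a merge result group by group
# against the two inputs -- join keys in the result must be non-null, and a
# result key missing from the left (right) input is an error for "left"/"inner"
# ("right"/"inner") joins; otherwise the corresponding columns must be all-NA.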
def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix="_y"):
# some smoke tests
for c in join_col:
assert result[c].notna().all()
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
for group_key, group in result.groupby(join_col):
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError as err:
if how in ("left", "inner"):
raise AssertionError(
f"key {group_key} should not have been in the join"
) from err
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError as err:
if how in ("right", "inner"):
raise AssertionError(
f"key {group_key} should not have been in the join"
) from err
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [
c for c in group.columns if c in columns or c.replace(suffix, "") in columns
]
# filter
group = group.loc[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ""))
# put in the right order...
group = group.loc[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = {tuple(row) for row in jvalues}
assert len(rows) == len(source)
assert all(tuple(row) in rows for row in svalues)
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert join_chunk[c].isna().all()
def _join_by_hand(a, b, how="left"):
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in b_re.items():
a_re[col] = s
return a_re.reindex(columns=result_columns)
def test_join_inner_multiindex_deterministic_order():
# GH: 36910
left = DataFrame(
data={"e": 5},
index=MultiIndex.from_tuples([(1, 2, 4)], names=("a", "b", "d")),
)
right = DataFrame(
data={"f": 6}, index=MultiIndex.from_tuples([(2, 3)], names=("b", "c"))
)
result = left.join(right, how="inner")
expected = DataFrame(
{"e": [5], "f": [6]},
index=MultiIndex.from_tuples([(2, 1, 4, 3)], names=("b", "a", "d", "c")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])]
)
def test_join_cross(input_col, output_cols):
# GH#5401
left = DataFrame({"a": [1, 3]})
right = DataFrame({input_col: [3, 4]})
result = left.join(right, how="cross", lsuffix="_x", rsuffix="_y")
expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]})
tm.assert_frame_equal(result, expected)
def test_join_multiindex_one_level(join_type):
# GH#36909
left = DataFrame(
data={"c": 3}, index=pd.MultiIndex.from_tuples([(1, 2)], names=("a", "b"))
)
right = DataFrame(
data={"d": 4}, index=pd.MultiIndex.from_tuples([(2,)], names=("b",))
)
result = left.join(right, how=join_type)
expected = DataFrame(
{"c": [3], "d": [4]},
index=pd.MultiIndex.from_tuples([(2, 1)], names=["b", "a"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"categories, values",
[
(["Y", "X"], ["Y", "X", "X"]),
([2, 1], [2, 1, 1]),
([2.5, 1.5], [2.5, 1.5, 1.5]),
(
[Timestamp("2020-12-31"), Timestamp("2019-12-31")],
[Timestamp("2020-12-31"), Timestamp("2019-12-31"), Timestamp("2019-12-31")],
),
],
)
def test_join_multiindex_not_alphabetical_categorical(categories, values):
# GH#38502
left = DataFrame(
{
"first": ["A", "A"],
"second": Categorical(categories, categories=categories),
"value": [1, 2],
}
).set_index(["first", "second"])
right = DataFrame(
{
"first": ["A", "A", "B"],
"second": Categorical(values, categories=categories),
"value": [3, 4, 5],
}
).set_index(["first", "second"])
result = left.join(right, lsuffix="_left", rsuffix="_right")
expected = DataFrame(
{
"first": ["A", "A"],
"second": Categorical(categories, categories=categories),
"value_left": [1, 2],
"value_right": [3, 4],
}
).set_index(["first", "second"])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
lbishal/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
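# Editor's sketch (not part of the original source): RBFSampler is typically
# paired with a linear model trained on the randomized feature map; X and y
# below are hypothetical data.
#
#     from sklearn.linear_model import SGDClassifier
#     rbf_feature = RBFSampler(gamma=1.0, n_components=100, random_state=1)
#     X_features = rbf_feature.fit_transform(X)     # shape (n_samples, 100)
#     clf = SGDClassifier().fit(X_features, y)      # linear model on approximate RBF features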
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
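# Editor's sketch (not part of the original source): X is assumed to be
# non-negative (e.g. histogram features), since transform() rejects negative
# entries.
#
#     chi2_feature = SkewedChi2Sampler(skewedness=0.01, n_components=50, random_state=0)
#     X_features = chi2_feature.fit_transform(X)    # shape (n_samples, 50)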
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Samples the Fourier transform of the kernel characteristic at regular
    intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
            shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
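# Editor's sketch (not part of the original source): with sample_steps=2 each
# input feature expands into 2*2-1 = 3 output features, so a non-negative X of
# shape (n_samples, n_features) becomes (n_samples, 3 * n_features).
#
#     chi2sampler = AdditiveChi2Sampler(sample_steps=2)
#     X_transformed = chi2sampler.fit_transform(X)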
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
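# Editor's sketch (not part of the original source): Nystroem builds an
# approximate kernel feature map from a random subset of the data and is
# usually combined with a linear estimator; X and y are hypothetical.
#
#     from sklearn.svm import LinearSVC
#     feature_map = Nystroem(kernel="rbf", gamma=0.2, n_components=300, random_state=1)
#     X_transformed = feature_map.fit_transform(X)  # shape (n_samples, 300)
#     clf = LinearSVC().fit(X_transformed, y)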
| bsd-3-clause |
anooptoffy/Masters-Course-Work-Repository | Semester_2/Machine Perception/Assignment1/question_5.py | 2 | 1195 | import cv2
import numpy as np
from matplotlib import pyplot as plt
orig = cv2.imread("lenna.jpg", cv2.IMREAD_GRAYSCALE)
hist_equal = cv2.equalizeHist(orig)
# numpy does elementwise scalar operation on the image matrix
whitened_img = (orig - np.mean(orig)) / np.std(orig)
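# After this per-image standardization the "whitened" image has zero mean and
# unit standard deviation, so its values are no longer in the [0, 255] range
# (hence the commented-out vmin/vmax handling below).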
fig = plt.figure()
images = [orig, whitened_img, hist_equal]
titles = ["Original", "Whitened", "Histogram equalization"]
pos = 1
# display image and the corresponding histogram
for i in range(len(images)):
ax = fig.add_subplot(3, 2, pos)
ax.set_title(titles[i])
# uncomment to see the output like in cv2.imshow()
# if (images[i] == whitened_img).all():
# ax.imshow(images[i], cmap="gray", vmax=1, vmin=0)
# else:
# ax.imshow(images[i], cmap="gray")
ax.imshow(images[i], cmap="gray")
plt.xticks([])
plt.yticks([])
pos += 1
ax = fig.add_subplot(3, 2, pos)
# round to 2 decimal places
mean = round(np.mean(images[i]), 2)
std = round(np.std(images[i]), 2)
hist_title = "Mean:" + str(mean) + " Std:" + str(std)
ax.set_title(hist_title)
ax.hist(images[i].ravel(), 256, [0, 256])
pos += 1
plt.xticks([])
plt.yticks([])
plt.show()
| mit |
vibhorag/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
GitYiheng/reinforcement_learning_test | test00_previous_files/td0_q-learning_ust.py | 1 | 2019 | # TD0
# Q-Learning
# Uncertain state transition
import numpy as np
import matplotlib.pyplot as plt
from grid_world import standard_grid, negative_grid
from dp_ipe_dst_dp import print_values, print_policy
from mc_pi_dst_fv_es import max_dict
GAMMA = 0.9
ALPHA = 0.1
ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R')
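# Epsilon-greedy action corruption: with probability (1 - eps) the intended
# action is kept, otherwise a uniformly random action is taken; this also acts
# as the "uncertain state transition" model named in the header comments.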
def affected_action(a, eps=0.1):
p = np.random.random()
if p < (1 - eps):
return a
else:
return np.random.choice(ALL_POSSIBLE_ACTIONS)
if __name__ == '__main__':
grid = negative_grid(step_cost=-0.1)
print("Rewards:")
print_values(grid.rewards, grid)
Q = {}
states = grid.all_states()
for s in states:
Q[s] = {}
for a in ALL_POSSIBLE_ACTIONS:
Q[s][a] = 0
update_counts = {}
update_counts_sa = {}
for s in states:
update_counts_sa[s] = {}
for a in ALL_POSSIBLE_ACTIONS:
update_counts_sa[s][a] = 1.0
t = 1.0
deltas = []
for i in range(10000):
if i % 100 == 0:
t += 1e-2
if i % 2000 == 0:
print("i: ", i)
s = (0, 0)
grid.set_state(s)
a = max_dict(Q[s])[0]
biggest_change = 0
while not grid.game_over():
a = affected_action(a, eps=0.5/t)
r = grid.move(a)
s2 = grid.current_state()
if grid.game_over():
break
alpha = ALPHA / update_counts_sa[s][a]
update_counts_sa[s][a] += 0.005
old_qsa = Q[s][a]
a2, max_q_s2a2 = max_dict(Q[s2])
#a2 = affected_action(a2, eps=0.5/t)
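            # Off-policy TD(0) / Q-learning update: the target is
            # r + GAMMA * max_a' Q(s2, a'), and the TD error is scaled by the
            # per-(s, a) decaying learning rate computed above.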
Q[s][a] = Q[s][a] + alpha*(r + GAMMA*max_q_s2a2 - Q[s][a])
biggest_change = max(biggest_change, np.abs(old_qsa - Q[s][a]))
update_counts[s] = update_counts.get(s, 0) + 1
s = s2
#a = a2
deltas.append(biggest_change)
#plt.plot(deltas)
#plt.show()
policy = {}
V = {}
for s in grid.actions.keys():
a, max_q = max_dict(Q[s])
policy[s] = a
V[s] = max_q
print("Update counts:")
total = np.sum(list(update_counts.values()))
for k, v in update_counts.items():
update_counts[k] = float(v) / total
print_values(update_counts, grid)
print("Values:")
print_values(V, grid)
print("Policy:")
print_policy(policy, grid) | mit |
chrsrds/scikit-learn | sklearn/utils/setup.py | 8 | 3195 | import os
from os.path import join
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
from Cython import Tempita
config = Configuration('utils', parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension('sparsefuncs_fast',
sources=['sparsefuncs_fast.pyx'],
libraries=libraries)
config.add_extension('_cython_blas',
sources=['_cython_blas.pyx'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('murmurhash',
sources=['murmurhash.pyx', join(
'src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.pyx', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
# generate files from a template
pyx_templates = ['sklearn/utils/seq_dataset.pyx.tp',
'sklearn/utils/seq_dataset.pxd.tp']
for pyxfiles in pyx_templates:
outfile = pyxfiles.replace('.tp', '')
# if .pyx.tp is not updated, no need to output .pyx
if (os.path.exists(outfile) and
os.stat(pyxfiles).st_mtime < os.stat(outfile).st_mtime):
continue
with open(pyxfiles, "r") as f:
tmpl = f.read()
pyxcontent = Tempita.sub(tmpl)
with open(outfile, "w") as f:
f.write(pyxcontent)
config.add_extension('seq_dataset',
sources=['seq_dataset.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_random",
sources=["_random.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
Ossada/DLS-UVVis | risanje_stopnica.py | 1 | 2219 | __author__ = 'vid'
import matplotlib.pyplot as plt
from tkinter import filedialog
import os
pot = filedialog.askdirectory(initialdir='/media/vid/DLS Data/VidS/Uv vis/')
seznam = os.listdir(pot)
di = {}
temperatura = []
absor = []
print(seznam)
for a in seznam:
if a[-4:] == '.CSV':
key = a.split('.')[0]
# print('nutr')
# try:
with open(pot + '//' + a, encoding='windows-1250') as file:
next(file)
for line in file:
temp = line.split(',')
temperatura.append(float(temp[0]))
absor.append(float(temp[1]))
di[key] = ([temperatura, absor])
# except:
# print(a)
temperatura = []
absor = []
print(di)
# print(di)
seznam = list(di.keys())
try:
fig, ax = plt.subplots(1)
# plt.plot(di['SEG1'][0], di['SEG1'][1], 'r')
# plt.plot(di['OHL'][0], di['OHL'][1], 'b', linewidth=3.0)
plt.plot(di['SEG2'][0], di['SEG2'][1], color='#ff6400', linewidth=3.0)
# tex = '$c=1mM$ $Seq4Amod3$ $+$' + '\n' '$10mM$ $NaPi$ $+$ $100mM$ $NaCl$' +\
# '\n' + '$\\lambda = 600 nm$'
tex = 'Črnilo'
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.80, 0.90, tex, transform=ax.transAxes, fontsize=18,
verticalalignment='top', bbox=props)
plt.xlabel('$Temperatura$ $[^{\\circ}C]$', fontsize=22)
plt.ylabel('$Absorbanca$', fontsize=22)
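    # (Slovenian labels: 'Temperatura' = temperature, 'Absorbanca' = absorbance;
    # 'Črnilo' above = ink.)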
# plt.xlim(10,90)
# plt.ylim(-0.01, 0.01)
# plt.show()
except:
pass
for ne in di:
for i in range(len(di[ne][1])-1):
vmes = abs(di[ne][1][i+1] - di[ne][1][i])
if vmes > 0.008:
try:
ods = (di[ne][1][i+1] + di[ne][1][i+2])/2 -(di[ne][1][i] + di[ne][1][i-1])/2
tocka = i
except:
pass
try:
for i in range(tocka+1, len(di[ne][1])):
di[ne][1][i] -= ods
except NameError:
pass
# try:
# di[ne][1][i+1] -= ods
# except:
# pass
# try:
# di[ne][1][tocka] += ods
# except:
# pass
tocka = 0
ods = 0
plt.plot(di['SEG2'][0], di['SEG2'][1], 'b', linewidth=1.0)
plt.show() | mit |
lbishal/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
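# As an optional follow-up, the boolean mask of selected features and the
# per-feature ranking are exposed via RFECV's fitted attributes.
print("Selected feature mask : %s" % rfecv.support_)
print("Feature ranking : %s" % rfecv.ranking_)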
| bsd-3-clause |
shaneknapp/spark | python/pyspark/pandas/indexes/numeric.py | 2 | 5502 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Optional, Tuple, Union, cast
import pandas as pd
from pandas.api.types import is_hashable
from pyspark import pandas as ps
from pyspark.pandas._typing import Dtype
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.series import Series
class NumericIndex(Index):
"""
Provide numeric type operations.
This is an abstract class.
"""
pass
class IntegerIndex(NumericIndex):
"""
This is an abstract class for Int64Index.
"""
pass
class Int64Index(IntegerIndex):
"""
Immutable sequence used for indexing and alignment. The basic object
storing axis labels for all pandas objects. Int64Index is a special case
of `Index` with purely integer labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: int64)
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
See Also
--------
Index : The base pandas-on-Spark Index type.
Float64Index : A special case of :class:`Index` with purely float labels.
Notes
-----
An Index instance can **only** contain hashable objects.
Examples
--------
>>> ps.Int64Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
From a Series:
>>> s = ps.Series([1, 2, 3], index=[10, 20, 30])
>>> ps.Int64Index(s)
Int64Index([1, 2, 3], dtype='int64')
From an Index:
>>> idx = ps.Index([1, 2, 3])
>>> ps.Int64Index(idx)
Int64Index([1, 2, 3], dtype='int64')
"""
def __new__(
cls,
data: Optional[Any] = None,
dtype: Optional[Union[str, Dtype]] = None,
copy: bool = False,
name: Optional[Union[Any, Tuple]] = None,
) -> "Int64Index":
if not is_hashable(name):
raise TypeError("Index.name must be a hashable type")
if isinstance(data, (Series, Index)):
if dtype is None:
dtype = "int64"
return cast(Int64Index, Index(data, dtype=dtype, copy=copy, name=name))
return cast(
Int64Index, ps.from_pandas(pd.Int64Index(data=data, dtype=dtype, copy=copy, name=name))
)
class Float64Index(NumericIndex):
"""
Immutable sequence used for indexing and alignment. The basic object
storing axis labels for all pandas objects. Float64Index is a special case
of `Index` with purely float labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: float64)
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
See Also
--------
Index : The base pandas-on-Spark Index type.
Int64Index : A special case of :class:`Index` with purely integer labels.
Notes
-----
An Index instance can **only** contain hashable objects.
Examples
--------
>>> ps.Float64Index([1.0, 2.0, 3.0])
Float64Index([1.0, 2.0, 3.0], dtype='float64')
From a Series:
>>> s = ps.Series([1, 2, 3], index=[10, 20, 30])
>>> ps.Float64Index(s)
Float64Index([1.0, 2.0, 3.0], dtype='float64')
From an Index:
>>> idx = ps.Index([1, 2, 3])
>>> ps.Float64Index(idx)
Float64Index([1.0, 2.0, 3.0], dtype='float64')
"""
def __new__(
cls,
data: Optional[Any] = None,
dtype: Optional[Union[str, Dtype]] = None,
copy: bool = False,
name: Optional[Union[Any, Tuple]] = None,
) -> "Float64Index":
if not is_hashable(name):
raise TypeError("Index.name must be a hashable type")
if isinstance(data, (Series, Index)):
if dtype is None:
dtype = "float64"
return cast(Float64Index, Index(data, dtype=dtype, copy=copy, name=name))
return cast(
Float64Index,
ps.from_pandas(pd.Float64Index(data=data, dtype=dtype, copy=copy, name=name)),
)
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.indexes.numeric
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.indexes.numeric.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.indexes.numeric tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.indexes.numeric,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
RJT1990/pyflux | pyflux/gas/tests/gasreg_tests_normal.py | 1 | 18248 | import numpy as np
import pandas as pd
import pyflux as pf
# Set up some data to use for the tests
noise = np.random.normal(0,1,250)
y = np.zeros(250)
x1 = np.random.normal(0,1,250)
x2 = np.random.normal(0,1,250)
for i in range(1,len(y)):
y[i] = 0.9*y[i-1] + noise[i] + 0.1*x1[i] - 0.3*x2[i]
data = pd.DataFrame([y,x1,x2]).T
data.columns = ['y', 'x1', 'x2']
y_oos = np.random.normal(0,1,30)
x1_oos = np.random.normal(0,1,30)
x2_oos = np.random.normal(0,1,30)
data_oos = pd.DataFrame([y_oos,x1_oos,x2_oos]).T
data_oos.columns = ['y', 'x1', 'x2']
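# The simulated series above follows the data-generating process
#     y_t = 0.9 * y_{t-1} + 0.1 * x1_t - 0.3 * x2_t + eps_t,   eps_t ~ N(0, 1)
# so the GASReg models below regress y on x1 (and x2) under a Normal family.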
def test_normal_no_terms():
"""
    Tests the length of the latent variable vector for a GASReg model
    with a single predictor, and tests that the values are not nan
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit()
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_bbvi():
"""
    Tests a GASReg model estimated with BBVI, and tests that the latent variable
    vector length is correct, and that values are not nan
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_bbvi_mini_batch():
"""
    Tests a GASReg model estimated with BBVI using mini-batches, and that the length
    of the latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('BBVI',iterations=100, mini_batch=32)
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_bbvi_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI',iterations=100, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_normal_bbvi_mini_batch_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_normal_mh():
"""
    Tests a GASReg model estimated with Metropolis-Hastings, and tests that the latent variable
    vector length is correct, and that values are not nan
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_laplace():
"""
    Tests a GASReg model estimated with Laplace approximation, and tests that the latent variable
    vector length is correct, and that values are not nan
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_pml():
"""
    Tests a GASReg model estimated with PML, and tests that the latent variable
    vector length is correct, and that values are not nan
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_predict_length():
"""
Tests that the length of the predict dataframe is equal to no of steps h
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit()
x.summary()
assert(model.predict(h=5, oos_data=data_oos).shape[0] == 5)
def test_normal_predict_is_length():
"""
Tests that the length of the predict IS dataframe is equal to no of steps h
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_normal_predict_nans():
"""
Tests that the predictions are not NaNs
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit()
x.summary()
assert(len(model.predict(h=5, oos_data=data_oos).values[np.isnan(model.predict(h=5,
oos_data=data_oos).values)]) == 0)
def test_normal_predict_is_nans():
"""
Tests that the predictions in-sample are not NaNs
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test_predict_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit()
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit()
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('BBVI', iterations=100)
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('BBVI', iterations=100)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('M-H', nsims=400)
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
"""
def test_predict_is_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('M-H', nsims=400)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
"""
def test_sample_model():
"""
Tests sampling function
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('BBVI', iterations=100)
sample = model.sample(nsims=100)
assert(sample.shape[0]==100)
assert(sample.shape[1]==len(data))
def test_ppc():
"""
Tests PPC value
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('BBVI', iterations=100)
p_value = model.ppc()
assert(0.0 <= p_value <= 1.0)
## Try more than one predictor
def test2_normal_no_terms():
"""
    Tests the length of the latent variable vector for a GASReg model
    with two predictors, and tests that the values
    are not nan
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_bbvi():
"""
    Tests a GASReg model estimated with BBVI, with multiple predictors, and
    tests that the latent variable vector length is correct, and that values are not nan
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_bbvi_mini_batch():
"""
    Tests a GASReg model estimated with BBVI using mini-batches, with multiple predictors,
    and that the length of the latent variable list is correct, and that the estimated
    latent variables are not nan
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI',iterations=100, mini_batch=32)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_bbvi_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI',iterations=100, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test2_normal_bbvi_mini_batch_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test2_normal_mh():
"""
    Tests a GASReg model estimated with Metropolis-Hastings, with multiple predictors, and
    tests that the latent variable vector length is correct, and that values are not nan
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_laplace():
    """
    Tests a GASReg model estimated with Laplace, with multiple predictors, and
    tests that the latent variable vector length is correct, and that values are not nan
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_pml():
"""
    Tests a GASReg model estimated with PML, with multiple predictors, and
    tests that the latent variable vector length is correct, and that values are not nan
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_predict_length():
"""
Tests that the length of the predict dataframe is equal to no of steps h
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit()
x.summary()
assert(model.predict(h=5, oos_data=data_oos).shape[0] == 5)
def test2_normal_predict_is_length():
"""
Tests that the length of the predict IS dataframe is equal to no of steps h
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test2_normal_predict_nans():
"""
Tests that the predictions are not NaNs
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit()
x.summary()
assert(len(model.predict(h=5, oos_data=data_oos).values[np.isnan(model.predict(h=5,
oos_data=data_oos).values)]) == 0)
def test2_normal_predict_is_nans():
"""
Tests that the predictions in-sample are not NaNs
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test2_predict_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit()
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_is_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit()
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI', iterations=100)
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_is_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI', iterations=100)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('M-H', nsims=400)
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
"""
def test2_predict_is_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Normal())
x = model.fit('M-H', nsims=400)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
"""
def test2_sample_model():
"""
Tests sampling function
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI', iterations=100)
sample = model.sample(nsims=100)
assert(sample.shape[0]==100)
assert(sample.shape[1]==len(data))
def test2_ppc():
"""
Tests PPC value
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.Normal())
x = model.fit('BBVI', iterations=100)
p_value = model.ppc()
assert(0.0 <= p_value <= 1.0)
| bsd-3-clause |
juanyaw/PTVS | Python/Product/ML/ProjectTemplates/ClassifierTemplate/classifier.py | 18 | 10116 | '''
This script perfoms the basic process for applying a machine learning
algorithm to a dataset using Python libraries.
The four steps are:
1. Download a dataset (using pandas)
2. Process the numeric data (using numpy)
3. Train and evaluate learners (using scikit-learn)
4. Plot and compare results (using matplotlib)
The data is downloaded from URL, which is defined below. As is normal
for machine learning problems, the nature of the source data affects
the entire solution. When you change URL to refer to your own data, you
will need to review the data processing steps to ensure they remain
correct.
============
Example Data
============
The example is from http://archive.ics.uci.edu/ml/datasets/Spambase
It contains pre-processed metrics, such as the frequency of certain
words and letters, from a collection of emails. A classification for
each one indicating 'spam' or 'not spam' is in the final column.
See the linked page for full details of the data set.
This script uses three classifiers to predict the class of an email
based on the metrics. These are not representative of modern spam
detection systems.
'''
# Remember to update the script for the new data when you change this URL
URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data"
# Uncomment this call when using matplotlib to generate images
# rather than displaying interactive UI.
#import matplotlib
#matplotlib.use('Agg')
from pandas import read_table
import numpy as np
import matplotlib.pyplot as plt
try:
# [OPTIONAL] Seaborn makes plots nicer
import seaborn
except ImportError:
pass
# =====================================================================
def download_data():
'''
Downloads the data for this script into a pandas DataFrame.
'''
# If your data is in an Excel file, install 'xlrd' and use
# pandas.read_excel instead of read_table
#from pandas import read_excel
#frame = read_excel(URL)
# If your data is in a private Azure blob, install 'azure' and use
# BlobService.get_blob_to_path() with read_table() or read_excel()
#import azure.storage
#service = azure.storage.BlobService(ACCOUNT_NAME, ACCOUNT_KEY)
#service.get_blob_to_path(container_name, blob_name, 'my_data.csv')
#frame = read_table('my_data.csv', ...
frame = read_table(
URL,
# Uncomment if the file needs to be decompressed
#compression='gzip',
#compression='bz2',
# Specify the file encoding
# Latin-1 is common for data from US sources
encoding='latin-1',
#encoding='utf-8', # UTF-8 is also common
# Specify the separator in the data
sep=',', # comma separated values
#sep='\t', # tab separated values
#sep=' ', # space separated values
# Ignore spaces after the separator
skipinitialspace=True,
# Generate row labels from each row number
index_col=None,
#index_col=0, # use the first column as row labels
#index_col=-1, # use the last column as row labels
# Generate column headers row from each column number
header=None,
#header=0, # use the first line as headers
# Use manual headers and skip the first row in the file
#header=0,
#names=['col1', 'col2', ...],
)
# Return a subset of the columns
#return frame[['col1', 'col4', ...]]
# Return the entire frame
return frame
# =====================================================================
def get_features_and_labels(frame):
'''
Transforms and scales the input data and returns numpy arrays for
training and testing inputs and targets.
'''
# Replace missing values with 0.0, or we can use
# scikit-learn to calculate missing values (below)
#frame[frame.isnull()] = 0.0
# Convert values to floats
arr = np.array(frame, dtype=np.float)
# Use the last column as the target value
X, y = arr[:, :-1], arr[:, -1]
# To use the first column instead, change the index value
#X, y = arr[:, 1:], arr[:, 0]
# Use 80% of the data for training; test against the rest
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# sklearn.pipeline.make_pipeline could also be used to chain
# processing and classification into a black box, but here we do
# them separately.
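    # A compact alternative (sketch only, using scikit-learn's public API):
    # chain the scaler and a classifier so preprocessing is re-fit
    # automatically inside any cross-validation loop.
    #from sklearn.pipeline import make_pipeline
    #from sklearn.svm import LinearSVC
    #pipeline = make_pipeline(StandardScaler(), LinearSVC(C=1))
    #pipeline.fit(X_train, y_train)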
# If values are missing we could impute them from the training data
#from sklearn.preprocessing import Imputer
#imputer = Imputer(strategy='mean')
#imputer.fit(X_train)
#X_train = imputer.transform(X_train)
#X_test = imputer.transform(X_test)
# Normalize the attribute values to mean=0 and variance=1
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# To scale to a specified range, use MinMaxScaler
#from sklearn.preprocessing import MinMaxScaler
#scaler = MinMaxScaler(feature_range=(0, 1))
# Fit the scaler based on the training data, then apply the same
# scaling to both training and test sets.
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Return the training and test sets
return X_train, X_test, y_train, y_test
# =====================================================================
def evaluate_classifier(X_train, X_test, y_train, y_test):
'''
Run multiple times with different classifiers to get an idea of the
relative performance of each configuration.
Returns a sequence of tuples containing:
(title, precision, recall)
for each learner.
'''
# Import some classifiers to test
from sklearn.svm import LinearSVC, NuSVC
from sklearn.ensemble import AdaBoostClassifier
# We will calculate the P-R curve for each classifier
from sklearn.metrics import precision_recall_curve, f1_score
# Here we create classifiers with default parameters. These need
# to be adjusted to obtain optimal performance on your data set.
# Test the linear support vector classifier
classifier = LinearSVC(C=1)
# Fit the classifier
classifier.fit(X_train, y_train)
score = f1_score(y_test, classifier.predict(X_test))
# Generate the P-R curve
y_prob = classifier.decision_function(X_test)
precision, recall, _ = precision_recall_curve(y_test, y_prob)
# Include the score in the title
yield 'Linear SVC (F1 score={:.3f})'.format(score), precision, recall
# Test the Nu support vector classifier
classifier = NuSVC(kernel='rbf', nu=0.5, gamma=1e-3)
# Fit the classifier
classifier.fit(X_train, y_train)
score = f1_score(y_test, classifier.predict(X_test))
# Generate the P-R curve
y_prob = classifier.decision_function(X_test)
precision, recall, _ = precision_recall_curve(y_test, y_prob)
# Include the score in the title
yield 'NuSVC (F1 score={:.3f})'.format(score), precision, recall
# Test the Ada boost classifier
classifier = AdaBoostClassifier(n_estimators=50, learning_rate=1.0, algorithm='SAMME.R')
# Fit the classifier
classifier.fit(X_train, y_train)
score = f1_score(y_test, classifier.predict(X_test))
# Generate the P-R curve
y_prob = classifier.decision_function(X_test)
precision, recall, _ = precision_recall_curve(y_test, y_prob)
# Include the score in the title
yield 'Ada Boost (F1 score={:.3f})'.format(score), precision, recall
# =====================================================================
def plot(results):
'''
Create a plot comparing multiple learners.
`results` is a list of tuples containing:
(title, precision, recall)
All the elements in results will be plotted.
'''
# Plot the precision-recall curves
fig = plt.figure(figsize=(6, 6))
fig.canvas.set_window_title('Classifying data from ' + URL)
for label, precision, recall in results:
plt.plot(recall, precision, label=label)
plt.title('Precision-Recall Curves')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
plt.legend(loc='lower left')
# Let matplotlib improve the layout
plt.tight_layout()
# ==================================
# Display the plot in interactive UI
plt.show()
# To save the plot to an image file, use savefig()
#plt.savefig('plot.png')
# Open the image file with the default image viewer
#import subprocess
#subprocess.Popen('plot.png', shell=True)
# To save the plot to an image in memory, use BytesIO and savefig()
# This can then be written to any stream-like object, such as a
# file or HTTP response.
#from io import BytesIO
#img_stream = BytesIO()
    #plt.savefig(img_stream, format='png')
#img_bytes = img_stream.getvalue()
#print('Image is {} bytes - {!r}'.format(len(img_bytes), img_bytes[:8] + b'...'))
# Closing the figure allows matplotlib to release the memory used.
plt.close()
# =====================================================================
if __name__ == '__main__':
# Download the data set from URL
print("Downloading data from {}".format(URL))
frame = download_data()
# Process data into feature and label arrays
print("Processing {} samples with {} attributes".format(len(frame.index), len(frame.columns)))
X_train, X_test, y_train, y_test = get_features_and_labels(frame)
# Evaluate multiple classifiers on the data
print("Evaluating classifiers")
results = list(evaluate_classifier(X_train, X_test, y_train, y_test))
# Display the results
print("Plotting the results")
plot(results)
| apache-2.0 |
seung-lab/znn-release | python/emirt/show.py | 2 | 8128 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 28 14:28:47 2015
@author: jingpeng
"""
import numpy as np
import matplotlib.pylab as plt
from matplotlib import colors
class CompareVol:
'''A plotting object which encapsulates functionality
described in compare_volumes.py'''
def __init__(self, vols, cmap='gray'):
#zero-padded copies of the volumes
self.vols = self.__pad(vols)
#Number of slices to display
self.Nz = min([elem.shape[0]-1 for elem in vols])
#Current z index
self.z = 0
#Colormap argument to be passed to imshow (set under 'c' keypress)
self.cmap = [cmap for elem in vols]
#Whether or not the plot at an index is a color plot
self.colorplot = [False for elem in vols]
#Defining a current index of the vols to be 'selected' for
# modification by (for example) adding color
self.selected = 0
def __pad(self, vols):
'''Zero-padding a list of input volumes to match by non-z dimensions'''
shapes = np.array(
[elem.shape for elem in vols]
)
max_shape = np.max(shapes,0)
pad_vols = [np.zeros((elem.shape[0], max_shape[1], max_shape[2]))
for elem in vols]
dim_diffs = [(max_shape - elem.shape) / 2
for elem in vols]
for i in range(len(pad_vols)):
if all(dim_diffs[i][1:] != 0):
pad_vols[i][
:,
dim_diffs[i][1]:-(dim_diffs[i][1]),
dim_diffs[i][2]:-(dim_diffs[i][2])
] = vols[i]
else:
pad_vols[i] = vols[i]
return pad_vols
def __norm(self, imslice):
#subtract the nonzero minimum from each slice
nonzero_indices = np.nonzero(imslice)
if len(nonzero_indices) > 0 and np.max(imslice) > 1:
nonzero_min = np.min(imslice[np.nonzero(imslice)])
res = np.copy(imslice)
res[np.nonzero(res)] = res[np.nonzero(res)] - nonzero_min + 1
else:
res = imslice
return res
def __show_slice(self):
'''Basic display function'''
for i in range(1,len(self.axs)+1):
ax = self.axs[i-1]
ax.images.pop()
normed_slice = self.__norm(self.vols[i-1][self.z,:,:])
ax.imshow(normed_slice, interpolation='nearest', cmap=self.cmap[i-1])
ax.set_xlabel( ' volume {}: slice {}'.format(i,self.z) )
self.fig.canvas.draw()
def __make_cmap(self, i):
#(0,0,0) = black
plot_colors = np.vstack(((0,0,0), np.random.rand(500,3)))
cmap = colors.ListedColormap(plot_colors)
return cmap
def __press(self, event):
# print 'press ' + event.key
if 'down' == event.key and self.z<self.Nz:
self.z+=1
elif 'up' == event.key and self.z>-self.Nz:
self.z-=1
elif 'c' == event.key:
#Swap between color display and b&w
self.colorplot[self.selected] = not self.colorplot[self.selected]
if self.colorplot[self.selected]:
new_cmap = self.__make_cmap(self.selected)
self.cmap[self.selected] = new_cmap
else:
self.cmap[self.selected] = 'gray'
elif 'j' == event.key:
self.z += 10
elif 'k' == event.key:
self.z -= 10
elif 'v' == event.key:
#Display the data values for the given data coordinate
xcoord, ycoord = int(event.xdata), int(event.ydata)
print xcoord, ycoord
print [vol[self.z, ycoord, xcoord] for vol in self.vols]
elif event.key in ['1','2','3','4','5','6','7','8','9']:
#Select a new axis
index = int(event.key)
if index - 1 < len(self.vols):
self.selected = index-1
self.__show_slice()
def vol_compare_slice(self):
self.fig, self.axs = plt.subplots(1,len(self.vols), sharex=True, sharey=True)
self.fig.canvas.mpl_connect('key_press_event', self.__press)
for i in range(1,len(self.vols)+1):
ax = self.axs[i-1]
normed_slice = self.__norm(self.vols[i-1][self.z,:,:])
ax.imshow(normed_slice, interpolation='nearest', cmap=self.cmap[i-1])
ax.set_xlabel( ' volume {0}: slice {1}'.format(i,self.z) )
plt.show()
def compare_volumes( vols ):
"""
compare 3D volumes
Parameters
----------
vols: list of 3D array
"""
com = CompareVol(vols)
com.vol_compare_slice()
return
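# Example usage (sketch; array sizes are arbitrary): given two volumes with the
# same z-depth, the arrow keys switch slices and 'c' toggles a random color map.
#   import numpy as np
#   a = np.random.rand(10, 64, 64)
#   b = np.random.rand(10, 64, 64)
#   compare_volumes([a, b])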
class VolSlider:
def __init__(self, fname, cmap='gray'):
if ".h5" in fname or ".hdf5" in fname:
import h5py
f = h5py.File(fname)
self.v1 = f['/main']
            if self.v1.ndim == 3:
                self.Nz = self.v1.shape[0]
            elif self.v1.ndim == 4:
                self.Nz = self.v1.shape[1]
else:
raise RuntimeError('invalid input matrix dimension')
else:
import emirt.io
self.v1 = emirt.io.imread( fname )
self.Nz = self.v1.shape[0]
self.z = 0
self.cmap = cmap
def __show_slice(self):
self.ax1.images.pop()
        if self.v1.ndim == 3:
self.ax1.imshow(self.v1[self.z,:,:], interpolation='nearest', cmap=self.cmap)
else:
self.ax1.imshow(self.v1[0,self.z,:,:], interpolation='nearest', cmap=self.cmap)
self.ax1.set_xlabel( 'first volume: slice {}'.format(self.z) )
self.fig.canvas.draw()
def __press(self, event):
# print 'press ' + event.key
if 'down' in event.key and self.z<self.Nz:
self.z+=1
elif 'up' in event.key and self.z>-self.Nz:
self.z-=1
self.__show_slice()
def show(self):
self.fig, self.ax1 = plt.subplots(1,1)
self.fig.canvas.mpl_connect('key_press_event', self.__press)
        if self.v1.ndim == 3:
self.ax1.imshow(self.v1[self.z,:,:], interpolation='nearest', cmap=self.cmap)
else:
self.ax1.imshow(self.v1[0,self.z,:,:], interpolation='nearest', cmap=self.cmap)
self.ax1.set_xlabel( 'first volume: slice {}'.format(self.z) )
def matshow(mat, xlabel=''):
import matplotlib.pylab as plt
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.matshow(mat, cmap=plt.cm.gray_r)
# add numbers
Nx, Ny = mat.shape
x,y = np.meshgrid(range(Nx), range(Ny))
for i,j in zip(x.ravel(),y.ravel()):
s = str( np.round(mat[i,j], decimals=2) )
if mat[i,j]<np.mean(mat):
ax1.annotate(s, xy=(i,j), ha='center', va='center')
else:
ax1.annotate(s, xy=(i,j), ha='center', va='center', color='white')
ax1.set_xlabel(xlabel)
plt.show()
def imshow(im):
import matplotlib.pylab as plt
plt.imshow(im)
# show the labeled image with random color
def random_color_show( im, mode='mat' ):
import matplotlib.pylab as plt
import matplotlib.colors as mcolor
# make a random color map, but the background should be black
assert(im.max()>0)
cmap_array = np.random.rand ( im.max()+1,3)
cmap_array[0,:] = [0,0,0]
cmap=mcolor.ListedColormap( cmap_array )
if mode=='im':
plt.imshow(im, cmap= cmap )
elif mode=='mat':
# approximate the matshow for compatability of subplot
nr, nc = im.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
plt.imshow(im, extent=extent, origin='upper',interpolation='nearest', cmap=cmap)
# plt.matshow(im, cmap=mcolor.ListedColormap( cmap_array ) )
else:
print 'unknown mode'
plt.show()
def progress(count, total, suffix=''):
import sys
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
| gpl-3.0 |
rcgillis/rcgillis.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
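# An illustrative, made-up example row (columns are tab-separated; `|` is used
# here only for readability):
#
# title | type | url_slug | venue | date | location | talk_url | description
# My Example Talk | Talk | example-talk | Some Conference | 2014-03-01 | Tokyo, Japan | | A short example abstract.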
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
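# For a hypothetical row with title "My Example Talk", url_slug "example-talk"
# and date 2014-03-01, the loop below writes _talks/2014-03-01-example-talk.md
# beginning roughly like:
#
# ---
# title: "My Example Talk"
# collection: talks
# type: "Talk"
# permalink: /talks/2014-03-01-example-talk
# venue: "Some Conference"
# date: 2014-03-01
# location: "Tokyo, Japan"
# ---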
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
bokeh/bokeh | bokeh/sphinxext/bokeh_dataframe.py | 1 | 3395 | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
""" Generate an inline visual representations of a pandas Dataframe.
This directive will embed the output of ``df.head().to_html()`` into the HTML
output.
For example:
.. code-block:: rest
:bokeh-dataframe:`bokeh.sampledata.sprint.sprint`
Will generate the output:
:bokeh-dataframe:`bokeh.sampledata.sprint.sprint`
"""
# -----------------------------------------------------------------------------
# Boilerplate
# -----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# Standard library imports
import importlib
# External imports
import pandas as pd
from docutils import nodes
from sphinx.errors import SphinxError
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
__all__ = (
"bokeh_dataframe",
"setup",
)
# -----------------------------------------------------------------------------
# General API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
def bokeh_dataframe(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Generate an inline visual representation of a single color palette.
If the HTML representation of the dataframe can not be created, a
SphinxError is raised to terminate the build.
For details on the arguments to this function, consult the Docutils docs:
http://docutils.sourceforge.net/docs/howto/rst-roles.html#define-the-role-function
"""
module_name, df_name = text.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except ImportError:
raise SphinxError(f"Unable to generate HTML table for {df_name}: couldn't import module {module_name}")
df = getattr(module, df_name, None)
if df is None:
raise SphinxError(f"Unable to generate HTML table for {df_name}: no Dataframe {df_name} in module {module_name}")
if not isinstance(df, pd.DataFrame):
raise SphinxError(f"{text!r} is not a pandas Dataframe")
node = nodes.raw("", df.head().to_html(), format="html")
return [node], []
def setup(app):
""" Required Sphinx extension setup function. """
app.add_role("bokeh-dataframe", bokeh_dataframe)
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| bsd-3-clause |
1412kid/computationalphysics_n2014301020035 | Chapter4/chapter4.1/problem4.9.py | 2 | 1826 | import matplotlib.pyplot as plt
import math
from random import *
x1,y1,x2,y2,x3,y3 = [-20],[5],[10],[-25],[-7],[15]
vx1,vy1,vx2,vy2,vx3,vy3 = [-math.sqrt(2)],[0],[math.sqrt(2)/2],[0],[math.sqrt(2)/2],[0]
t=[0]
dt=0.002
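# Euler-Cromer update used in the loop below (presumably AU / yr / solar-mass
# units, so G*M = 4*pi^2 for each body):
#     a_i = -4*pi^2 * sum_{j != i} (r_i - r_j) / |r_i - r_j|**3
#     v_i <- v_i + a_i * dt
#     r_i <- r_i + v_i * dt   (position updated with the *new* velocity)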
while t[-1]<50:
r12=math.sqrt((x1[-1]-x2[-1])**2+(y1[-1]-y2[-1])**2)
r13=math.sqrt((x1[-1]-x3[-1])**2+(y1[-1]-y3[-1])**2)
r23=math.sqrt((x2[-1]-x3[-1])**2+(y2[-1]-y3[-1])**2)
ox1=-4*math.pi*math.pi*(x1[-1]-x2[-1])/(r12**3)-4*math.pi*math.pi*(x1[-1]-x3[-1])/(r13**3)
ox2=-4*math.pi*math.pi*(x2[-1]-x1[-1])/(r12**3)-4*math.pi*math.pi*(x2[-1]-x3[-1])/(r23**3)
ox3=-4*math.pi*math.pi*(x3[-1]-x1[-1])/(r13**3)-4*math.pi*math.pi*(x3[-1]-x2[-1])/(r23**3)
oy1=-4*math.pi*math.pi*(y1[-1]-y2[-1])/(r12**3)-4*math.pi*math.pi*(y1[-1]-y3[-1])/(r13**3)
oy2=-4*math.pi*math.pi*(y2[-1]-y1[-1])/(r12**3)-4*math.pi*math.pi*(y2[-1]-y3[-1])/(r23**3)
oy3=-4*math.pi*math.pi*(y3[-1]-y1[-1])/(r13**3)-4*math.pi*math.pi*(y3[-1]-y2[-1])/(r23**3)
vx1.append(vx1[-1]+ox1*dt)
vx2.append(vx2[-1]+ox2*dt)
vx3.append(vx3[-1]+ox3*dt)
vy1.append(vy1[-1]+oy1*dt)
vy2.append(vy2[-1]+oy2*dt)
vy3.append(vy3[-1]+oy3*dt)
x1.append(x1[-1]+vx1[-1]*dt)
x2.append(x2[-1]+vx2[-1]*dt)
x3.append(x3[-1]+vx3[-1]*dt)
y1.append(y1[-1]+vy1[-1]*dt)
y2.append(y2[-1]+vy2[-1]*dt)
y3.append(y3[-1]+vy3[-1]*dt)
t.append(t[-1]+dt)
fig=plt.figure(figsize=[8,8])
plt.plot(x1,y1,label='sun 1',color='gold')
plt.plot(x2,y2,label='sun 2',color='pink')
plt.plot(x3,y3,label='sun 3',color='brown')
plt.scatter([x1[-1],x2[-1],x3[-1]],[y1[-1],y2[-1],y3[-1]],color="black",s=50)
plt.scatter([x1[0],x2[0],x3[0]],[y1[0],y2[0],y3[0]],color="red",s=50)
plt.legend(loc='upper right')
plt.xlabel('x(AU)')
plt.ylabel('y(AU)')
plt.title('three body problem')
plt.show()
| mit |
simonvh/gimmemotifs | setup.py | 1 | 5099 | from setuptools import setup, find_packages
from setuptools import Extension, Command
from distutils.command.build import build
from setuptools.command.install import install
import distutils.command.install as orig
import os
import glob
import sys
from io import open
from compile_externals import compile_all
import versioneer
CONFIG_NAME = "gimmemotifs.cfg"
DESCRIPTION = "GimmeMotifs is a motif prediction pipeline."
with open('README.md', encoding='utf-8') as f:
long_description = f.read().strip("\n")
# are we in the conda build environment?
conda_build = os.environ.get("CONDA_BUILD")
module1 = Extension('gimmemotifs.c_metrics', sources = ['gimmemotifs/c_metrics.c'])
MOTIF_BINS = {
"MDmodule": ["src/MDmodule/MDmodule"],
"BioProspector": ["src/BioProspector/BioProspector"],
"Posmo": ["src/posmo/posmo", "src/posmo/clusterwd"],
"AMD": ["src/AMD/AMD.bin"],
}
cmdclass = versioneer.get_cmdclass()
my_build_py = cmdclass["build_py"]
class build_tools(my_build_py):
user_options = []
def initialize_options(self):
my_build_py.initialize_options(self)
self.build_base = None
self.build_lib = None
def finalize_options(self):
my_build_py.finalize_options(self)
self.set_undefined_options('build',('build_base', 'build_base'))
self.set_undefined_options('build',('build_lib', 'build_lib'))
def run(self):
if not self.dry_run:
src_dir = os.path.join(self.build_base, "src")
target_dir = os.path.join(self.build_lib, 'gimmemotifs/included_tools')
#package_data={'gimmemotifs.data':['data/cfg/*']},
self.copy_tree("src/", src_dir)
# mkpath is a distutils helper to create directories
self.mkpath(target_dir)
compile_all(src_dir=src_dir)
for exes in MOTIF_BINS.values():
for exe in exes:
if os.path.exists(exe):
self.copy_file(exe, target_dir)
exe = os.path.join(self.build_base, exe)
if os.path.exists(exe):
self.copy_file(exe, target_dir)
self.copy_tree(
os.path.join(src_dir, "ChIPMunk"),
os.path.join(target_dir, "ChIPMunk"))
self.copy_tree(
os.path.join(src_dir,"HMS"),
os.path.join(target_dir, "HMS"))
self.copy_file(
os.path.join(src_dir,"MotifSampler/MotifSampler_x86_64"),
os.path.join(target_dir, "MotifSampler"))
self.copy_file(
os.path.join(src_dir,"MotifSampler/CreateBackgroundModel_x86_64"),
os.path.join(target_dir, "CreateBackgroundModel"))
self.copy_file(
os.path.join(src_dir,"Improbizer/ameme_x86_64"),
os.path.join(target_dir, "ameme"))
if os.path.exists("src/weblogo"):
self.copy_tree("src/weblogo",
os.path.join(target_dir, "weblogo"))
my_build_py.run(self)
cmdclass["build_py"] = build_tools
setup (
name = 'gimmemotifs',
version = versioneer.get_version(),
long_description = long_description,
long_description_content_type = 'text/markdown',
description = DESCRIPTION,
author = 'Simon van Heeringen',
author_email = '[email protected]',
url = 'https://github.com/simonvh/gimmemotifs/',
download_url = 'https://github.com/simonvh/gimmemotifs/tarball/' + versioneer.get_version(),
license = 'MIT',
packages=find_packages(),
scripts=[
'scripts/track2fasta.py',
'scripts/gimme',
'scripts/combine_peaks',
'scripts/coverage_table',
],
include_package_data = True,
ext_modules = [module1],
cmdclass = cmdclass,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
install_requires = [
"setuptools >= 0.7",
"numpy",
"scipy >= 0.9.0",
"matplotlib >= 2",
"jinja2",
"pyyaml >= 3.10",
"pybedtools",
"statsmodels",
"scikit-learn",
"sklearn-contrib-lightning",
"seaborn",
"pysam",
"xgboost >= 0.71",
"xdg",
"diskcache",
"xxhash",
"configparser",
"six",
"future",
"genomepy",
"tqdm",
"pillow",
],
)
| mit |
altairpearl/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 15 | 13124 | import sys
import numpy as np
from nose import SkipTest
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# import reload
version = sys.version_info
if version[0] == 3:
# Python 3+ import for reload. Builtin in Python2
if version[1] == 3:
reload = None
else:
from importlib import reload
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
tested_length = min(clf_lda_svd.explained_variance_ratio_.shape[0],
clf_lda_eigen.explained_variance_ratio_.shape[0])
# NOTE: clf_lda_eigen.explained_variance_ratio_ does not have
# n_components entries; truncate it to the length of
# clf_lda_svd.explained_variance_ratio_ before comparing.
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_[:tested_length])
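# test_lda_orthogonality builds perfectly symmetric class clouds so the
# estimated class means are exact, then checks that the fitted transform
# whitens the within-class scatter (identity covariance) and aligns the
# two symmetry axes of the kite with the two discriminant components.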
def test_lda_orthogonality():
# Arrange four classes with their means in a kite-shaped pattern:
# the longer diagonal should be mapped onto the first component and
# the shorter diagonal onto the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
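# test_lda_scaling: the three features are scaled by 1, 100 and 10000,
# so a numerically robust solver should still separate the two
# well-separated classes with a perfect score.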
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assert that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
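# test_qda_priors: with a near-zero prior on the first class, more
# samples should be assigned to class 2 than with the priors estimated
# from the training data.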
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
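# test_qda_regularization: reg_param shrinks each class covariance
# estimate toward the identity (roughly (1 - reg_param) * Sigma +
# reg_param * I), which keeps it well conditioned when a feature is
# constant or a class has fewer samples than features.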
def test_qda_regularization():
# The default is reg_param=0.0, which causes issues
# when there is a constant variable.
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
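# The old sklearn.lda and sklearn.qda modules are kept as deprecated
# aliases: importing them should emit a DeprecationWarning and expose
# LDA / QDA as the renamed discriminant-analysis classes.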
def test_deprecated_lda_qda_deprecation():
if reload is None:
raise SkipTest("Can't reload module on Python3.3")
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
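# test_covariance: the _cov helper should return a symmetric matrix both
# for the plain 'empirical' estimator and for the 'auto' (shrunk)
# estimator, even on strongly correlated features.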
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
quasiben/bokeh | examples/app/movies/main.py | 6 | 3978 | from os.path import dirname, join
import numpy as np
import pandas.io.sql as psql
import sqlite3 as sql
from bokeh.plotting import Figure
from bokeh.models import ColumnDataSource, HoverTool, HBox, VBoxForm
from bokeh.models.widgets import Slider, Select, TextInput
from bokeh.io import curdoc
from bokeh.sampledata.movies_data import movie_path
conn = sql.connect(movie_path)
query = open(join(dirname(__file__), 'query.sql')).read()
movies = psql.read_sql(query, conn)
movies["color"] = np.where(movies["Oscars"] > 0, "orange", "grey")
movies["alpha"] = np.where(movies["Oscars"] > 0, 0.9, 0.25)
movies.fillna(0, inplace=True) # just replace missing values with zero
movies["revenue"] = movies.BoxOffice.apply(lambda x: '{:,d}'.format(int(x)))
with open(join(dirname(__file__), "razzies-clean.csv")) as f:
razzies = f.read().splitlines()
movies.loc[movies.imdbID.isin(razzies), "color"] = "purple"
movies.loc[movies.imdbID.isin(razzies), "alpha"] = 0.9
axis_map = {
"Tomato Meter": "Meter",
"Numeric Rating": "numericRating",
"Number of Reviews": "Reviews",
"Box Office (dollars)": "BoxOffice",
"Length (minutes)": "Runtime",
"Year": "Year",
}
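# axis_map maps the human-readable labels shown in the axis Select
# widgets to the corresponding column names in the movies DataFrame.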
# Create Input controls
reviews = Slider(title="Minimum number of reviews", value=80, start=10, end=300, step=10)
min_year = Slider(title="Year released", start=1940, end=2014, value=1970, step=1)
max_year = Slider(title="End Year released", start=1940, end=2014, value=2014, step=1)
oscars = Slider(title="Minimum number of Oscar wins", start=0, end=4, value=0, step=1)
boxoffice = Slider(title="Dollars at Box Office (millions)", start=0, end=800, value=0, step=1)
genre = Select(title="Genre", value="All",
options=open(join(dirname(__file__), 'genres.txt')).read().split())
director = TextInput(title="Director name contains")
cast = TextInput(title="Cast names contains")
x_axis = Select(title="X Axis", options=sorted(axis_map.keys()), value="Tomato Meter")
y_axis = Select(title="Y Axis", options=sorted(axis_map.keys()), value="Number of Reviews")
# Create Column Data Source that will be used by the plot
source = ColumnDataSource(data=dict(x=[], y=[], color=[], title=[], year=[], revenue=[]))
hover = HoverTool(tooltips=[
("Title","@title"),
("Year", "@year"),
("$", "@revenue")
])
p = Figure(plot_height=600, plot_width=800, title="", toolbar_location=None, tools=[hover])
p.circle(x="x", y="y", source=source, size=7, color="color", line_color=None, fill_alpha="alpha")
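# select_movies applies the current value of every widget as a filter on
# the movies DataFrame and returns the matching rows.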
def select_movies():
genre_val = genre.value
director_val = director.value.strip()
cast_val = cast.value.strip()
selected = movies[
(movies.Reviews >= reviews.value) &
(movies.BoxOffice >= (boxoffice.value * 1e6)) &
(movies.Year >= min_year.value) &
(movies.Year <= max_year.value) &
(movies.Oscars >= oscars.value)
]
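# The explicit '== True' below keeps only rows where str.contains
# returned True, dropping NaN results produced for non-string entries.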
if (genre_val != "All"):
selected = selected[selected.Genre.str.contains(genre_val)==True]
if (director_val != ""):
selected = selected[selected.Director.str.contains(director_val)==True]
if (cast_val != ""):
selected = selected[selected.Cast.str.contains(cast_val)==True]
return selected
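# update re-runs the query whenever a widget changes and pushes the new
# columns into the ColumnDataSource, which redraws the scatter plot.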
def update(attrname, old, new):
df = select_movies()
x_name = axis_map[x_axis.value]
y_name = axis_map[y_axis.value]
p.xaxis.axis_label = x_axis.value
p.yaxis.axis_label = y_axis.value
p.title = "%d movies selected" % len(df)
source.data = dict(
x=df[x_name],
y=df[y_name],
color=df["color"],
title=df["Title"],
year=df["Year"],
revenue=df["revenue"],
alpha=df["alpha"],
)
controls = [reviews, boxoffice, genre, min_year, max_year, oscars, director, cast, x_axis, y_axis]
for control in controls:
control.on_change('value', update)
inputs = HBox(VBoxForm(*controls), width=300)
update(None, None, None) # initial load of the data
curdoc().add_root(HBox(inputs, p, width=1100))
| bsd-3-clause |