repo_name | path | copies | size | content | license
---|---|---|---|---|---
kislayabhi/pgmpy | pgmpy/inference/ExactInference.py | 1 | 16262 | #!/usr/bin/env python3
import numpy as np
from pgmpy.inference import Inference
from pgmpy.factors.Factor import factor_product
class VariableElimination(Inference):
def _variable_elimination(self, variables, operation, evidence=None, elimination_order=None):
"""
Implementation of a generalized variable elimination.
Parameters
----------
variables: list, array-like
variables that are not to be eliminated.
operation: str ('marginalize' | 'maximize')
The operation to perform when eliminating a variable.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list, array-like
list of variables representing the order in which they
are to be eliminated. If None, the order is computed automatically.
"""
# Dealing with the case when variables is not provided.
if not variables:
all_factors = []
for factor_li in self.factors.values():
all_factors.extend(factor_li)
return set(all_factors)
eliminated_variables = set()
working_factors = {node: {factor for factor in self.factors[node]}
for node in self.factors}
# Dealing with evidence. Reducing factors over it before VE is run.
if evidence:
for evidence_var in evidence:
for factor in working_factors[evidence_var]:
factor_reduced = factor.reduce('{evidence_var}_{state}'.format(evidence_var=evidence_var,
state=evidence[evidence_var]),
inplace=False)
for var in factor_reduced.scope():
working_factors[var].remove(factor)
working_factors[var].add(factor_reduced)
del working_factors[evidence_var]
# TODO: Modify it to find the optimal elimination order
if not elimination_order:
elimination_order = list(set(self.variables) -
set(variables) -
set(evidence.keys() if evidence else []))
elif any(var in elimination_order for var in
set(variables).union(set(evidence.keys() if evidence else []))):
raise ValueError("Elimination order contains variables which are in"
" variables or evidence args")
for var in elimination_order:
# Removing all the factors containing the variables which are
# eliminated (as all the factors should be considered only once)
factors = [factor for factor in working_factors[var]
if not set(factor.variables).intersection(eliminated_variables)]
phi = factor_product(*factors)
phi = getattr(phi, operation)(var, inplace=False)
del working_factors[var]
for variable in phi.variables:
working_factors[variable].add(phi)
eliminated_variables.add(var)
final_distribution = set()
for node in working_factors:
factors = working_factors[node]
for factor in factors:
if not set(factor.variables).intersection(eliminated_variables):
final_distribution.add(factor)
query_var_factor = {}
for query_var in variables:
phi = factor_product(*final_distribution)
phi.marginalize(list(set(variables) - set([query_var])))
query_var_factor[query_var] = phi.normalize(inplace=False)
return query_var_factor
def query(self, variables, evidence=None, elimination_order=None):
"""
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list
order in which the variables are to be eliminated; if nothing is
provided, the order is computed automatically
Examples
--------
>>> from pgmpy.inference import VariableElimination
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> phi_query = inference.query(['A', 'B'])
"""
return self._variable_elimination(variables, 'marginalize',
evidence=evidence, elimination_order=elimination_order)
def max_marginal(self, variables=None, evidence=None, elimination_order=None):
"""
Computes the max-marginal over the variables given the evidence.
Parameters
----------
variables: list
list of variables over which we want to compute the max-marginal.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list
order in which the variables are to be eliminated; if nothing is
provided, the order is computed automatically
Examples
--------
>>> from pgmpy.inference import VariableElimination
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> phi_query = inference.max_marginal(['A', 'B'])
"""
if not variables:
variables = []
final_distribution = self._variable_elimination(variables, 'maximize',
evidence=evidence,
elimination_order=elimination_order)
# _variable_elimination returns a set of factors when no variables are
# passed and a dict keyed by variable otherwise; keep just the factors.
if isinstance(final_distribution, dict):
final_distribution = final_distribution.values()
return np.max(factor_product(*final_distribution).values)
def map_query(self, variables=None, evidence=None, elimination_order=None):
"""
Computes the MAP Query over the variables given the evidence.
Parameters
----------
variables: list
list of variables over which we want to compute the MAP query.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list
order in which the variables are to be eliminated; if nothing is
provided, the order is computed automatically
Examples
--------
>>> from pgmpy.inference import VariableElimination
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> phi_query = inference.map_query(['A', 'B'])
"""
if not variables:
variables = []
final_distribution = self._variable_elimination(variables, 'maximize',
evidence=evidence,
elimination_order=elimination_order)
# _variable_elimination returns a set of factors when no variables are
# passed and a dict keyed by variable otherwise; keep just the factors.
if isinstance(final_distribution, dict):
final_distribution = final_distribution.values()
distribution = factor_product(*final_distribution)
argmax = np.argmax(distribution.values)
assignment = distribution.assignment(argmax)[0]
map_query_results = {}
for var_assignment in assignment:
var, value = var_assignment.split('_')
map_query_results[var] = int(value)
return map_query_results
def induced_graph(self, elimination_order):
"""
Returns the induced graph formed by running Variable Elimination on the network.
Parameters
----------
elimination_order: list, array like
List of variables in the order in which they are to be eliminated.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import VariableElimination
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> inference.induced_graph(['C', 'D', 'A', 'B', 'E'])
<networkx.classes.graph.Graph at 0x7f34ac8c5160>
"""
class BeliefPropagation(Inference):
"""
Class for performing inference using Belief Propagation method.
Creates a Junction Tree or Clique Tree (JunctionTree class) for the input
probabilistic graphical model and performs calibration of the junction tree
so formed using belief propagation.
Parameters
----------
model: BayesianModel, MarkovModel, FactorGraph, JunctionTree
model for which inference is to be performed
"""
def __init__(self, model):
from pgmpy.models import JunctionTree
super(BeliefPropagation, self).__init__(model)
if not isinstance(model, JunctionTree):
self.junction_tree = model.to_junction_tree()
else:
self.junction_tree = model
self.clique_beliefs = {}
self.sepset_beliefs = {}
def get_cliques(self):
"""
Returns cliques used for belief propagation.
"""
return self.junction_tree.nodes()
def get_clique_beliefs(self):
"""
Returns clique beliefs. Should be called after the clique tree (or
junction tree) is calibrated.
"""
return self.clique_beliefs
def get_sepset_beliefs(self):
"""
Returns sepset beliefs. Should be called after clique tree (or junction
tree) is calibrated.
"""
return self.sepset_beliefs
def calibrate(self):
"""
Calibration using belief propagation in junction tree or clique tree.
Uses the Lauritzen-Spiegelhalter algorithm, i.e. belief-update message passing.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors import TabularCPD
>>> from pgmpy.inference import BeliefPropagation
>>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'),
... ('intel', 'SAT'), ('grade', 'letter')])
>>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
>>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
>>> grade_cpd = TabularCPD('grade', 3,
... [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
... [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
... [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
... evidence=['diff', 'intel'],
... evidence_card=[2, 3])
>>> sat_cpd = TabularCPD('SAT', 2,
... [[0.1, 0.2, 0.7],
... [0.9, 0.8, 0.3]],
... evidence=['intel'], evidence_card=[3])
>>> letter_cpd = TabularCPD('letter', 2,
... [[0.1, 0.4, 0.8],
... [0.9, 0.6, 0.2]],
... evidence=['grade'], evidence_card=[3])
>>> G.add_cpds(diff_cpd, intel_cpd, grade_cpd, sat_cpd, letter_cpd)
>>> bp = BeliefPropagation(G)
>>> bp.calibrate()
Reference
---------
Algorithm 10.3 Calibration using belief propagation in clique tree
Probabilistic Graphical Models: Principles and Techniques
Daphne Koller and Nir Friedman.
"""
import networkx as nx
# Initialize clique beliefs as well as sepset beliefs
self.clique_beliefs = {clique: self.junction_tree.get_factors(clique)
for clique in self.junction_tree.nodes()}
self.sepset_beliefs = {frozenset(x[0]).intersection(frozenset(x[1])): None
for x in self.junction_tree.edges()}
def _update_beliefs(sending_clique, receiving_clique):
"""
This is the belief-update method.
Takes the belief of one clique and uses it to update the belief of the
neighboring ones.
"""
sepset = frozenset(sending_clique).intersection(frozenset(receiving_clique))
print(sending_clique, sepset, receiving_clique)
# \sigma_{i \rightarrow j} = \sum_{C_i - S_{i, j}} \beta_i
# marginalize the clique over the sepset
sigma = self.clique_beliefs[sending_clique].marginalize(
list(frozenset(sending_clique) - sepset), inplace=False)
print(sigma)
# \beta_j = \beta_j * \frac{\sigma_{i \rightarrow j}}{\mu_{i, j}}
self.clique_beliefs[receiving_clique] *= (sigma / self.sepset_beliefs[sepset]
if self.sepset_beliefs[sepset] else sigma)
print(self.clique_beliefs[receiving_clique])
# \mu_{i, j} = \sigma_{i \rightarrow j}
self.sepset_beliefs[sepset] = sigma
print(self.sepset_beliefs[sepset])
def _converged():
"""
Checks whether the calibration has converged or not. At convergence
the sepset belief would be precisely the sepset marginal.
Formally, at convergence this condition would be satisfied
\sum_{C_i - S_{i, j}} \beta_i = \sum_{C_j - S_{i, j}} \beta_j = \mu_{i, j}
"""
for edge in self.junction_tree.edges():
sepset = frozenset(edge[0]).intersection(frozenset(edge[1]))
marginal_1 = self.clique_beliefs[edge[0]].marginalize(list(frozenset(edge[0]) - sepset), inplace=False)
marginal_2 = self.clique_beliefs[edge[1]].marginalize(list(frozenset(edge[1]) - sepset), inplace=False)
if not np.allclose(marginal_1.values, marginal_2.values, rtol=1e-4):
return False
return True
for clique in self.junction_tree.nodes():
if not _converged():
neighbors = self.junction_tree.neighbors(clique)
# update root's belief using neighbor cliques' beliefs
# upward pass
for neighbor_clique in neighbors:
_update_beliefs(neighbor_clique, clique)
bfs_edges = nx.algorithms.breadth_first_search.bfs_edges(self.junction_tree, clique)
# update the beliefs of all the nodes starting from the root to leaves using root's belief
# downward pass
for edge in bfs_edges:
_update_beliefs(edge[0], edge[1])
else:
break
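# A hedged usage sketch (reusing G from the calibrate() docstring example
# above): once calibrate() has run, the clique and sepset beliefs can be
# read back through the accessors defined on this class.
#
# bp = BeliefPropagation(G)
# bp.calibrate()
# for clique in bp.get_cliques():
#     print(clique, bp.get_clique_beliefs()[clique].scope())
# print(list(bp.get_sepset_beliefs().keys()))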
| mit |
gwaygenomics/nf1_inactivation | scripts/describe_data.py | 1 | 4903 | '''
Gregory Way 2016
NF1 Inactivation Classifier for Glioblastoma
scripts/describe_data.py
Get sample size and number of NF1 mutations
Usage:
Run on the command line 'python describe_data.py'
There are also optional flags
--type Ignore different types of mutations (defaults to 'Silent')
--tissue Tissue to consider (defaults to 'GBM')
--gene The gene to query (defaults to 'NF1')
--out-file Output file to write mutation counts
--mut-file Mutation description table loc (tables/nf1_mutations.tsv)
Output:
Three distinct files including:
1) A tissue sample dictionary for RNAseq measured samples
2) A table tracking the number of mutations per tissue of specific genes
3) A table of the specific mutations for each of the NF1 mutated samples
'''
import pandas as pd
import pickle
import argparse
####################################
# Load Command Arguments
####################################
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--type", dest="mut_types",
help="different types of mutations to write",
default='Silent')
parser.add_argument("-t", "--tissue", dest="tissue",
help="tissues to query", default='GBM')
parser.add_argument("-g", "--gene", dest="gene",
help="genes to query", default='NF1')
parser.add_argument("-f", "--out-file", dest="out_fh",
help="filename of where to save output",
default='tables/sample_mut_table.tsv')
parser.add_argument("-o", "--mut-file", dest="mut_fh",
help="filename of where to save mutation table output",
default='tables/nf1_mutations.tsv')
args = parser.parse_args()
####################################
# Load Constants
####################################
MUT_TYPES = args.mut_types.split(',')
TISSUE = args.tissue
GENE = args.gene
OUT_FH = args.out_fh
MUT_FH = args.mut_fh
####################################
# Load data
####################################
mutation = pd.read_csv('data/PANCAN_mutation', delimiter='\t')
clinical = pd.read_csv('data/PANCAN_clinicalMatrix', delimiter='\t')
rnaseq = pd.read_csv('data/HiSeqV2', delimiter='\t', index_col=0)
tissue_dict = pd.read_csv('tables/tcga_dictionary.tsv', delimiter='\t')
####################################
# Determine Tissue Specific Samples
####################################
tissues = clinical._primary_disease.dropna().unique().tolist()
tissues.remove('FFPE pilot phase II')
# Get full tissue name
tissue_name = tissue_dict[tissue_dict['acronym'] == TISSUE]['tissue'].values[0]
# Initialize a tissue sample dictionary be used to store sample IDs
tissue_dict_samples = {}
# Subset clinical information to tissue
clin_sub = clinical.loc[clinical['_primary_disease'] == tissue_name, :]
samp_size = clin_sub.shape[0]
# What are the sample ids
sample_ids = clin_sub['sampleID'].tolist()
# RNAseq samples
rnaseq_samp = set(sample_ids) & set(rnaseq.columns)
# populate the tissue sample dictionary
tissue_dict_samples[TISSUE] = sample_ids
# Subset the mutation file to only these samples
mut_samples = mutation.loc[mutation['#sample'].isin(sample_ids), :]
# Samples that have both mutation calls and RNAseq measurements
rnaseq_pos = set(mut_samples['#sample']) & set(rnaseq.columns)
# Get types of mutations
nf1_mutations = mut_samples.loc[mut_samples['gene'] == GENE, :]
# Filter mutations
for mut_type in MUT_TYPES:
nf1_mutations = nf1_mutations.loc[nf1_mutations['effect'] != mut_type, :]
# Samples with non-silent mutations
nf1_mut_samples = set(nf1_mutations['#sample'].tolist())
# Number of samples with NF1 mutations and RNAseq measurements
nf1_rnaseq = nf1_mut_samples.intersection(rnaseq_samp)
# Compile info for input tissue
tissue_info = [
TISSUE,
tissue_name,
samp_size,
len(set(mut_samples['#sample'].tolist())),
len(rnaseq_samp),
len(rnaseq_pos),
len(nf1_mut_samples),
len(nf1_rnaseq),
]
# Subset the mutations we have RNAseq info for
rnaseq_nf1 = nf1_mutations[nf1_mutations['#sample'].isin(nf1_rnaseq)]
####################################
# Save output files to disk
####################################
with open('output/tissue_sample_dictionary.p', 'wb') as pickle_fh:
pickle.dump(tissue_dict_samples, pickle_fh)
tissue_info = pd.DataFrame(tissue_info).T
tissue_info.columns = ['acronym', 'disease', 'total samples',
'samples with mutations', 'samples with RNAseq',
'samples with mutations and RNAseq',
'total NF1 mutations', 'NF1 mutations with RNAseq']
tissue_info.to_csv(OUT_FH, sep='\t', index=False)
use_cols = ['#sample', 'chr', 'start', 'end', 'reference', 'alt', 'gene',
'effect', 'Amino_Acid_Change']
rnaseq_nf1_subset = rnaseq_nf1[use_cols]
rnaseq_nf1_subset.to_csv(MUT_FH, sep='\t', index=False)
| bsd-3-clause |
badbytes/pymeg | meg/spectral.py | 1 | 106416 | """
Numerical python functions written for compatibility with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order Runge-Kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
:meth:`contiguous_regions`
return the indices of the regions spanned by some logical mask
:meth:`cross_from_below`
return the indices where a 1D array crosses a threshold from below
:meth:`cross_from_above`
return the indices where a 1D array crosses a threshold from above
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. _htmlonly:
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
Make a 2D grid from two 1D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
from numpy import angle, imag, real
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kw):
warnings.warn("use numpy.linspace", DeprecationWarning)
return np.linspace(*args, **kw)
def meshgrid(x,y):
warnings.warn("use numpy.meshgrid", DeprecationWarning)
return np.meshgrid(x,y)
def mean(x, dim=None):
warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
if len(x)==0: return None
return np.mean(x, axis=dim)
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
"No window function; simply return x"
return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
'convolve x with y'
warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
return np.convolve(x,y,mode)
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
warnings.warn("psd, csd, and specgram have changed to scale their "
"densities by the sampling frequency for better MatLab "
"compatibility. You can pass scale_by_freq=False to disable "
"this behavior. Also, one-sided densities are scaled by a "
"factor of 2.")
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
# Matlab divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
scaling_factor /= Fs
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs,n), np.complex_)
Pfft = np.zeros((numFreqs,n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
Pfft[:,i] = fx[:numFreqs]
#Pfft_real[:,i] = real(fx[:numFreqs])
#Pfft_imag[:,i] = imag(fx[:numFreqs]) #* fx[:numFreqs]#* imag(fy[:numFreqs])
#return Pxy, Pfft ####
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2. Also include
# scaling factors for one-sided densities and dividing by the sampling
# frequency, if desired.
Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
#Pfft *= scaling_factor / (np.abs(windowVals)**2).sum()
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
return Pxy, freqs, t, Pfft
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
Must be even; a power of 2 is most efficient. The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
matplotlib it is a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs, Pfft = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
return Pxx.real,freqs, Pfft, Pxx.imag
psd.__doc__ = psd.__doc__ % kwdocd
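# A hedged usage sketch (assuming this module is importable as
# ``meg.spectral``). Unlike matplotlib.mlab.psd, this modified psd() returns
# four values: the real PSD, the frequencies, the averaged per-segment FFT
# (Pfft), and the imaginary part of the averaged spectrum (near zero for x
# against itself).
#
# import numpy as np
# from meg.spectral import psd
#
# fs = 250.
# t = np.arange(0, 10, 1. / fs)
# x = np.sin(2 * np.pi * 12 * t) + 0.5 * np.random.randn(len(t))
# Pxx, freqs, Pfft, Pimag = psd(x, NFFT=512, Fs=fs, noverlap=256)
# print(freqs[np.argmax(Pxx)])   # expect a peak near 12 Hz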
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t, Pfft = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
Pfft = Pfft.mean(axis=1)
return Pxy, freqs, Pfft
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
length segments and the PSD of each section is computed. The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
frequencies is returned. If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t, Pfft= _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[NFFT//2:]-Fs,freqs[:NFFT//2]))
Pxx = np.concatenate((Pxx[NFFT//2:,:],Pxx[:NFFT//2,:]),0)
return Pxx, freqs, t, Pfft
specgram.__doc__ = specgram.__doc__ % kwdocd
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect, since
the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
# psd() in this module returns (Pxx, freqs, Pfft, Pxx_imag) and csd()
# returns (Pxy, freqs, Pfft); unpack accordingly.
Pxx, f, _, _ = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f, _, _ = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f, _ = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
cohere.__doc__ = cohere.__doc__ % kwdocd
def corrcoef(*args):
"""
corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
coefficients for the columns of *X*
corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of
correlation coefficients for *x* and *y*.
Numpy arrays can be real or complex.
The correlation matrix is defined from the covariance matrix *C*
as
.. math::
r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
"""
warnings.warn("Use numpy.corrcoef", DeprecationWarning)
kw = dict(rowvar=False)
return np.corrcoef(*args, **kw)
def polyfit(*args, **kwargs):
u"""
polyfit(*x*, *y*, *N*)
Do a best fit polynomial of order *N* of *y* to *x*. Return value
is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for
*N* = 2::
p2*x0^2 + p1*x0 + p0 = y0
p2*x1^2 + p1*x1 + p0 = y1
p2*x2^2 + p1*x2 + p0 = y2
.....
p2*xk^2 + p1*xk + p0 = yk
Method: if *X* is the Vandermonde matrix computed from *x* (see
`vandermonds
<http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
polynomial least squares solution is given by the '*p*' in
X*p = y
where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
*p* is a *N*+1 length vector, and *y* is a (len(*x*)
\N{MULTIPLICATION SIGN} 1) vector.
This equation can be solved as
.. math::
p = (X_t X)^-1 X_t y
where :math:`X_t` is the transpose of *X* and -1 denotes the
inverse. Numerically, however, this is not a good method, so we
use :func:`numpy.linalg.lstsq`.
For more info, see `least squares fitting
<http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
but note that the *k*'s and *n*'s in the superscripts and
subscripts on that page differ from the notation used here. The
linear algebra is correct, however.
.. seealso::
:func:`polyval`
polyval function
"""
warnings.warn("use numpy.poyfit", DeprecationWarning)
return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
"""
*y* = polyval(*p*, *x*)
*p* is a vector of polynomial coeffients and *y* is the polynomial
evaluated at *x*.
Example code to remove a polynomial (quadratic) trend from y::
p = polyfit(x, y, 2)
trend = polyval(p, x)
resid = y - trend
.. seealso::
:func:`polyfit`
polyfit function
"""
warnings.warn("use numpy.polyval", DeprecationWarning)
return np.polyval(*args, **kwargs)
def vander(*args, **kwargs):
"""
*X* = vander(*x*, *N* = *None*)
The Vandermonde matrix of vector *x*. The *i*-th column of *X* is the
*i*-th power of *x*. *N* is the maximum power to compute; if *N* is
*None* it defaults to len(*x*).
"""
warnings.warn("Use numpy.vander()", DeprecationWarning)
return np.vander(*args, **kwargs)
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Cxy, Phase, freqs = cohere_pairs(X, ij, ...)
Compute the coherence for all pairs in *ij*. *X* is a
(*numSamples*, *numCols*) numpy array. *ij* is a list of tuples
(*i*, *j*). Each tuple is a pair of indexes into the columns of *X*
for which you want to compute coherence. For example, if *X* has 64
columns, and you want to compute all nonredundant pairs, define *ij*
as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i, j) )
The other function arguments, except for *preferSpeedOverMemory*
(see below), are explained in the help string of :func:`psd`.
Return value is a tuple (*Cxy*, *Phase*, *freqs*).
- *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that
pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of
dictionary keys is ``len(ij)``.
- *Phase*: a dictionary of phases of the cross spectral density at
each frequency for each pair. The keys are ``(i,j)``.
- *freqs*: a vector of frequencies, equal in length to either
the coherence or phase vectors for any (*i*, *j*) key. E.g.,
to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If *N* is the
number of pairs, this function is O(N) for most of the heavy
lifting, whereas calling cohere for each pair is
O(N\N{SUPERSCRIPT TWO}). However, because of the caching, it is
also more memory intensive, making 2 additional complex arrays
with approximately the same number of elements as *X*.
The parameter *preferSpeedOverMemory*, if *False*, limits the
caching by only making one, rather than two, complex cache arrays.
This is useful if memory becomes critical. Even when
*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will
still give significant performance gains over calling
:func:`cohere` for each pair, and will use substantially less
memory than if *preferSpeedOverMemory* is *True*. In my tests
with a (43000, 64) array over all non-redundant pairs,
*preferSpeedOverMemory* = *True* delivered a 33% performance boost
on a 1.7 GHz Athlon with 512 MB RAM compared with
*preferSpeedOverMemory* = *False*. But both solutions were more
than 10x faster than naively crunching all possible pairs through
cohere.
.. seealso::
:file:`test/cohere_pairs_test.py` in the src tree
For an example script that shows that this
:func:`cohere_pairs` and :func:`cohere` give the same
results for a given pair.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
seen = {}
for i,j in ij:
seen[i]=1; seen[j] = 1
allColumns = seen.keys()
Ncols = len(allColumns)
del seen
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
# cache the FFT of every windowed, detrended NFFT length segment
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), X.dtype))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
normVal = _norm(windowVals)**2
for iColNum, iCol in enumerate(allColumns):
progressCallback(iColNum/Ncols, 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
FFTConjSlices[iCol] = np.conjugate(Slices)
Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
if numSlices>1: Pxy = np.mean(Pxy, axis=0)
Pxy = np.divide(Pxy, normVal)
Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
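# A hedged usage sketch (assuming this module is importable as
# ``meg.spectral``): coherence and phase for every non-redundant channel
# pair of a small random multichannel array.
#
# import numpy as np
# from meg.spectral import cohere_pairs
#
# X = np.random.randn(4096, 4)                      # (numSamples, numCols)
# ij = [(i, j) for i in range(4) for j in range(i + 1, 4)]
# Cxy, Phase, freqs = cohere_pairs(X, ij, NFFT=256, Fs=200.)
# print(freqs.shape, Cxy[(0, 1)].shape)             # one value per frequency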
def entropy(y, bins):
r"""
Return the entropy of the data in *y*.
.. math::
-\sum p_i \log_2(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n,bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1]-bins[0]
S = -1.0*np.sum(p*np.log(p)) + np.log(delta)
#S = -1.0*np.sum(p*log(p))
return S
def hist(y, bins=10, normed=0):
"""
Return the histogram of *y* with *bins* equally sized bins. If
bins is an array, use those bins. Return value is (*n*, *x*)
where *n* is the count for each bin in *x*.
If *normed* is *False*, return the counts in the first element of
the returned tuple. If *normed* is *True*, return the probability
density :math:`\\frac{n}{len(y)\mathrm{dbin}}`.
If *y* has rank > 1, it will be raveled. If *y* is masked, only the
unmasked values will be used.
Credits: the Numeric 22 documentation
"""
warnings.warn("Use numpy.histogram()", DeprecationWarning)
return np.histogram(y, bins=bins, range=None, normed=normed)
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def levypdf(x, gamma, alpha):
"Returm the levy pdf evaluated at *x* for params *gamma*, *alpha*"
N = len(x)
if N%2 != 0:
raise ValueError('x must be an even length array; try\n'
'x = np.linspace(minx, maxx, N), where N is even')
dx = x[1]-x[0]
f = 1/(N*dx)*np.arange(-N/2, N/2, dtype=np.float_)
ind = np.concatenate([np.arange(N/2, N, dtype=int),
np.arange(0, N/2, dtype=int)])
df = f[1]-f[0]
cfl = np.exp(-gamma*np.absolute(2*np.pi*f)**alpha)
px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)
return np.take(px, ind)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def trapz(x, y):
"""
Trapezoidal integral of *y*(*x*).
"""
warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
return np.trapz(y, x)
#if len(x)!=len(y):
# raise ValueError, 'x and y must have the same length'
#if len(x)<2:
# raise ValueError, 'x and y must have > 1 element'
#return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1]))
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
"""alias for longest_contiguous_ones"""
return longest_contiguous_ones(x)
def prepca(P, frac=0):
"""
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the Matlab (TM)
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
x = np.array(x).ravel() # we need a copy
x.sort()
Nx = len(x)
if not cbook.iterable(p):
return x[int(p*Nx/100.0)]
p = np.asarray(p)* Nx/100.0
ind = p.astype(int)
ind = np.where(ind>=Nx, Nx-1, ind)
return x.take(ind)
def prctile_rank(x, p):
"""
Return the rank for each element in *x*, return the rank
0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))*0.
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v);
if ( c1 <= 0 ):
return dist(p, s0);
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1);
b = c1 / c2
pb = s0 + b * v;
return dist(p, pb)
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
Returns :
.. math::
\lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|
.. seealso::
Lyapunov Exponent
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
return np.mean(np.log(np.absolute(fprime(x))))
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
and plot it to screen less frequently than the incoming.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (eg
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xys = ((x,y),)
self.dataLim.update(xys, -1) #-1 means use the default ignore setting
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
Get the last *x*, *y* or *None*. *None* if no data set.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
Update the *datalim* in the current data in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_numerix(x, y, True)
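# A hedged usage sketch: buffer a stream of points and pull them back out as
# arrays. register() attaches a callback fired once every N additions.
#
# buf = FIFOBuffer(100)
# buf.register(lambda fifo: None, 25)    # placeholder callback
# for i in range(250):
#     buf.add(float(i), float(i) ** 2)
# x, y = buf.asarrays()                  # at most the nmax most recent points
# print(len(x), buf.last())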
def movavg(x,n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
def save(fname, X, fmt='%.18e',delimiter=' '):
"""
Save the data in *X* to file *fname* using *fmt* string to convert the
data to strings.
*fname* can be a filename or a file handle. If the filename ends
in '.gz', the file is automatically saved in compressed gzip
format. The :func:`load` function understands gzipped files
transparently.
Example usage::
save('test.out', X) # X is an array
save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays
save('test2.out', x) # x is 1D
save('test3.out', x, fmt='%1.4e') # use exponential notation
*delimiter* is used to separate the fields, eg. *delimiter* ','
for comma-separated values.
"""
if cbook.is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname,'wb')
else:
fh = file(fname,'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
origShape = None
if X.ndim == 1:
origShape = X.shape
X.shape = len(X), 1
for row in X:
fh.write(delimiter.join([fmt%val for val in row]) + '\n')
if origShape is not None:
X.shape = origShape
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
The data must be regular, same number of values in every row
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
- *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree
Exercises many of these options.
"""
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
else: return X
def slopes(x,y):
"""
SLOPES calculates the slope y'(x). Given data vectors X and Y, SLOPES
calculates Y'(X), i.e. the slope of the curve Y(X). The slope is
estimated using the slope obtained from that of a parabola through
any three consecutive points.
This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between x-
and y-values. For many functions, however, the abscissa are given
in different dimensions, so an aspect ratio is completely
arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
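# Illustrative sketch, not part of the original module: slopes() applied to a
# coarse parabola; for y = x**2 the estimated slope should be close to 2*x.
def _example_slopes():
    x = np.linspace(0.0, 2.0, 9)
    y = x ** 2
    return slopes(x, y)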
def stineman_interp(xi,x,y,yp=None):
"""
STINEMAN_INTERP Well behaved data interpolation. Given data
vectors X and Y, the slope vector YP and a new abscissa vector XI
the function stineman_interp(xi,x,y,yp) uses Stineman
interpolation to calculate a vector YI corresponding to XI.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa:
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were
    "not an academic journal but once in a while something serious
    and original comes in", adding that this was
"apparently a real solution" to a well known problem.
For yp=None, the routine automatically determines the slopes using
the "slopes" routine.
X is assumed to be sorted in increasing order
    For values xi[j] < x[0] or xi[j] > x[-1], the routine tries an
    extrapolation.  The relevance of the data obtained from this is, of
    course, questionable...
    original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
def inside_poly(points, verts):
"""
points is a sequence of x,y points
    verts is a sequence of x,y vertices of a polygon
return value is a sequence of indices into points for the points
that are inside the polygon
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(ymin, xs, ys):
"""
    given arrays *xs* and *ys*, return the vertices of a polygon
that has a scalar lower bound *ymin* and an upper bound at the *ys*.
intended for use with Axes.fill, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
    return poly_between(xs, ymin, ys)
def poly_between(x, ylower, yupper):
"""
given a sequence of x, ylower and yupper, return the polygon that
fills the regions between them. ylower or yupper can be scalar or
iterable. If they are iterable, they must be equal in length to x
return value is x, y arrays for use with Axes.fill
"""
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*np.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*np.ones(Nx)
x = np.concatenate( (x, x[::-1]) )
y = np.concatenate( (yupper, ylower[::-1]) )
return x,y
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <[email protected]>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
        return np.exp(np.clip(x, exp_safe_MIN, exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(map(fn,*args))
#from numpy import zeros_like
def zeros_like(a):
"""
Return an array of zeros of the shape and typecode of *a*.
"""
warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
return np.zeros_like(a)
#from numpy import sum as sum_flat
def sum_flat(a):
"""
Return the sum of all the elements of *a*, flattened out.
It uses ``a.flat``, and if *a* is not contiguous, a call to
``ravel(a)`` is made.
"""
warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
return np.sum(a)
#from numpy import mean as mean_flat
def mean_flat(a):
"""
Return the mean of all the elements of *a*, flattened out.
"""
warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
return np.mean(a)
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
    array([1, 3, 5])   or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
if xfin == None:
xfin = xini + 0.0
xini = 0.0
if delta == None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
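# Illustrative sketch, not part of the original module: the closed-interval
# behaviour of frange() compared with the half-open numpy.arange().
def _example_frange():
    a = frange(3)               # array([0., 1., 2., 3.]) -- endpoint included
    b = frange(3, closed=0)     # array([0., 1., 2.])     -- like arange
    c = frange(1, 6.5, npts=5)  # five equally spaced points from 1 to 6.5
    return a, b, c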
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
"""
Return square diagonal matrix whose non-zero elements are given by the
input array.
"""
warnings.warn("Use numpy.diag(d)", DeprecationWarning)
return np.diag(diag)
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
warnings.warn("Use dtype kwarg instead of typecode",
DeprecationWarning)
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars [0] + chars [int (number)]
max_exponent = int (math.log (number)/math.log (base))
max_power = long (base) ** max_exponent
lead_digit = int (number/max_power)
return chars [lead_digit] + \
base_repr (number - max_power * lead_digit, base, \
max (padding - 1, max_exponent))
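# Illustrative sketch, not part of the original module: base_repr() for a
# couple of bases, including zero padding.
def _example_base_repr():
    hex_ff = base_repr(255, 16)          # 'FF'
    bin_5 = base_repr(5, 2, padding=4)   # '1001' (5 in binary, width 4)
    return hex_ff, bin_5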
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
if not digits.count (1): return 0
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
This is a _slow_ function but which is guaranteed to return the correct
integer value if the input is an integer exact power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
def isvector(X):
"""
Like the Matlab (TM) function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
    meaning it has one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
"""
Drop-in replacement for :func:`numpy.fromfunction`.
Allows passing keyword arguments to the desired function.
Call it as (keywords are optional)::
fromfunction_kw(MyFunction, dimensions, keywords)
The function ``MyFunction`` is responsible for handling the
dictionary of keywords it will receive.
"""
warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
return np.fromfunction(function, dimensions, **kwargs)
### end fperez numutils code
def rem(x,y):
"""
Deprecated - see :func:`numpy.remainder`
"""
raise NotImplementedError('Deprecated - see numpy.remainder')
def norm(x,y=2):
"""
Deprecated - see :func:`numpy.linalg.norm`
"""
raise NotImplementedError('Deprecated - see numpy.linalg.norm')
def orth(A):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def rank(x):
"""
Deprecated - see :func:`numpy.rank`
"""
raise NotImplementedError('Deprecated - see numpy.rank')
def sqrtm(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')
def mfuncC(f, x):
"""
Deprecated
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def approx_real(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_view(rec):
"""
Return a view of an ndarray as a recarray
.. seealso::
http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
Motivation for this function
"""
return rec.view(np.recarray)
#return rec.view(dtype=(np.record, rec.dtype), type=np.recarray)
def rec_append_field(rec, name, arr, dtype=None):
"""
Return a new record array with field name populated with data from
array *arr*. This function is Deprecated. Please use
:func:`rec_append_fields`.
"""
warnings.warn("use rec_append_fields", DeprecationWarning)
return rec_append_fields(rec, name, arr, dtype)
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError, "number of arrays do not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return rec_view(newrec)
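# Illustrative sketch, not part of the original module: appending a single
# field to a small record array with rec_append_fields().
def _example_rec_append_fields():
    r = np.rec.fromrecords([(1, 2.0), (2, 3.0)], names='a,b')
    return rec_append_fields(r, 'c', np.array([True, False]))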
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
Nr = len(rec)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.empty(Nr, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return rec_view(newrec)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. eg ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    The returned record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = rowd.keys()
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = zip(*stats)
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
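# Illustrative sketch, not part of the original module: grouping a record
# array by one key column and summarizing a value column with rec_groupby().
def _example_rec_groupby():
    r = np.rec.fromrecords([('a', 1.0), ('a', 3.0), ('b', 2.0)],
                           names='key,val')
    stats = (('val', len, 'n'), ('val', np.mean, 'avgval'))
    return rec_groupby(r, ('key',), stats)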
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
dt2 = r1.dtype[name]
assert dt2==dt1
if dt1.num>dt2.num:
return (name, dt1.descr[0][1])
else:
return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)
if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
newrec_fields = newrec.dtype.fields.keys()
for k, v in defaults.items():
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return rec_view(newrec)
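# Illustrative sketch, not part of the original module: an outer join of two
# small record arrays on a shared 'id' column with rec_join().
def _example_rec_join():
    r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,x')
    r2 = np.rec.fromrecords([(1, 'a'), (3, 'c')], names='id,tag')
    return rec_join('id', r1, r2, jointype='outer', defaults={'x': np.nan})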
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=True):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
    - *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g. '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
parsedate = dateutil.parser.parse
fh = cbook.to_filehandle(fname)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def next(self):
return self.fix(self.fh.next())
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
skipheader = reader.next()
# iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row): continue
if row[0].startswith(comments): continue
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
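# Illustrative sketch, not part of the original module: loading a delimited
# text file with csv2rec().  'data.csv' is a hypothetical file whose first
# row is a header, e.g. "date,price,volume".
def _example_csv2rec():
    r = csv2rec('data.csv', delimiter=',')
    if r is None:
        return None
    return r.dtype.names, len(r)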
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
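# Illustrative sketch, not part of the original module: a textual table built
# from a small record array via rec2txt().
def _example_rec2txt():
    r = np.rec.fromrecords([('ABC', 12.54), ('XYZ', 6.32)], names='id,price')
    return rec2txt(r, precision=2)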
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
.. seealso::
:func:`csv2rec`
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
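# Illustrative sketch, not part of the original module: writing a record array
# back out with rec2csv(); 'out_example.csv' is a hypothetical file name.
def _example_rec2csv():
    r = np.rec.fromrecords([('ABC', 12.54), ('XYZ', 6.32)], names='id,price')
    rec2csv(r, 'out_example.csv', delimiter=',')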
def griddata(x,y,z,xi,yi):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside convex
hull defined by input data (no extrapolation is done).
Uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_toolkits.natgrid`) has been created that provides a more
    robust algorithm for triangulation and interpolation.  This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError, 'output grid defined by xi,yi must be monotone increasing'
# allocate array for output (buffer will be overwritten by nagridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
return zo
griddata._reported = False
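# Illustrative sketch, not part of the original module: interpolating scattered
# samples of z = x*y onto a regular 10x10 grid with griddata() above.
def _example_griddata():
    x = np.random.uniform(-1, 1, 200)
    y = np.random.uniform(-1, 1, 200)
    z = x * y
    xi = np.linspace(-1, 1, 10)
    yi = np.linspace(-1, 1, 10)
    return griddata(x, y, z, xi, yi)   # masked outside the convex hull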
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
:func:`simple_linear_interpolation` will give a list of point
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
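# Illustrative sketch, not part of the original module: interpolating a few
# points; with extrapolation disabled, values outside [x[0], x[-1]] stay NaN.
def _example_less_simple_linear_interpolation():
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.0, 10.0, 20.0])
    return less_simple_linear_interpolation(x, y, [0.5, 1.5, 3.0])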
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were:
    "not an academic journal but once in a while something serious
    and original comes in", adding that this was
"apparently a real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
nx = ma
else:
nx = np
xs = nx.asarray(xs)
ys = nx.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*nx.ones(2*Nx)
y = nx.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
return x,y
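# Illustrative sketch, not part of the original module: building the vertices
# of the region between y=0 and y=sin(x) for use with Axes.fill().
def _example_poly_between():
    x = np.linspace(0, np.pi, 20)
    return poly_between(x, 0.0, np.sin(x))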
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
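# Illustrative sketch, not part of the original module: the (ind0, ind1) pairs
# returned by contiguous_regions() for a small boolean mask.
def _example_contiguous_regions():
    mask = np.array([False, True, True, False, True])
    return contiguous_regions(mask)   # -> [(1, 3), (4, 5)]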
def cross_from_below(x, threshold):
"""
return the indices into *x* where *x* crosses some threshold from
below, eg the i's where::
x[i-1]<threshold and x[i]>=threshold
Example code::
import matplotlib.pyplot as plt
t = np.arange(0.0, 2.0, 0.1)
s = np.sin(2*np.pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, '-o')
ax.axhline(0.5)
ax.axhline(-0.5)
ind = cross_from_below(s, 0.5)
ax.vlines(t[ind], -1, 1)
ind = cross_from_above(s, -0.5)
ax.vlines(t[ind], -1, 1)
plt.show()
.. seealso::
:func:`cross_from_above` and :func:`contiguous_regions`
"""
x = np.asarray(x)
threshold = threshold
ind = np.nonzero( (x[:-1]<threshold) & (x[1:]>=threshold))[0]
if len(ind): return ind+1
else: return ind
def cross_from_above(x, threshold):
"""
    return the indices into *x* where *x* crosses some threshold from
    above, eg the i's where::
x[i-1]>threshold and x[i]<=threshold
.. seealso::
:func:`cross_from_below` and :func:`contiguous_regions`
"""
x = np.asarray(x)
ind = np.nonzero( (x[:-1]>=threshold) & (x[1:]<threshold))[0]
if len(ind): return ind+1
else: return ind
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
    like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
    Computes the distances between a set of successive points in *N*
    dimensions, where *X* is an *M* x *N* array or matrix.  The distances
    between successive rows are computed.  Distance is the standard
    Euclidean distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
    Computes the distance travelled along a polygonal curve in *N* dimensions,
    where *X* is an *M* x *N* array or matrix.  Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
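# Illustrative sketch, not part of the original module: cumulative arc length
# along a simple 2D polyline via path_length().
def _example_path_length():
    X = np.array([[0.0, 0.0], [3.0, 4.0], [3.0, 8.0]])
    return path_length(X)   # -> array([0., 5., 9.])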
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
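# Illustrative sketch, not part of the original module: lifting a quadratic
# Bezier segment with control points (0,0), (1,2), (2,0) to a cubic one.
def _example_quad2cubic():
    return quad2cubic(0.0, 0.0, 1.0, 2.0, 2.0, 0.0)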
| gpl-3.0 |
nrz/ylikuutio | external/bullet3/examples/pybullet/gym/pybullet_examples/testrender_np.py | 4 | 2607 | #make sure to compile pybullet with PYBULLET_USE_NUMPY enabled
#otherwise use testrender.py (slower but compatible without numpy)
#you can also use GUI mode, for faster OpenGL rendering (instead of TinyRender CPU)
import numpy as np
import matplotlib.pyplot as plt
import pybullet
import time
import pybullet_data
plt.ion()
img = np.random.rand(200, 320)
#img = np.random.standard_normal((50,100))
image = plt.imshow(img, interpolation='none', animated=True, label="blah")
ax = plt.gca()
#pybullet.connect(pybullet.GUI)
pybullet.connect(pybullet.DIRECT)
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
pybullet.loadURDF("plane.urdf", [0, 0, -1])
pybullet.loadURDF("r2d2.urdf")
camTargetPos = [0, 0, 0]
cameraUp = [0, 0, 1]
cameraPos = [1, 1, 1]
pybullet.setGravity(0, 0, -10)
pitch = -10.0
roll = 0
upAxisIndex = 2
camDistance = 4
pixelWidth = 320
pixelHeight = 200
nearPlane = 0.01
farPlane = 100
fov = 60
main_start = time.time()
while (1):
for yaw in range(0, 360, 10):
pybullet.stepSimulation()
start = time.time()
viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch,
roll, upAxisIndex)
aspect = pixelWidth / pixelHeight
projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
img_arr = pybullet.getCameraImage(pixelWidth,
pixelHeight,
viewMatrix,
projectionMatrix,
shadow=1,
lightDirection=[1, 1, 1],
renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
stop = time.time()
print("renderImage %f" % (stop - start))
w = img_arr[0] #width of the image, in pixels
h = img_arr[1] #height of the image, in pixels
rgb = img_arr[2] #color data RGB
dep = img_arr[3] #depth data
print('width = %d height = %d' % (w, h))
#note that sending the data to matplotlib is really slow
#reshape is needed
np_img_arr = np.reshape(rgb, (h, w, 4))
np_img_arr = np_img_arr * (1. / 255.)
#show
#plt.imshow(np_img_arr,interpolation='none',extent=(0,1600,0,1200))
#image = plt.imshow(np_img_arr,interpolation='none',animated=True,label="blah")
image.set_data(np_img_arr)
ax.plot([0])
#plt.draw()
#plt.show()
plt.pause(0.01)
#image.draw()
main_stop = time.time()
print("Total time %f" % (main_stop - main_start))
pybullet.resetSimulation()
| agpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/text_labels_and_annotations/fonts_demo_kw.py | 1 | 2151 | """
=============
Fonts Demo Kw
=============
Same as fonts_demo using kwargs. If you prefer a more pythonic, OO
style of coding, see examples/fonts_demo.py.
"""
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import numpy as np
plt.subplot(111, facecolor='w')
alignment = {'horizontalalignment': 'center', 'verticalalignment': 'baseline'}
# Show family options
families = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
t = plt.text(-0.8, 0.9, 'family', size='large', **alignment)
yp = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
for k, family in enumerate(families):
t = plt.text(-0.8, yp[k], family, family=family, **alignment)
# Show style options
styles = ['normal', 'italic', 'oblique']
t = plt.text(-0.4, 0.9, 'style', **alignment)
for k, style in enumerate(styles):
t = plt.text(-0.4, yp[k], style, family='sans-serif', style=style,
**alignment)
# Show variant options
variants = ['normal', 'small-caps']
t = plt.text(0.0, 0.9, 'variant', **alignment)
for k, variant in enumerate(variants):
t = plt.text(0.0, yp[k], variant, family='serif', variant=variant,
**alignment)
# Show weight options
weights = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
t = plt.text(0.4, 0.9, 'weight', **alignment)
for k, weight in enumerate(weights):
t = plt.text(0.4, yp[k], weight, weight=weight,
**alignment)
# Show size options
sizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
'x-large', 'xx-large']
t = plt.text(0.8, 0.9, 'size', **alignment)
for k, size in enumerate(sizes):
t = plt.text(0.8, yp[k], size, size=size,
**alignment)
x = -0.4
# Show bold italic
t = plt.text(x, 0.1, 'bold italic', style='italic',
weight='bold', size='x-small',
**alignment)
t = plt.text(x, 0.2, 'bold italic',
style='italic', weight='bold', size='medium',
**alignment)
t = plt.text(x, 0.3, 'bold italic',
style='italic', weight='bold', size='x-large',
**alignment)
plt.axis([-1, 1, 0, 1])
plt.show()
| mit |
jbalm/ActuarialCashFlowModel | core_math/test_function_optim.py | 1 | 2814 | # Python Programs
from . import functions_credit
from . import function_optim
## Python packages
import numpy as np
import unittest
import pickle
from scipy.optimize import minimize
import numpy.linalg as la
import xlwings as xw
import matplotlib.pyplot as plt
def test_function_spread():
mu=5
alpha=0.1
sigma=0.75
recovery_rate=0.35
with open('data\pickle\historical_transition_matrix.pkl', 'rb') as input:
historical_transition_matrix = pickle.load(input)
historical_generator_matrix = functions_credit.generator_matrix(historical_transition_matrix)
w, v = la.eig(historical_generator_matrix)
eigenval_hist_gen = w.real
eigenvect_hist_gen = (v.T).real
for l in range(len(eigenvect_hist_gen)):
eigenvect_hist_gen[l] = eigenvect_hist_gen[l]/la.norm(eigenvect_hist_gen[l])
eigenvect_hist_gen = eigenvect_hist_gen.T
with open('data\pickle\spread.pkl', 'rb') as input:
spread_list = pickle.load(input)
col_index = pickle.load(input)
row_index = pickle.load(input)
AAA_AA=True
def f_penal(v):
        return (function_optim.function_optim(v[0], v[1], v[2], v[3], recovery_rate, eigenvect_hist_gen, eigenval_hist_gen, row_index, col_index, spread_list, AAA_AA)
                + 1000 * (v[1] - 0.1)**2 + 1000 * (v[2] - 5)**2 + 100 * (v[3] - 0.75)**2)
bdss = [(0.001,None), (0.01, 5), (1,20), (0.01,1)]
res_penal = minimize(f_penal, x0=[3, 0.1, 5, 0.75], bounds = bdss)
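    # The quadratic terms added in f_penal softly anchor alpha, mu and sigma near
    # their starting values (0.1, 5, 0.75), so the bounded minimize call mainly
    # fits pi_0.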
pi_0 = res_penal.x[0]
alpha =res_penal.x[1]
mu = res_penal.x[2]
sigma = res_penal.x[3]
spread = function_optim.function_spread(pi_0, alpha,mu, sigma, recovery_rate, eigenvect_hist_gen, eigenval_hist_gen)
fig = plt.figure()
fig.set_size_inches(6,5)
plt.plot(range(20), np.asarray(spread)[:,0], label = 'spreads AAA')
plt.plot(range(20), np.asarray(spread)[:,1], label = 'spreads AA')
plt.plot(range(20), np.asarray(spread)[:,2], label = 'spreads A')
plt.plot(range(20), np.asarray(spread)[:,3], label = 'spreads BBB')
plt.plot(range(20), np.asarray(spread)[:,4], label = 'spreads BB')
plt.plot(range(20), np.asarray(spread)[:,5], label = 'spreads B')
plt.plot(range(20), np.asarray(spread)[:,6], label = 'spreads CCC')
plt.xlabel('Time (in years)', fontsize=18)
plt.ylabel('spreads', fontsize=16)
plt.legend()
xw.sheets['Testing'].range('E4').clear_contents()
xw.sheets['Testing'].pictures.add(fig, name='Spread Calibré',
left = xw.sheets['Testing'].range('E4').left, top = xw.sheets['Testing'].range('E4').top, update=True)
def Test_FunctionsOptim():
test_function_spread()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
maxlikely/scikit-learn | sklearn/datasets/species_distributions.py | 4 | 7844 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: Simplified BSD
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3
from urllib.request import urlopen
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6,
dtype=np.int16):
"""
    Load a coverage file.
    This will return a numpy array of the given dtype.
"""
try:
header = [F.readline() for i in range(header_length)]
except:
F = open(F)
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header['NODATA_value']
    if nodata != -9999:
        # mask the cells equal to the file's NODATA sentinel and remap them to -9999
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
    Parameters
    ----------
F : string or file object
file object or name of file
Returns
-------
rec : np.ndarray
record array representing the data
"""
try:
names = F.readline().strip().split(',')
except:
F = open(F)
names = F.readline().strip().split(',')
rec = np.loadtxt(F, skiprows=1, delimiter=',',
dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
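# A rough usage sketch (not executed here), assuming `batch` is the Bunch
# returned by fetch_species_distributions below:
#
#     xgrid, ygrid = construct_grids(batch)
#     X, Y = np.meshgrid(xgrid, ygrid[::-1])   # latitudes are stored top-down
#     # X and Y then line up with each batch.coverages[i] layer for plotting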
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, and Venezuela.
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages,
dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
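# A short usage sketch (downloads and caches the data on first call):
#
#     bunch = fetch_species_distributions()
#     bunch.coverages.shape        # (14, 1592, 1212), as documented above
#     set(bunch.train['species'])  # the two species described in the docstring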
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/ensemble/plot_forest_importances.py | 1 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
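# 'indices' orders the features from most to least important; this ordering is
# used for both the printed ranking and the bar chart below.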
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| mit |
xapple/seqenv | seqenv/outputs.py | 1 | 9924 | # Built-in modules #
import warnings, marshal, re
# Internal modules #
import seqenv
from seqenv.common.cache import property_cached
from seqenv.common.autopaths import DirectoryPath
# Third party modules #
import pandas
# We don't want the annoying h5py warning #
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import biom
################################################################################
class OutputGenerator(object):
"""Once the Analysis is done running and all the data is in memory in the
form of python objects, this object will take care of generating
all the output files the user could possibly want. You pass it the Analysis
object obviously."""
sep = '\t'
float_format = '%.5g'
def __init__(self, analysis):
self.analysis = analysis
self.a = analysis
# --------------------------- In this section --------------------------- #
# make_all
# df_seqs_concepts
# abundance_mat_mult
# df_sample_concepts
# df_sample_names
def make_all(self):
"""Let's generate all the files"""
# General matrices #
self.tsv_seq_to_concepts()
self.tsv_seq_to_names()
self.list_sequence_concept()
        # Only in the case with 'samples' #
if self.a.abundances: self.tsv_samples_to_names()
if self.a.abundances: self.biom_output()
# Graphical outputs #
self.per_seq_dot_files()
if self.a.abundances: self.per_sample_dot_files()
@property_cached
def df_seqs_concepts(self):
"""A normalized dataframe with sequences as columns and concepts (envo terms) as rows."""
# Get the data #
df = pandas.DataFrame(self.a.seq_to_counts)
df = df.fillna(0)
# Rename to original names #
df = df.rename(columns=self.a.renamed_to_orig)
# Rename envo integers to envo strings #
envo_int_to_id = lambda e: "ENVO:%08d" % e
df = df.rename(index=envo_int_to_id)
# Return
return df
def abundance_mat_mult(self, human_names=False):
"""We operate a matrix multiplication with the abundances
file provided, to link samples to either concept human readable names
or the envo term IDs."""
# Get results #
df1 = self.df_seqs_concepts
# Rename #
if human_names: df1 = df1.rename(index=self.a.concept_to_name)
# Remove those that were discarded #
df2 = self.a.df_abundances
df2 = df2.loc[df1.columns]
# Odd bug detection #
if any(s[:1].isdigit() for s in df2.columns):
msg = "None of the sample names in file '%s' can start with a number"
raise Exception(msg % self.a.abundances.filename)
# Multiply them (dot product) #
assert all(df1.columns == df2.index)
df = df1.dot(df2)
# Return
return df
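    # Shape sketch of the dot product above: df1 is (concepts x sequences) and
    # df2 is (sequences x samples), so df1.dot(df2) is (concepts x samples).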
@property_cached
def df_sample_concepts(self):
"""A dataframe where we operate a matrix multiplication with the abundances
file provided, to link samples to concept envo terms."""
return self.abundance_mat_mult(False)
@property_cached
def df_sample_names(self):
"""A dataframe where we operate a matrix multiplication with the abundances
file provided, to link samples to concept human readable names."""
return self.abundance_mat_mult(True)
# --------------------------- In this section --------------------------- #
# tsv_seq_to_concepts
# tsv_seq_to_names
# tsv_samples_to_names
# list_sequence_concept
# biom_output
def tsv_seq_to_concepts(self, name="seq_to_concepts.tsv"):
"""A TSV matrix file containing the df_seqs_concepts matrix"""
with open(self.a.out_dir + name, 'w') as handle:
content = self.df_seqs_concepts.to_csv(None, sep=self.sep, float_format=self.float_format)
handle.writelines(content)
def tsv_seq_to_names(self, name='seq_to_names.tsv'):
"""A TSV matrix file where we translate the concept to human readable names"""
with open(self.a.out_dir + name, 'w') as handle:
df = self.df_seqs_concepts.rename(index=self.a.concept_to_name)
content = df.to_csv(None, sep=self.sep, float_format=self.float_format)
handle.writelines(content)
def tsv_samples_to_names(self, name='samples_to_names.tsv'):
"""A TSV matrix file with matrix `df_sample_names`."""
with open(self.a.out_dir + name, 'w') as handle:
content = self.df_sample_names.to_csv(None, sep=self.sep, float_format=self.float_format)
handle.writelines(content)
def list_sequence_concept(self, name='list_concepts_found.tsv'):
"""A flat TSV file listing every concept found for every sequence.
It has one concept per line and looks something like this:
- OTU1, ENVO:00001, ocean, 4, GIs : [56, 123, 345]
- OTU1, ENVO:00002, soil, 7, GIs : [22, 44]
"""
# Useful later #
gi_to_key = lambda gi: self.a.db.get("gi","id",gi)[1]
key_to_envos = lambda key: marshal.loads(self.a.db.get("isolation","id",key)[2])
gi_to_envos = lambda gi: key_to_envos(gi_to_key(gi))
# Loop #
with open(self.a.out_dir + name, 'w') as handle:
for seq, gis in self.a.seq_to_gis.items():
gis = [gi for gi in gis if gi in self.a.db]
isokeys = set(gi_to_key(gi) for gi in gis)
envos = [e for key in isokeys for e in key_to_envos(key)]
for envo in envos:
seq_name = self.a.renamed_to_orig[seq]
envo_id = "ENVO:%08d" % envo
concept_name = self.a.integer_to_name.get(envo, envo_id)
concept_gis = [gi for gi in gis if envo in gi_to_envos(gi)]
count_gis = len(concept_gis)
line = (seq_name, envo_id, concept_name, str(count_gis), str(concept_gis))
handle.write('\t'.join(line) + '\n')
def biom_output(self, name='samples.biom'):
"""The same matrix as the user gave in the abundance file, but with source
information attached for every sequence.
See http://biom-format.org"""
data = self.a.df_abundances
with open(self.a.out_dir + name, 'w') as handle:
# Basic #
sample_ids = data.columns
sample_md = None
observation_ids = data.index
# Observation metadata #
observation_md = []
for seq in data.index:
seq_name = self.a.orig_names_to_renamed[seq]
counts = self.a.seq_to_counts.get(seq_name)
if not counts: observation_md.append({})
else: observation_md.append({'source': counts})
# Output #
t = biom.table.Table(data.transpose().as_matrix(), sample_ids, observation_ids, sample_md, observation_md)
handle.write(t.to_json('seqenv version %s') % seqenv.__version__)
# --------------------------- In this section --------------------------- #
# per_seq_dot_files
# per_sample_dot_files
def per_seq_dot_files(self):
"""Generations of files that can be viewed in `graphviz`.
There is one dotfile per every input sequence.
We also automiatcally make a corresponding PDF file."""
# The output directory #
directory = DirectoryPath(self.a.out_dir+'per_seq_ontology/')
directory.create_if_not_exists()
# Main loop #
for seq in self.a.seq_to_counts:
dot_path = directory + seq + '.dot'
pdf_path = directory + seq + '.pdf'
counts = self.a.seq_to_counts[seq]
counts = {"ENVO:%08d"%k:v for k,v in counts.items()}
total = sum(counts.values())
counts = {k:v/total for k,v in counts.items()}
envos = counts.keys()
graph = self.a.ontology.get_subgraph(envos)
graph = self.a.ontology.add_weights(graph, counts)
graph = self.a.ontology.add_style(graph)
self.a.ontology.write_to_dot(graph, dot_path)
self.a.ontology.add_legend(dot_path)
self.a.ontology.draw_to_pdf(dot_path, pdf_path)
def per_sample_dot_files(self):
"""Generations of files that can be viewed in `graphviz`.
There is one dotfile per every sample inputted.
We also automatically make a corresponding PDF file."""
# The output directory #
directory = DirectoryPath(self.a.out_dir+'per_sample_ontology/')
directory.create_if_not_exists()
# Main loop #
for i, sample in self.df_sample_concepts.iteritems():
# File path #
sanitized_name = "".join([c for c in sample.name if re.match(r'\w', c)])
dot_path = directory + sanitized_name +'.dot'
pdf_path = directory + sanitized_name +'.pdf'
# Counts #
counts = sample / sample.sum()
counts = dict(counts)
envos = counts.keys()
# Skip sample if it has no counts #
if sample.sum() == 0: continue
# Make graph #
graph = self.a.ontology.get_subgraph(envos)
graph = self.a.ontology.add_weights(graph, counts)
graph = self.a.ontology.add_style(graph)
# Write output #
self.a.ontology.write_to_dot(graph, dot_path)
self.a.ontology.add_legend(dot_path)
self.a.ontology.draw_to_pdf(dot_path, pdf_path)
# --------------------------- In this section --------------------------- #
# output_3
def output_3(self):
"""Possible output #3: the number of terms per OTU
OTU1: 0
OTU2: 2
OTU3: 1"""
pass | mit |
OSUrobotics/privacy-interfaces | filtering/privacy/scripts/boundingBox.py | 1 | 2868 | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import PointCloud2, Image
from cv_bridge import CvBridge
import cv, cv2
import numpy
import matplotlib
import copy
# import code # FOR TESTING
class Bound():
def __init__(self, topic):
self.bridge = CvBridge()
rospy.Subscriber(topic, Image, self.image_callback)
self.pub = rospy.Publisher(topic + '_filtered', Image)
def image_callback(self, image_in):
# Import and convert
image_cv = self.bridge.imgmsg_to_cv(image_in, 'bgr8')
image_cv2 = numpy.array(image_cv, dtype=numpy.uint8)
image_hsv = cv2.cvtColor(image_cv2, cv2.COLOR_BGR2HSV)
image_hsv = cv2.blur(image_hsv, (5, 5))
        # Make binary image of greenness
lowerb = numpy.array((50, 100,100))
upperb = numpy.array((80,255, 255))
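        # OpenCV stores hue in [0, 179] for 8-bit images, so H in roughly 50-80
        # with high saturation/value picks out strongly green pixels; the older
        # pink thresholds are kept commented out below.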
# lowerb = numpy.array((130, 50, 0))
# upperb = numpy.array((175, 255, 255))
# is_pink = cv2.inRange(image_hsv, numpy.array((130, 50, 0)), numpy.array((175, 255, 255)))
# Make binary image of pinkness
# is_pink = cv2.inRange(image_hsv, numpy.array((50, 92, 50)), numpy.array((132, 231, 187)))
is_green = cv2.inRange(image_hsv, lowerb, upperb)
green = copy.deepcopy(image_cv2)
for dim in range(3): green[:, :, dim] *= is_green / 255
green_avg = numpy.sum(numpy.sum(green, 0), 0) / numpy.sum(numpy.sum(is_green / 255, 0), 0)
green_avg = tuple([int(green_avg[0]), int(green_avg[1]), int(green_avg[2])])
# print green_avg
#code.interact(local=locals())
# Manipulate binary image
contours, hierarchy = cv2.findContours(is_green, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#code.interact(local=locals())
#print contours
print contours
max_area = 0
for index, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area > max_area:
max_area = area
best_index = index
try:
best_contour = contours[best_index]
# rospy.loginfo('Best contour was contour #{0} with area of {1}'.format(best_index, max_area))
cv2.fillConvexPoly(image_cv2, best_contour, -1)
# cv2.drawContours(image_cv2, contours, best_index, green_avg, thickness=-1) # fill in the largest pink blob
except(UnboundLocalError):
pass
#cv2.drawContours(image_cv2, contours, best_index, (0,0,0)) # draw black line around largest pink blob
# Apply binary image to full image
# image_hsv[:,:] = green_avg
for dim in range(3): image_hsv[:,:,dim] *= is_green / 255
# Convert back to ROS Image msg
# image_hsv_float = (pink.astype(float) + 1) / 256
# image_rgb = ((matplotlib.colors.hsv_to_rgb(image_hsv_float) * 256) - 1).astype('uint8')
# image_cv2 = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR)
image_cv = cv.fromarray(image_cv2)
image_out = self.bridge.cv_to_imgmsg(image_cv, 'bgr8')
self.pub.publish(image_out)
if __name__ == '__main__':
rospy.init_node('boundingBox')
boundingBox = Bound('/camera/rgb/image_color')
rospy.spin()
| mit |
ch3ll0v3k/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed.
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
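# Both helpers above time fit() followed by predict() on the same data using a
# simple wall-clock delta and append the elapsed seconds to the module-level
# result lists that are plotted below.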
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/computation/engines.py | 8 | 3821 | """Engine classes for :func:`~pandas.eval`
"""
# flake8: noqa
import abc
from pandas import compat
from pandas.compat import DeepChainMap, map
import pandas.core.common as com
import pandas.formats.printing as printing
from pandas.computation.align import _align, _reconstruct_object
from pandas.computation.ops import (UndefinedVariableError,
_mathops, _reductions)
_ne_builtins = frozenset(_mathops + _reductions)
class NumExprClobberingError(NameError):
pass
def _check_ne_builtin_clash(expr):
"""Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
    expr : Expr
        The parsed expression whose variable names are checked against the
        numexpr built-in functions.
"""
names = expr.names
overlap = names & _ne_builtins
if overlap:
s = ', '.join(map(repr, overlap))
raise NumExprClobberingError('Variables in expression "%s" '
'overlap with builtins: (%s)' % (expr, s))
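# A hypothetical illustration of the clash this guards against (sketch only):
#
#     import pandas as pd
#     df = pd.DataFrame({'sin': range(5)})
#     df.query('sin > 2')   # raises NumExprClobberingError with the numexpr
#                           # engine, since 'sin' is in _ne_builtins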
class AbstractEngine(object):
"""Object serving as a base class for all engines."""
__metaclass__ = abc.ABCMeta
has_neg_frac = False
def __init__(self, expr):
self.expr = expr
self.aligned_axes = None
self.result_type = None
def convert(self):
"""Convert an expression for evaluation.
Defaults to return the expression as a string.
"""
return printing.pprint_thing(self.expr)
def evaluate(self):
"""Run the engine on the expression
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
Returns
-------
obj : object
The result of the passed expression.
"""
if not self._is_aligned:
self.result_type, self.aligned_axes = _align(self.expr.terms)
# make sure no names in resolvers and locals/globals clash
res = self._evaluate()
return _reconstruct_object(self.result_type, res, self.aligned_axes,
self.expr.terms.return_type)
@property
def _is_aligned(self):
return self.aligned_axes is not None and self.result_type is not None
@abc.abstractmethod
def _evaluate(self):
"""Return an evaluated expression.
Parameters
----------
env : Scope
The local and global environment in which to evaluate an
expression.
Notes
-----
Must be implemented by subclasses.
"""
pass
class NumExprEngine(AbstractEngine):
"""NumExpr engine class"""
has_neg_frac = True
def __init__(self, expr):
super(NumExprEngine, self).__init__(expr)
def convert(self):
return str(super(NumExprEngine, self).convert())
def _evaluate(self):
import numexpr as ne
# convert the expression to a valid numexpr expression
s = self.convert()
try:
env = self.expr.env
scope = env.full_scope
truediv = scope['truediv']
_check_ne_builtin_clash(self.expr)
return ne.evaluate(s, local_dict=scope, truediv=truediv)
except KeyError as e:
# python 3 compat kludge
try:
msg = e.message
except AttributeError:
msg = compat.text_type(e)
raise UndefinedVariableError(msg)
class PythonEngine(AbstractEngine):
"""Evaluate an expression in Python space.
Mostly for testing purposes.
"""
has_neg_frac = False
def __init__(self, expr):
super(PythonEngine, self).__init__(expr)
def evaluate(self):
return self.expr()
def _evaluate(self):
pass
_engines = {'numexpr': NumExprEngine, 'python': PythonEngine}
| apache-2.0 |
anirudhjayaraman/scikit-learn | sklearn/neighbors/unsupervised.py | 117 | 4755 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
        Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
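    # A small follow-on sketch for the fitted docstring example above (sketch
    # only, not executed):
    #
    #     graph = neigh.kneighbors_graph(samples, mode='connectivity')
    #     graph.toarray()   # 0/1 matrix of nearest-neighbor links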
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/frame/test_to_csv.py | 1 | 44694 | # -*- coding: utf-8 -*-
from __future__ import print_function
import csv
import pytest
from numpy import nan
import numpy as np
from pandas.compat import (lmap, range, lrange, StringIO, u)
from pandas.core.common import _all_none
from pandas.errors import ParserError
from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp,
date_range, read_csv, compat, to_datetime)
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
ensure_clean,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameToCSV(TestData):
def read_csv(self, path, **kwargs):
params = dict(index_col=0, parse_dates=True)
params.update(**kwargs)
return pd.read_csv(path, **params)
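    # Helper used throughout these tests: round-trips through pd.read_csv with
    # the index parsed back, so frames written by to_csv compare equal to the
    # originals.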
def test_from_csv_deprecation(self):
# see gh-17812
with ensure_clean('__tmp_from_csv_deprecation__') as path:
self.tsframe.to_csv(path)
with tm.assert_produces_warning(FutureWarning):
depr_recons = DataFrame.from_csv(path)
assert_frame_equal(self.tsframe, depr_recons)
def test_to_csv_from_csv1(self):
with ensure_clean('__tmp_to_csv_from_csv1__') as path:
self.frame['A'][:5] = nan
self.frame.to_csv(path)
self.frame.to_csv(path, columns=['A', 'B'])
self.frame.to_csv(path, header=False)
self.frame.to_csv(path, index=False)
# test roundtrip
self.tsframe.to_csv(path)
recons = self.read_csv(path)
assert_frame_equal(self.tsframe, recons)
self.tsframe.to_csv(path, index_label='index')
recons = self.read_csv(path, index_col=None)
assert(len(recons.columns) == len(self.tsframe.columns) + 1)
# no index
self.tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
assert_almost_equal(self.tsframe.values, recons.values)
# corner case
dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
's2': Series(lrange(2), lrange(2))})
dm.to_csv(path)
recons = self.read_csv(path)
assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self):
with ensure_clean('__tmp_to_csv_from_csv2__') as path:
# duplicate index
df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
columns=['x', 'y', 'z'])
df.to_csv(path)
result = self.read_csv(path)
assert_frame_equal(result, df)
midx = MultiIndex.from_tuples(
[('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx,
columns=['x', 'y', 'z'])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2],
parse_dates=False)
assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = self.frame2.copy()
xp.columns = col_aliases
assert_frame_equal(xp, rs)
pytest.raises(ValueError, self.frame2.to_csv, path,
header=['AA', 'X'])
def test_to_csv_from_csv3(self):
with ensure_clean('__tmp_to_csv_from_csv3__') as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode='a', header=False)
xp = pd.concat([df1, df2])
rs = pd.read_csv(path, index_col=0)
rs.columns = lmap(int, rs.columns)
xp.columns = lmap(int, xp.columns)
assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with ensure_clean('__tmp_to_csv_from_csv4__') as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = pd.DataFrame({'dt_data': [i * dt for i in range(3)]},
index=pd.Index([i * dt for i in range(3)],
name='dt_index'))
df.to_csv(path)
result = pd.read_csv(path, index_col='dt_index')
result.index = pd.to_timedelta(result.index)
# TODO: remove renaming when GH 10875 is solved
result.index = result.index.rename('dt_index')
result['dt_data'] = pd.to_timedelta(result['dt_data'])
assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self):
# tz, 8260
with ensure_clean('__tmp_to_csv_from_csv5__') as path:
self.tzframe.to_csv(path)
result = pd.read_csv(path, index_col=0, parse_dates=['A'])
converter = lambda c: to_datetime(result[c]).dt.tz_localize(
'UTC').dt.tz_convert(self.tzframe[c].dt.tz)
result['B'] = converter('B')
result['C'] = converter('C')
assert_frame_equal(result, self.tzframe)
def test_to_csv_cols_reordering(self):
# GH3454
import pandas as pd
chunksize = 5
N = int(chunksize * 2.5)
df = mkdf(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = pd.read_csv(path, index_col=0)
assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
import pandas as pd
def _check_df(df, cols=None):
with ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = pd.read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(
cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
assert_series_equal(obj_df, obj_rs)
else:
assert_frame_equal(
obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = mkdf(N, 3)
df.columns = ['a', 'a', 'b']
_check_df(df, None)
# dupe cols with selection
cols = ['b', 'a']
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
from pandas import NaT
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range('2000', freq='5min', periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with ensure_clean('1.csv') as pth:
df = DataFrame(dict(a=s1, b=s2))
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth)._convert(datetime=True,
coerce=True)
assert_frame_equal(df, recons, check_names=False,
check_less_precise=True)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(df, r_dtype=None, c_dtype=None,
rnlvl=None, cnlvl=None, dupe_col=False):
kwargs = dict(parse_dates=False)
if cnlvl:
if rnlvl is not None:
kwargs['index_col'] = lrange(rnlvl)
kwargs['header'] = lrange(cnlvl)
with ensure_clean('__tmp_to_csv_moar__') as path:
df.to_csv(path, encoding='utf8',
chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs['header'] = 0
with ensure_clean('__tmp_to_csv_moar__') as path:
df.to_csv(path, encoding='utf8', chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, compat.text_type):
return x.decode('utf8')
return x
if dupe_col:
                # read_csv disambiguates the columns by
                # labeling them dupe.1, dupe.2, etc. Monkey-patch the columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[
:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1:]
type_map = dict(i='i', f='f', s='O', u='O', dt='O', p='O')
if r_dtype:
if r_dtype == 'u': # unicode
r_dtype = 'O'
recons.index = np.array(lmap(_to_uni, recons.index),
dtype=r_dtype)
df.index = np.array(lmap(_to_uni, df.index), dtype=r_dtype)
elif r_dtype == 'dt': # unicode
r_dtype = 'O'
recons.index = np.array(lmap(Timestamp, recons.index),
dtype=r_dtype)
df.index = np.array(
lmap(Timestamp, df.index), dtype=r_dtype)
elif r_dtype == 'p':
r_dtype = 'O'
recons.index = np.array(
list(map(Timestamp, to_datetime(recons.index))),
dtype=r_dtype)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())),
dtype=r_dtype)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == 'u':
c_dtype = 'O'
recons.columns = np.array(lmap(_to_uni, recons.columns),
dtype=c_dtype)
df.columns = np.array(
lmap(_to_uni, df.columns), dtype=c_dtype)
elif c_dtype == 'dt':
c_dtype = 'O'
recons.columns = np.array(lmap(Timestamp, recons.columns),
dtype=c_dtype)
df.columns = np.array(
lmap(Timestamp, df.columns), dtype=c_dtype)
elif c_dtype == 'p':
c_dtype = 'O'
recons.columns = np.array(
lmap(Timestamp, to_datetime(recons.columns)),
dtype=c_dtype)
df.columns = np.array(
lmap(Timestamp, df.columns.to_timestamp()),
dtype=c_dtype)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
assert_frame_equal(df, recons, check_names=False,
check_less_precise=True)
N = 100
chunksize = 1000
for ncols in [4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type='dt',
c_idx_type='s'), 'dt', 's')
for ncols in [4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type='dt',
c_idx_type='s'), 'dt', 's')
pass
for r_idx_type, c_idx_type in [('i', 'i'), ('s', 's'), ('u', 'dt'),
('p', 'p')]:
for ncols in [1, 2, 3, 4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type=r_idx_type,
c_idx_type=c_idx_type),
r_idx_type, c_idx_type)
for ncols in [1, 2, 3, 4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = mkdf(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=lrange(10)))
_do_test(mkdf(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(mkdf(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(mkdf(nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2),
rnlvl=2, cnlvl=2)
def test_to_csv_from_csv_w_some_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['G'] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < .5]
self.frame['H'] = self.frame.index.map(f)
with ensure_clean() as path:
self.frame.to_csv(path)
recons = self.read_csv(path)
# TODO to_csv drops column name
assert_frame_equal(self.frame, recons, check_names=False)
assert_frame_equal(np.isinf(self.frame),
np.isinf(recons), check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['E'] = np.inf
self.frame['F'] = -np.inf
with ensure_clean() as path:
self.frame.to_csv(path)
recons = self.read_csv(path)
# TODO to_csv drops column name
assert_frame_equal(self.frame, recons, check_names=False)
assert_frame_equal(np.isinf(self.frame),
np.isinf(recons), check_names=False)
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with ensure_clean('__tmp_to_csv_no_index__') as path:
df = DataFrame({'c1': [1, 2, 3], 'c2': [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df, result)
df['c3'] = Series([7, 8, 9], dtype='int64')
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ['a', 'b', 'c'],
1: ['aa', 'bb', 'cc']})
df['test'] = 'txt'
assert df.to_csv() == df.to_csv(columns=[0, 1, 'test'])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])
with ensure_clean('__tmp_to_csv_headers__') as path:
from_df.to_csv(path, header=['X', 'Y'])
recons = self.read_csv(path)
assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=['X', 'Y'])
recons = self.read_csv(path)
recons.reset_index(inplace=True)
assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self):
frame = self.frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
frame.index = new_index
with ensure_clean('__tmp_to_csv_multiindex__') as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=['A', 'B'])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1],
parse_dates=False)
# TODO to_csv drops column name
assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
self.frame.index = old_index
# try multiindex with dates
tsframe = self.tsframe
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=['time', 'foo'])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
assert_almost_equal(recons.values, self.tsframe.values)
# needed if setUp becomes class method
self.tsframe.index = old_index
with ensure_clean('__tmp_to_csv_multiindex__') as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ['first', 'second']
return DataFrame(np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[('bah', 'foo'),
('bah', 'bar'),
('ban', 'baz')], names=names),
dtype='int64')
# column & index are multi-index
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3],
index_col=[0, 1])
assert_frame_equal(df, result)
# column is mi
df = mkdf(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(
path, header=[0, 1, 2, 3], index_col=0)
assert_frame_equal(df, result)
# dup column names?
df = mkdf(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3],
index_col=[0, 1, 2])
assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert _all_none(*result.columns.names)
result.columns.names = df.columns.names
assert_frame_equal(df, result)
# tupleize_cols=True and index=False
df = _make_frame(True)
with tm.assert_produces_warning(FutureWarning):
df.to_csv(path, tupleize_cols=True, index=False)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = read_csv(path, header=0,
tupleize_cols=True,
index_col=None)
result.columns = df.columns
assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1],
index_col=[0])
assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1],
index_col=[0])
assert_frame_equal(df, result)
# column & index are multi-index (compatibility)
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
with tm.assert_produces_warning(FutureWarning):
df.to_csv(path, tupleize_cols=True)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = read_csv(path, header=0, index_col=[0, 1],
tupleize_cols=True)
result.columns = df.columns
assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = 'len of {i}, but only 5 lines in file'.format(i=i)
with tm.assert_raises_regex(ParserError, msg):
read_csv(path, header=lrange(i), index_col=0)
# write with cols
with tm.assert_raises_regex(TypeError, 'cannot specify cols '
'with a MultiIndex'):
df.to_csv(path, columns=['foo', 'bar'])
with ensure_clean('__tmp_to_csv_multiindex__') as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(',')[2] == '999'
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})
with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:
df.to_csv(path)
df2 = self.read_csv(path)
assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return ["%s%03d" % (name, i) for i in range(5)]
df_float = DataFrame(np.random.randn(
100, 5), dtype='float64', columns=create_cols('float'))
df_int = DataFrame(np.random.randn(100, 5),
dtype='int64', columns=create_cols('int'))
df_bool = DataFrame(True, index=df_float.index,
columns=create_cols('bool'))
df_object = DataFrame('foo', index=df_float.index,
columns=create_cols('object'))
df_dt = DataFrame(Timestamp('20010101'),
index=df_float.index, columns=create_cols('date'))
# add in some nans
df_float.loc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = dict()
for n, dtype in [('float', np.float64), ('int', np.int64),
('bool', np.bool), ('object', np.object)]:
for c in create_cols(n):
dtypes[c] = dtype
with ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(filename, index_col=0, dtype=dtypes,
parse_dates=create_cols('date'))
assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(np.random.randn(1000, 30), columns=lrange(
15) + lrange(15), dtype='float64')
with ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype='float64')
df_int = DataFrame(np.random.randn(1000, 3), dtype='int64')
df_bool = DataFrame(True, index=df_float.index, columns=lrange(3))
df_object = DataFrame('foo', index=df_float.index, columns=lrange(3))
df_dt = DataFrame(Timestamp('20010101'),
index=df_float.index, columns=lrange(3))
df = pd.concat([df_float, df_int, df_bool, df_object,
df_dt], axis=1, ignore_index=True)
cols = []
for i in range(5):
cols.extend([0, 1, 2])
df.columns = cols
with ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ['0.4', '1.4', '2.4']:
result[i] = to_datetime(result[i])
result.columns = df.columns
assert_frame_equal(result, df)
# GH3457
from pandas.util.testing import makeCustomDataframe as mkdf
N = 10
df = mkdf(N, 3)
df.columns = ['a', 'a', 'b']
with ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={'a.1': 'a'})
assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({'A': lrange(100000)})
aa['B'] = aa.A + 1.0
aa['C'] = aa.A + 2.0
aa['D'] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO('a,1.0\nb,2.0')
df = self.read_csv(f1, header=None)
newdf = DataFrame({'t': df[df.columns[0]]})
with ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
with ensure_clean() as path:
df.to_csv(path, encoding='UTF-8')
df2 = read_csv(path, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
df.to_csv(path, encoding='UTF-8', index=False)
df2 = read_csv(path, index_col=None, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO('')
df = DataFrame(
[[u("\u05d0"), "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=[u("\u05d0"),
u("\u05d1"), u("\u05d2"), u("\u05d3")],
index=[u("\u05d0"), u("\u05d1")])
df.to_csv(buf, encoding='UTF-8')
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_stringio(self):
buf = StringIO()
self.frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
# TODO to_csv drops column name
assert_frame_equal(recons, self.frame, check_names=False)
def test_to_csv_float_format(self):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, float_format='%.2f')
rs = read_csv(filename, index_col=0)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,
encoding='utf-8')
result = buf.getvalue()
expected = ('"A","B"\n'
'1,"foo"\n'
'2,"bar"\n'
'3,"baz"\n')
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({'A': ['hello', '{"hello"}']})
for encoding in (None, 'utf-8'):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE,
encoding=encoding, index=False)
result = buf.getvalue()
expected = 'A\nhello\n{"hello"}\n'
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected = ('A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, line_terminator='\r\n')
expected = (',A,B\r\n'
'one,1,4\r\n'
'two,2,5\r\n'
'three,3,6\r\n')
assert buf.getvalue() == expected
buf = StringIO()
df.to_csv(buf) # The default line terminator remains \n
expected = (',A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
assert buf.getvalue() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output as when one
# would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
res = StringIO()
s.to_csv(res)
exp = StringIO()
s2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = self.frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
def test_to_csv_compression_gzip(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="gzip")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="gzip", index_col=0)
assert_frame_equal(df, rs)
            # explicitly make sure file is gzipped
import gzip
f = gzip.open(filename, 'rb')
text = f.read().decode('utf8')
f.close()
for col in df.columns:
assert col in text
def test_to_csv_compression_bz2(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="bz2")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="bz2", index_col=0)
assert_frame_equal(df, rs)
# explicitly make sure file is bz2ed
import bz2
f = bz2.BZ2File(filename, 'rb')
text = f.read().decode('utf8')
f.close()
for col in df.columns:
assert col in text
def test_to_csv_compression_xz(self):
# GH11852
# use the compression kw in to_csv
tm._skip_if_no_lzma()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="xz")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="xz", index_col=0)
assert_frame_equal(df, rs)
            # explicitly make sure the file is xz-compressed
lzma = compat.import_lzma()
f = lzma.open(filename, 'rb')
assert_frame_equal(df, read_csv(f, index_col=0))
f.close()
def test_to_csv_compression_value_error(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
            # zip compression is not supported here; the write is expected to
            # fail with zipfile.BadZipfile
import zipfile
pytest.raises(zipfile.BadZipfile, df.to_csv,
filename, compression="zip")
def test_to_csv_date_format(self):
with ensure_clean('__tmp_to_csv_date_format__') as path:
dt_index = self.tsframe.index
datetime_frame = DataFrame(
{'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)
datetime_frame.to_csv(path, date_format='%Y%m%d')
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(
lambda x: int(x.strftime('%Y%m%d')))
datetime_frame_int.index = datetime_frame_int.index.map(
lambda x: int(x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format='%Y-%m-%d')
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(
lambda x: x.strftime('%Y-%m-%d'))
datetime_frame_str.index = datetime_frame_str.index.map(
lambda x: x.strftime('%Y-%m-%d'))
assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format='%Y%m%d')
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(
lambda x: int(x.strftime('%Y%m%d')))
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = (
datetime_frame_columns.columns
.map(lambda x: x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(
['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])
nat_frame = DataFrame({'A': nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format='%Y-%m-%d')
test = read_csv(path, parse_dates=[0, 1], index_col=0)
assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with ensure_clean('csv_date_format_with_dst') as path:
# make sure we are not failing on transitions
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10s')]:
time_range = np.array(range(len(i)), dtype='int64')
df = DataFrame({'A': time_range}, index=i)
df.to_csv(path, index=True)
# we have to reconvert the index as we
# don't parse the tz's
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index).tz_localize(
'UTC').tz_convert('Europe/London')
assert_frame_equal(result, df)
# GH11619
idx = pd.date_range('2015-01-01', '2015-12-31',
freq='H', tz='Europe/Paris')
df = DataFrame({'values': 1, 'idx': idx},
index=idx)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_csv(path, index=True)
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index).tz_localize(
'UTC').tz_convert('Europe/Paris')
result['idx'] = to_datetime(result['idx']).astype(
'datetime64[ns, Europe/Paris]')
assert_frame_equal(result, df)
        # sanity check: converting the tz-aware frame to str should not raise
df.astype(str)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_pickle(path)
result = pd.read_pickle(path)
assert_frame_equal(result, df)
def test_to_csv_quoting(self):
df = DataFrame({
'c_string': ['a', 'b,c'],
'c_int': [42, np.nan],
'c_float': [1.0, 3.2],
'c_bool': [True, False],
})
expected = """\
,c_bool,c_float,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,"b,c"
"""
result = df.to_csv()
assert result == expected
result = df.to_csv(quoting=None)
assert result == expected
result = df.to_csv(quoting=csv.QUOTE_MINIMAL)
assert result == expected
expected = """\
"","c_bool","c_float","c_int","c_string"
"0","True","1.0","42.0","a"
"1","False","3.2","","b,c"
"""
result = df.to_csv(quoting=csv.QUOTE_ALL)
assert result == expected
# see gh-12922, gh-13259: make sure changes to
# the formatters do not break this behaviour
expected = """\
"","c_bool","c_float","c_int","c_string"
0,True,1.0,42.0,"a"
1,False,3.2,"","b,c"
"""
result = df.to_csv(quoting=csv.QUOTE_NONNUMERIC)
assert result == expected
msg = "need to escape, but no escapechar set"
tm.assert_raises_regex(csv.Error, msg, df.to_csv,
quoting=csv.QUOTE_NONE)
tm.assert_raises_regex(csv.Error, msg, df.to_csv,
quoting=csv.QUOTE_NONE,
escapechar=None)
expected = """\
,c_bool,c_float,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,b!,c
"""
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='!')
assert result == expected
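        # Descriptive note: with QUOTE_NONE the escape character itself must
        # also be escaped, so escapechar='f' turns the 'c_float' header into
        # 'c_ffloat' and the comma in 'b,c' into 'bf,c' in the expected
        # output below.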
expected = """\
,c_bool,c_ffloat,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,bf,c
"""
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='f')
assert result == expected
        # see gh-3503: quoting of Windows line terminators (CRLF) should
        # survive a round trip when an encoding is specified
text = 'a,b,c\n1,"test \r\n",3\n'
df = pd.read_csv(StringIO(text))
buf = StringIO()
df.to_csv(buf, encoding='utf-8', index=False)
assert buf.getvalue() == text
# xref gh-7791: make sure the quoting parameter is passed through
# with multi-indexes
df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
df = df.set_index(['a', 'b'])
expected = '"a","b","c"\n"1","3","5"\n"2","4","6"\n'
assert df.to_csv(quoting=csv.QUOTE_ALL) == expected
def test_period_index_date_overflow(self):
# see gh-15982
dates = ["1990-01-01", "2000-01-01", "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected = ',0\n1990-01-01,4\n2000-01-01,5\n3005-01-01,6\n'
assert result == expected
date_format = "%m-%d-%Y"
result = df.to_csv(date_format=date_format)
expected = ',0\n01-01-1990,4\n01-01-2000,5\n01-01-3005,6\n'
assert result == expected
# Overflow with pd.NaT
dates = ["1990-01-01", pd.NaT, "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n'
assert result == expected
| apache-2.0 |
AnasGhrab/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The points reconstructed by metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
mcallaghan/tmv | BasicBrowser/twitter/management/commands/scrape_searches.py | 1 | 4634 | from django.core.management.base import BaseCommand, CommandError
from twitter.models import *
import pandas as pd
import csv
import twint
import json
import os
import sys
from datetime import datetime, timedelta
import django
from django.utils import timezone
import time
from pathlib import Path
class Command(BaseCommand):
    help = 'Re-runs saved Twitter searches week by week and stores the scraped tweets'
def add_arguments(self, parser):
parser.add_argument('weeks', type=int)
def handle(self, *args, **options):
def parse_tjson(tsearch,fname):
with open(fname) as f:
for l in f:
tweet = json.loads(l)
try:
user, created = User.objects.get_or_create(
id=tweet['user_id']
)
except:
print(tweet)
sys.exit()
if created:
try:
user.screen_name=tweet['username']
user.save()
except:
print(tweet)
break
status, created = Status.objects.get_or_create(
id=tweet['id']
)
status.fetched = timezone.now()
status.save()
status.searches.add(tsearch)
if created:
t = datetime.strptime(
"{} {}".format(tweet['date'],tweet['time']),
"%Y-%m-%d %H:%M:%S"
)
t = django.utils.timezone.make_aware(t)
status.author=user
status.created_at=t
status.favorites_count = tweet['likes_count']
status.retweets_count = tweet['retweets_count']
status.place = tweet['location']
status.text = tweet['tweet']
try:
status.save()
except:
status.text = None
status.save()
prog, created = SearchProgress.objects.get_or_create(server="apsis")
if prog.search_date is None:
now = datetime.now() #- timedelta(days=77)
else:
now = prog.search_date
for i in range(options['weeks']):
try:
td = (now - datetime.now()).days
except:
td = (now - django.utils.timezone.make_aware(datetime.now())).days
if abs(td/7) > options['weeks']:
now = datetime.now()
then = now - timedelta(days=8)
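            # Descriptive note: each scrape covers an 8-day window while the
            # loop advances `now` by 7 days at its end, so adjacent windows
            # overlap by one day; duplicate tweets are absorbed by the
            # get_or_create() calls in parse_tjson above.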
print(now.strftime("%Y-%m-%d"))
print(then.strftime("%Y-%m-%d"))
try:
prog.search_date = django.utils.timezone.make_aware(now)
except:
prog.search_date = now
prog.save()
for ts in TwitterSearch.objects.all().order_by('id'):
print(ts.string)
if ts.search_since and prog.search_date < ts.search_since:
print("skipping, as we are searching since later than this")
time.sleep(0.1)
continue
try:
os.remove("tweets/tweets.json")
except:
pass
folder = f"tweets/tweets_{ts.string}_{now.strftime('%Y-%m-%d')}"
fname = f"{folder}/tweets.json"
c = twint.Config()
c.Search = ts.string
c.Since = then.strftime("%Y-%m-%d")
c.Until = now.strftime("%Y-%m-%d")
c.Store_json = True
c.Output = folder
twint.run.Search(c)
path = Path(fname)
if path.exists():
parse_tjson(ts,fname)
try:
os.remove(fname)
os.rmdir(folder)
except:
pass
try:
then = django.utils.timezone.make_aware(now)
except:
then = then
ts.scrape_fetched=now
if ts.since is None or ts.since > then:
ts.since = then
if ts.until is None or ts.until < now:
ts.until = now
ts.save()
now = now - timedelta(days=7)
| gpl-3.0 |
manahl/arctic | arctic/serialization/incremental.py | 1 | 10075 | import abc
import hashlib
import logging
from threading import RLock
import numpy as np
import pandas as pd
from bson import Binary
from arctic._config import ARCTIC_AUTO_EXPAND_CHUNK_SIZE
from arctic.serialization.numpy_records import PandasSerializer
from .._compression import compress
from .._config import MAX_DOCUMENT_SIZE
from .._util import NP_OBJECT_DTYPE
from ..exceptions import ArcticSerializationException
ABC = abc.ABCMeta('ABC', (object,), {})
log = logging.getLogger(__name__)
def incremental_checksum(item, curr_sha=None, is_bytes=False):
curr_sha = hashlib.sha1() if curr_sha is None else curr_sha
curr_sha.update(item if is_bytes else item.tostring())
return curr_sha
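# Illustrative usage (hedged sketch, comment only): the running sha object is
# threaded through successive calls so that chunked data yields a single
# digest, e.g.
#     sha = None
#     for chunk in (b'abc', b'def'):
#         sha = incremental_checksum(chunk, curr_sha=sha, is_bytes=True)
#     sha.digest()  # equals hashlib.sha1(b'abcdef').digest()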
class LazyIncrementalSerializer(ABC):
def __init__(self, serializer, input_data, chunk_size):
if chunk_size < 1:
raise ArcticSerializationException("LazyIncrementalSerializer can't be initialized "
"with chunk_size < 1 ({})".format(chunk_size))
if not serializer:
raise ArcticSerializationException("LazyIncrementalSerializer can't be initialized "
"with a None serializer object")
self.input_data = input_data
self.chunk_size = chunk_size
self._serializer = serializer
self._initialized = False
self._checksum = None
@abc.abstractmethod
def __len__(self):
pass
@abc.abstractproperty
def generator(self):
pass
@abc.abstractproperty
def generator_bytes(self):
pass
@abc.abstractproperty
def serialize(self):
pass
class IncrementalPandasToRecArraySerializer(LazyIncrementalSerializer):
def __init__(self, serializer, input_data, chunk_size, string_max_len=None):
super(IncrementalPandasToRecArraySerializer, self).__init__(serializer, input_data, chunk_size)
if not isinstance(serializer, PandasSerializer):
raise ArcticSerializationException("IncrementalPandasToRecArraySerializer requires a serializer of "
"type PandasSerializer.")
if not isinstance(input_data, (pd.DataFrame, pd.Series)):
raise ArcticSerializationException("IncrementalPandasToRecArraySerializer requires a pandas DataFrame or "
"Series as data source input.")
if string_max_len and string_max_len < 1:
raise ArcticSerializationException("IncrementalPandasToRecArraySerializer can't be initialized "
"with string_max_len < 1 ({})".format(string_max_len))
self.string_max_len = string_max_len
# The state which needs to be lazily initialized
self._dtype = None
self._shape = None
self._rows_per_chunk = 0
self._total_chunks = 0
self._has_string_object = False
self._lock = RLock()
def _dtype_convert_to_max_len_string(self, input_ndtype, fname):
if input_ndtype.type not in (np.string_, np.unicode_):
return input_ndtype, False
type_sym = 'S' if input_ndtype.type == np.string_ else 'U'
max_str_len = len(max(self.input_data[fname].astype(type_sym), key=len))
str_field_dtype = np.dtype('{}{:d}'.format(type_sym, max_str_len)) if max_str_len > 0 else input_ndtype
return str_field_dtype, True
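    # Illustrative example (comment only): if the first rows of an object
    # column serialize to dtype 'U1' but the column also contains 'bbb', the
    # dtype returned here is widened to 'U3' so that later chunks are not
    # truncated during incremental serialization.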
def _get_dtype(self):
# Serializer is being called only if can_convert_to_records_without_objects() has passed,
# which means that the resulting recarray does not contain objects but only numpy types, string, or unicode
# Serialize the first row to obtain info about row size in bytes (cache first few rows only)
# Also raise an Exception early, if data are not serializable
first_chunk, serialized_dtypes = self._serializer.serialize(
self.input_data[0:10] if len(self) > 0 else self.input_data,
string_max_len=self.string_max_len)
# This is the common case, where first row's dtype represents well the whole dataframe's dtype
if serialized_dtypes is None or \
len(self.input_data) == 0 or \
NP_OBJECT_DTYPE not in self.input_data.dtypes.values:
return first_chunk, serialized_dtypes, False
# Reaching here means we have at least one column of type object
# To correctly serialize incrementally, we need to know the final dtype (type and fixed length),
# using length-conversion information from all values of the object columns
dtype_arr = []
has_string_object = False
for field_name in serialized_dtypes.names: # include all column names, along with the expanded multi-index
field_dtype = serialized_dtypes[field_name]
if field_name not in self.input_data or self.input_data.dtypes[field_name] is NP_OBJECT_DTYPE:
# Note: .hasobject breaks for timezone-aware datetime64 pandas columns, so compare with dtype('O')
# if column is an expanded multi index or doesn't contain objects, the serialized 1st row dtype is safe
field_dtype, with_str_object = self._dtype_convert_to_max_len_string(field_dtype, field_name)
has_string_object |= with_str_object
dtype_arr.append((field_name, field_dtype))
return first_chunk, np.dtype(dtype_arr), has_string_object
def _lazy_init(self):
if self._initialized:
return
with self._lock:
if self._initialized: # intentional double check here
return
# Get the dtype of the serialized array (takes into account object types, converted to fixed length strings)
first_chunk, dtype, has_string_object = self._get_dtype()
# Compute the number of rows which can fit in a chunk
rows_per_chunk = 0
if len(self) > 0 and self.chunk_size > 1:
rows_per_chunk = IncrementalPandasToRecArraySerializer._calculate_rows_per_chunk(self.chunk_size, first_chunk)
# Initialize object's state
self._dtype = dtype
shp = list(first_chunk.shape)
shp[0] = len(self)
self._shape = tuple(shp)
self._has_string_object = has_string_object
self._rows_per_chunk = rows_per_chunk
self._total_chunks = int(np.ceil(float(len(self)) / self._rows_per_chunk)) if rows_per_chunk > 0 else 0
self._initialized = True
@staticmethod
def _calculate_rows_per_chunk(max_chunk_size, chunk):
sze = int(chunk.dtype.itemsize * np.prod(chunk.shape[1:]))
sze = sze if sze < max_chunk_size else max_chunk_size
rows_per_chunk = int(max_chunk_size / sze)
if rows_per_chunk < 1 and ARCTIC_AUTO_EXPAND_CHUNK_SIZE:
# If a row size is larger than chunk_size, use the maximum document size
logging.warning('Chunk size of {} is too small to fit a row ({}). '
'Using maximum document size.'.format(max_chunk_size, MAX_DOCUMENT_SIZE))
# For huge rows, fall-back to using a very large document size, less than max-allowed by MongoDB
rows_per_chunk = int(MAX_DOCUMENT_SIZE / sze)
if rows_per_chunk < 1:
raise ArcticSerializationException("Serialization failed to split data into max sized chunks.")
return rows_per_chunk
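    # Worked example (illustrative): for a 1-D record chunk whose dtype
    # itemsize is 40 bytes, sze == 40; with max_chunk_size == 1000 this gives
    # rows_per_chunk == int(1000 / 40) == 25, i.e. each generated chunk holds
    # at most 25 serialized rows.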
def __len__(self):
return len(self.input_data)
@property
def shape(self):
self._lazy_init()
return self._shape
@property
def dtype(self):
self._lazy_init()
return self._dtype
@property
def rows_per_chunk(self):
self._lazy_init()
return self._rows_per_chunk
def checksum(self, from_idx, to_idx):
if self._checksum is None:
self._lazy_init()
total_sha = None
for chunk_bytes, dtype in self.generator_bytes(from_idx=from_idx, to_idx=to_idx):
# TODO: what about compress_array here in batches?
compressed_chunk = compress(chunk_bytes)
total_sha = incremental_checksum(compressed_chunk, curr_sha=total_sha, is_bytes=True)
self._checksum = Binary(total_sha.digest())
return self._checksum
def generator(self, from_idx=None, to_idx=None):
return self._generator(from_idx=from_idx, to_idx=to_idx)
def generator_bytes(self, from_idx=None, to_idx=None):
return self._generator(from_idx=from_idx, to_idx=to_idx, get_bytes=True)
def _generator(self, from_idx, to_idx, get_bytes=False):
# Note that the range is: [from_idx, to_idx)
self._lazy_init()
my_length = len(self)
# Take into account default arguments and negative indexing (from end offset)
from_idx = 0 if from_idx is None else from_idx
if from_idx < 0:
from_idx = my_length + from_idx
to_idx = my_length if to_idx is None else min(to_idx, my_length)
if to_idx < 0:
to_idx = my_length + to_idx
# No data, finish iteration
if my_length == 0 or from_idx >= my_length or from_idx >= to_idx:
return
# Perform serialization for each chunk
while from_idx < to_idx:
curr_stop = min(from_idx + self._rows_per_chunk, to_idx)
chunk, _ = self._serializer.serialize(
self.input_data[from_idx: curr_stop],
string_max_len=self.string_max_len,
forced_dtype=self.dtype if self._has_string_object else None)
# Let the gc collect the intermediate serialized chunk as early as possible
chunk = chunk.tostring() if chunk is not None and get_bytes else chunk
yield chunk, self.dtype, from_idx, curr_stop
from_idx = curr_stop
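    # Illustrative usage (comment only): generator(from_idx=-100) yields
    # (chunk, dtype, start, stop) tuples covering the last 100 rows in
    # rows_per_chunk-sized pieces, while generator_bytes() yields the same
    # chunks already converted to raw bytes for compression/checksumming.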
def serialize(self):
return self._serializer.serialize(self.input_data, self.string_max_len)
| lgpl-2.1 |
kdebrab/pandas | pandas/_version.py | 5 | 16218 | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
from pandas.compat import PY3
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig(object):
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "pandas-"
cfg.versionfile_source = "pandas/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run {dispcmd}".format(dispcmd=dispcmd))
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if PY3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run {dispcmd} (error)".format(dispcmd=dispcmd))
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '{root}', but '{dirname}' "
"doesn't start with prefix '{parentdir_prefix}'".format(
root=root, dirname=dirname,
parentdir_prefix=parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '{}', no digits".format(",".join(refs - tags)))
if verbose:
print("likely tags: {}".format(",".join(sorted(tags))))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking {r}".format(r=r))
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in {root}".format(root=root))
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: "
"'{describe_out}'".format(
describe_out=describe_out))
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '{full_tag}' doesn't start with prefix " \
"'{tag_prefix}'"
print(fmt.format(full_tag=full_tag, tag_prefix=tag_prefix))
pieces["error"] = ("tag '{full_tag}' doesn't start with "
"prefix '{tag_prefix}'".format(
full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "{:d}.g{}".format(pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.{:d}.g{}".format(pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
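# Illustrative example (comment only): pieces == {"closest-tag": "0.23.0",
# "distance": 4, "short": "abc1234", "dirty": True} renders as
# "0.23.0+4.gabc1234.dirty"; with distance == 0 and a clean tree the result
# is just "0.23.0".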
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post{:d}".format(pieces["distance"])
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g{}".format(pieces["short"])
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g{}".format(pieces["short"])
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '{style}'".format(style=style))
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
| bsd-3-clause |
kagayakidan/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
btabibian/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 157 | 2409 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
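    # --- Hedged sketch of one possible solution ---------------------------
    # The TASK comments above intentionally leave the implementation open;
    # the pipeline below is only an illustrative sketch, and the parameter
    # values (min_df, max_df, C, the ngram_range grid) are assumptions, not
    # tuned results.
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    for params, mean_score in zip(grid_search.cv_results_['params'],
                                  grid_search.cv_results_['mean_test_score']):
        print("{:0.3f} for {!r}".format(mean_score, params))
    y_predicted = grid_search.predict(docs_test)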
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
nhejazi/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 46 | 4415 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "LabelKFold", "LeaveOneOut", "LeaveOneLabelOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {0:6f} with std. dev. of {1:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
| bsd-3-clause |
jswoboda/RadarDataSim | beamtools/pickBeams.py | 2 | 17711 | #!/usr/bin/env python
"""
This GUI is based on a GUI originally developed by Steven Chen at SRI.
The code was cleaned up so that the GUI is now encompassed in a class structure. Also
the ability to switch between PFISR and RISR-N has been added along with a finish button.
The code also outputs a picture of the selected beam pattern.
@author: John Swoboda
Updated by Greg Starr so it can be used as part of a larger GUI
"""
import Tkinter
import tkFileDialog
import os, inspect
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import tables
from beamfuncs import BeamSelector
import pdb
from isrutilities.sensorConstants import get_files
def rect(r, w, deg=1):
# radian if deg=0; degree if deg=1
from math import cos, sin, pi
if deg:
w = pi * w / 180.0
return r * cos(w), r * sin(w)
def polar(x, y, deg=1):
# radian if deg=0; degree if deg=1
from math import hypot, atan2, pi
if deg:
return hypot(x, y), 180.0 * atan2(y, x) / pi
else:
return hypot(x, y), atan2(y, x)
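# Illustrative note (comment only): rect() and polar() convert between polar
# and Cartesian coordinates, e.g. rect(1, 90) ~= (0.0, 1.0) and
# polar(0.0, 1.0) == (1.0, 90.0) with deg=1; they are used below to map
# (azimuth, elevation) beam directions onto the canvas.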
class Gui():
def __init__(self,parent,subgui=True):
# get the current path
curpath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
constpath = os.path.join(os.path.split(curpath)[0],'SimISR','const')
# set the root
self.parent = parent
self.subgui = subgui
# set up frames for list
self.frame1 = Tkinter.Frame(self.parent)
self.frame1.grid(row=0,column=0)
self.frame2 = Tkinter.Frame(self.parent)
self.frame2.grid(row=0,column=1)
self.output = []
self.beamhandle = None
if subgui:
self.sizecanv = [500,500]
self.beamcodeent= Tkinter.Entry(self.frame1)
self.beamcodeent.grid(row=1,column=1)
self.beamcodeentlabel = Tkinter.Label(self.frame1,text="Enter Beamcodes")
self.beamcodeentlabel.grid(row=1,column=0,sticky='e')
self.beambuttex = Tkinter.Button(self.frame1, text="Read", command=self.readbcobar)
self.beambuttex.grid(row=1,column=2,sticky='w')
self.beambutt = Tkinter.Button(self.frame1, text="Import", command=self.beambuttonClick)
self.beambutt.grid(row=2,column=2,sticky='w')
canvrow = 3
else:
self.sizecanv = [1000,1000]
self.leb = Tkinter.Label(self.frame1, text="Beam Selector",font=("Helvetica", 16))
self.leb.grid(row=0, sticky=Tkinter.W+Tkinter.E+Tkinter.N+Tkinter.S,columnspan=2)
self.butt = Tkinter.Button(self.frame1, text="Finished", command=self.buttonClick)
self.butt.grid(row=1,column=1,sticky='w')
self.beamcodeent= Tkinter.Entry(self.frame1)
self.beamcodeent.grid(row=2,column=1,sticky='w')
self.beamcodeentlabel = Tkinter.Label(self.frame1,text="Enter Beamcodes")
self.beamcodeentlabel.grid(row=2,column = 0,sticky='e')
self.beambuttex = Tkinter.Button(self.frame1, text="Read", command=self.readbcobar)
self.beambuttex.grid(row=2,column=2,sticky='w')
self.beambutt = Tkinter.Button(self.frame1, text="Import", command=self.beambuttonClick)
self.beambutt.grid(row=3,column=2,sticky='w')
canvrow = 4
self.off_x = self.sizecanv[0]/2
self.off_y = self.sizecanv[1]/2
self.div = 75.0*self.sizecanv[0]/1000.0
self.lat = [80,70,60,50,40,30]
self.angles = np.arange(0,180,30)
self.var = Tkinter.StringVar()
self.var.set("PFISR")
self.choices = {"PFISR":get_files('PFISR_PARAMS.h5'),
"RISR-N":get_files('RISR_PARAMS.h5'),
"Sondrestrom":get_files('Sondrestrom_PARAMS.h5'),
"Millstone":get_files('Millstone_PARAMS.h5')}#, "RISR-S":'file3'}
self.option = Tkinter.OptionMenu(self.frame1, self.var, *self.choices)
self.option.grid(row=1,column=0,sticky='w')
hfile=tables.open_file(self.choices[self.var.get()])
self.lines = hfile.root.Params.Kmat.read()
hfile.close()
self.readfile = Tkinter.StringVar()
# set up the canvas
self.canv = Tkinter.Canvas(self.frame1 , width=self.sizecanv[0], height=self.sizecanv[1],background='white')
self.canv.grid(row=canvrow,column=0,columnspan=2)
self.Drawlines()
self.Drawbeams()
self.canv.bind('<ButtonPress-1>', self.onCanvasClick)
self.canv.bind('<ButtonPress-2>', self.onCanvasRightClick)
self.var.trace('w', self.Changefile)
self.canv.update()
# beam list
self.bidlabel = Tkinter.Label(self.frame2,text="Beam ID")
self.bidlabel.grid(row=0,column=0)
self.azlabel = Tkinter.Label(self.frame2,text="Azimuth")
self.azlabel.grid(row=0,column=1)
self.ellabel = Tkinter.Label(self.frame2,text="Elevation")
self.ellabel.grid(row=0,column=2)
self.scroll = Tkinter.Scrollbar(self.frame2)
self.scroll.grid(row=1,column=3)
self.beamtext = Tkinter.Text(self.frame2,yscrollcommand=self.scroll.set)
self.beamtext.config(width=50,state=Tkinter.DISABLED)
self.beamtext.grid(row = 1,column = 0,columnspan=3)
self.beamlines = []
self.scroll.config(command=self.beamtext.yview)
# bounding box
self.boxbutton= Tkinter.Button(self.frame2, text="Angle Box", command=self.boxbuttonClick)
self.boxbutton.grid(row=2,column=0,sticky='w')
self.azminmaxlabel = Tkinter.Label(self.frame2,text="Az min and max")
self.azminmaxlabel.grid(row=3,column=0,sticky='e')
self.azmin= Tkinter.Entry(self.frame2)
self.azmin.grid(row=3,column=1,sticky='w')
self.azmax= Tkinter.Entry(self.frame2)
self.azmax.grid(row=3,column=2,sticky='w')
self.elminmaxlabel = Tkinter.Label(self.frame2,text="El min and max")
self.elminmaxlabel.grid(row=4,column=0,sticky='e')
self.elmin= Tkinter.Entry(self.frame2)
self.elmin.grid(row=4,column=1,sticky='w')
self.elmax= Tkinter.Entry(self.frame2)
self.elmax.grid(row=4,column=2,sticky='w')
# Az choice
self.azbutton=Tkinter.Button(self.frame2, text="Az Choice", command=self.azbuttonClick)
self.azbutton.grid(row=5,column=0,sticky='w')
self.azchoice= Tkinter.Entry(self.frame2)
self.azchoice.grid(row=5,column=1,sticky='w')
# Az choice
self.elbutton=Tkinter.Button(self.frame2, text="El Choice", command=self.elbuttonClick)
self.elbutton.grid(row=6,column=0,sticky='w')
self.elchoice= Tkinter.Entry(self.frame2)
self.elchoice.grid(row=6,column=1,sticky='w')
self.azsortbutton=Tkinter.Button(self.frame2, text="Az sort", command=self.azsortbuttonClick)
self.azsortbutton.grid(row=7,column=0,sticky='w')
self.elsortbutton=Tkinter.Button(self.frame2, text="El Sort", command=self.elsortbuttonClick)
self.elsortbutton.grid(row=7,column=1,sticky='w')
def Changefile(self,*args):
""" This function will change the files to a different radar system."""
filename= self.choices[self.var.get()]
self.beamtext.config(state=Tkinter.NORMAL)
self.beamtext.delete(1.0,'end')
self.beamtext.config(state=Tkinter.DISABLED)
self.readfile.set(filename)
hfile=tables.open_file(filename)
self.lines = hfile.root.Params.Kmat.read()
hfile.close()
self.output=[]
self.canv.delete(Tkinter.ALL)
self.Drawlines()
self.Drawbeams()
def Drawbeams(self):
"This function will draw all of the beams on the canvas."
div =self.div
#if self.beamhandle is not None:
off_x = self.off_x
off_y = self.off_y
self.beamhandles = []
self.ovalx = np.zeros(self.lines.shape[0])
self.ovaly = np.zeros(self.lines.shape[0])
for ibeam,beams in enumerate(self.lines):
c_coords = rect(90-beams[2],beams[1]-90)
#print c_coords
points = [c_coords[0]*div/10+off_x -5,
c_coords[1]*div/10+off_y-5,
c_coords[0]*div/10+off_x +5,
c_coords[1]*div/10+off_y+5]
self.ovalx[ibeam] = c_coords[0]*div/10+off_x
self.ovaly[ibeam] = c_coords[1]*div/10+off_y
self.beamhandles.append(self.canv.create_oval(points, fill='blue',tags='beams'))
def addbeamlist(self,beamlist):
""" """
div =self.div
off_x = self.off_x
off_y = self.off_y
for ibeam in beamlist:
c_coords = rect(90-ibeam[1],ibeam[0]-90)
x = c_coords[0]*div/10+off_x
y = c_coords[1]*div/10+off_y
dist = (self.ovalx-x)**2+(self.ovaly-y)**2
linesit = np.argmin(dist)
closest = self.lines[linesit]
if closest[0] not in self.output:
self.__addbeam__(closest,linesit)
def addbeamlistbco(self,bcolist):
""" Adds a set of beams based off of the beam numbers"""
allbco = self.lines[:,0]
allbco = np.array([int(i) for i in allbco])
for ibco in bcolist:
ibco = int(ibco)
linesit =np.flatnonzero(allbco==ibco)
if len(linesit)==0:
continue
linesit = linesit[0]
closest= self.lines[linesit]
if closest[0] not in self.output:
self.__addbeam__(closest,linesit)
def Drawlines(self):
"""This function will draw all of the lines on the canvas for the """
off_x = self.off_x
off_y = self.off_y
div = self.div
lat = self.lat
Nlat = len(lat)
angles = self.angles
# Create circles
textangles = {iang:(str(np.mod(270+iang,360)),str(np.mod(90+iang,360))) for iang in angles}
for i in range(1,7):
points = [-div*i+off_x, -div*i+off_y, div*i+off_x, div*i+off_y]
self.canv.create_oval(points,dash=(5,5))
self.canv.create_text(points[0]+(div*i)/(div/15.0),points[1]+(div*i)/(div/15.0),text=str(lat[i-1]))
for iang in angles:
iangr = iang*np.pi/180
cosan = np.cos(iangr)
sinan = np.sin(iangr)
points = [-div*Nlat*cosan+off_x, -div*Nlat*sinan+off_y, div*Nlat*cosan+off_x, div*Nlat*sinan+off_y]
self.canv.create_line(points,dash=(5,5))
self.canv.create_text(points[0],points[1], text=textangles[iang][0])
self.canv.create_text(points[2],points[3], text=textangles[iang][1])
def onCanvasClick(self,event):
"""This function will find the nearest beam """
p_coords = polar(event.x-self.off_x ,event.y-self.off_y)
x = self.canv.canvasx(event.x)
y = self.canv.canvasy(event.y)
dist = (self.ovalx-x)**2+(self.ovaly-y)**2
linesit = np.argmin(dist)
closest = self.lines[linesit]
if closest[0] not in self.output:
self.__addbeam__(closest,linesit)
else:
print("Repeated beam")
def onCanvasRightClick(self,event):
"""This will undo the choice of the nearest highlighted beam."""
x = self.canv.canvasx(event.x)
y = self.canv.canvasy(event.y)
dist = (self.ovalx-x)**2+(self.ovaly-y)**2
linesit = np.argmin(dist)
closest = self.lines[linesit]
if (closest[0] in self.output) and (dist[linesit]<self.div/5.0):
self.__removebeam__(closest,linesit)
def boxbuttonClick(self):
"""This the call back for the bounding box button where all of the beams
at a certian elevation are selected."""
inputvec = []
inputvec.append(self.azmin.get().strip())
inputvec.append(self.azmax.get().strip())
inputvec.append(self.elmin.get().strip())
inputvec.append(self.elmax.get().strip())
maxmin = [0.,359.99,0.,90.]
inputnums = []
for i,iin in enumerate(inputvec):
try:
inputnums.append(float(iin))
except:
inputnums.append(maxmin[i])
alldata = self.lines
azkeep = np.logical_and(alldata[:,1]>inputnums[0],alldata[:,1]<inputnums[1])
elkeep = np.logical_and(alldata[:,2]>inputnums[2],alldata[:,2]<inputnums[3])
allkeep = np.logical_and(azkeep,elkeep)
bcolist = alldata[allkeep,0]
if (len(bcolist)!=0) and (len(bcolist)!=len(alldata)):
self.addbeamlistbco(bcolist)
def azbuttonClick(self):
"""This the call back for the azimuth button where all of the beams
at a certian elevation are selected."""
azval = self.azchoice.get().strip()
try:
aznum = float(azval)
azkeep =np.in1d(self.lines[:,1],aznum)
bcoout = self.lines[azkeep,0]
self.addbeamlistbco(bcoout)
except:
print('Bad value for azimuth angle given')
def elbuttonClick(self):
"""This the call back for the elevation button where all of the beams
at a certian elevation are selected."""
elval = self.elchoice.get().strip()
try:
elnum = float(elval)
elkeep =np.in1d(self.lines[:,2],elnum)
bcoout = self.lines[elkeep,0]
self.addbeamlistbco(bcoout)
except:
print('Bad value for elevation angle given')
def __removebeam__(self,closest,linesit):
"""This removes a beam from the data"""
self.canv.itemconfig(self.beamhandles[linesit], fill='blue')
self.output.remove(closest[0])
beamstr = "{:>9} {:>9} {:>9}\n".format(closest[0],closest[1],closest[2])
self.beamlines.remove(beamstr)
self.beamtext.config(state=Tkinter.NORMAL)
self.beamtext.delete(1.0,'end')
for ibeam in self.beamlines:
self.beamtext.insert(Tkinter.INSERT,ibeam)
self.beamtext.config(state=Tkinter.DISABLED)
self.canv.update()
def azsortbuttonClick(self):
outlist = self.output
bmlist = self.lines
bcolist = bmlist[:,0]
azlist = bmlist[:,1]
azvals = [azlist[bcolist==i][0] for i in outlist ]
order = np.argsort(azvals)
self.updatelists(order)
def elsortbuttonClick(self):
outlist = self.output
bmlist = self.lines
bcolist = bmlist[:,0]
ellist = bmlist[:,2]
elvals = [ellist[bcolist==i][0] for i in outlist ]
order = np.argsort(elvals)
self.updatelists(order)
def __addbeam__(self,closest,linesit):
"""This will add a beam"""
textheader = 'Closest beam is # %s, Az: %s, El: %s' %(int(closest[0]),closest[1],closest[2])
self.canv.itemconfig(self.canv.find_withtag('header'),text=textheader)
self.canv.itemconfig(self.beamhandles[linesit], fill='orange')
self.output.append(closest[0])
self.canv.update()
beamstr = "{:>9} {:>9} {:>9}\n".format(closest[0],closest[1],closest[2])
self.beamtext.config(state=Tkinter.NORMAL)
self.beamtext.insert(Tkinter.INSERT,beamstr)
self.beamtext.config(state=Tkinter.DISABLED)
self.beamlines.append(beamstr)
bcolist = self.beamcodeent.get().split()
bcolist = [int(i.strip(',')) for i in bcolist]
cbco = int(closest[0])
if cbco not in bcolist:
bcolist.append(cbco)
bcoliststr = [str(ib) for ib in bcolist]
bcostr = ' '.join(bcoliststr)
self.beamcodeent.delete(0,'end')
self.beamcodeent.insert(0,bcostr)
def removebeamlistbco(self,bcolist):
""" Removes a set of beams based off of the beam numbers"""
allbco = self.lines[:,0]
allbco = np.array([int(i) for i in allbco])
for ibco in bcolist:
ibco = int(ibco)
linesit =np.flatnonzero(allbco==ibco)
if len(linesit)==0:
continue
linesit = linesit[0]
closest= self.lines[linesit]
if closest[0] in self.output:
self.__removebeam__(closest,linesit)
def updatelists(self,order):
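"""Reorders the currently selected beams: removes them all and re-adds them
following the permutation given in order (used by the az/el sort buttons)."""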
neworder =[self.output[i] for i in order]
self.removebeamlistbco(neworder)
self.addbeamlistbco(neworder)
def buttonClick(self,fn=None):
"""This will output the beam list, create an image of the beams and close the program. """
if fn is None:
fn = tkFileDialog.asksaveasfilename(title="save Beam Codes and Image",filetypes=[('TXT','.txt')])
fnbase = ''.join(os.path.splitext(fn)[:-1])
allbeam = BeamSelector(self.lines)
fig = allbeam.plotbeams(self.output,True,fnbase+'.png',"Chosenbeams")
plt.close(fig)
f = open(fnbase+'.txt', 'w')
for beam in self.output:
f.write("%s\n" % (int(beam)))
f.close()
if not self.subgui:
#sys.exit()
self.parent.destroy()
def beambuttonClick(self):
fn = tkFileDialog.askopenfilename(title="Load Beam Codes",filetypes=[('TXT','.txt')])
bcolist = np.loadtxt(fn)
self.addbeamlistbco(bcolist)
def readbcobar(self):
bcolist = self.beamcodeent.get().split()
bcolist = [int(i.strip(',')) for i in bcolist]
self.addbeamlistbco(bcolist)
def run_beam_gui():
"""Used to run the GUI as a function"""
root = Tkinter.Tk()
gui = Gui(root,False)
root.mainloop()
if __name__ == "__main__":
root = Tkinter.Tk()
gui = Gui(root,False)
root.mainloop() | mit |
kernc/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
jeffkinnison/awe-wq | lib/python2.7/site-packages/awe/voronoi.py | 2 | 3930 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Voronoi diagram from a list of points
# Copyright (C) 2011 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
def circumcircle(P1, P2, P3):
'''
Return center of the circle containing P1, P2 and P3
If P1, P2 and P3 are collinear, return None
Adapted from:
http://local.wasp.uwa.edu.au/~pbourke/geometry/circlefrom3/Circle.cpp
'''
delta_a = P2 - P1
delta_b = P3 - P2
if np.abs(delta_a[0]) <= 0.000000001 and np.abs(delta_b[1]) <= 0.000000001:
center_x = 0.5*(P2[0] + P3[0])
center_y = 0.5*(P1[1] + P2[1])
else:
aSlope = delta_a[1]/delta_a[0]
bSlope = delta_b[1]/delta_b[0]
if np.abs(aSlope-bSlope) <= 0.000000001:
return None
center_x= (aSlope*bSlope*(P1[1] - P3[1]) + bSlope*(P1[0] + P2 [0]) \
- aSlope*(P2[0]+P3[0]))/(2.*(bSlope-aSlope))
center_y = -(center_x - (P1[0]+P2[0])/2.)/aSlope + (P1[1]+P2[1])/2.
return center_x, center_y
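# Quick sanity check (illustrative values, not part of the original module):
# circumcircle(np.array([0.,0.]), np.array([1.,1.]), np.array([2.,0.])) returns
# (1.0, 0.0), the point equidistant from all three inputs.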
def voronoi(X,Y):
''' Return line segments describing the voronoi diagram of X and Y '''
P = np.zeros((X.size+4,2))
P[:X.size,0], P[:Y.size,1] = X, Y
# We add four points at (pseudo) "infinity"
m = max(np.abs(X).max(), np.abs(Y).max())*1e5
P[X.size:,0] = -m, -m, +m, +m
P[Y.size:,1] = -m, +m, -m, +m
D = matplotlib.tri.Triangulation(P[:,0],P[:,1])
T = D.triangles
#axes = plt.subplot(1,1,1)
#plt.scatter(X,Y, s=5)
#patches = []
#for i,triang in enumerate(T):
# polygon = Polygon(np.array([P[triang[0]],P[triang[1]],P[triang[2]]]))
# patches.append(polygon)
#colors = 100*np.random.rand(len(patches))
#p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
#p.set_array(np.array(colors))
#axes.add_collection(p)
#plt.colorbar(p)
##lines = matplotlib.collections.LineCollection(segments, color='0.75')
##axes.add_collection(lines)
#plt.axis([0,1,0,1])
#plt.show()
#plt.savefig('test0.png')
n = T.shape[0]
C = np.zeros((n,2))
for i in range(n):
C[i] = circumcircle(P[T[i,0]],P[T[i,1]],P[T[i,2]])
X,Y = C[:,0], C[:,1]
#segments = []
#for i in range(n):
# for k in D.neighbors[i]:
# if k != -1:
# segments.append([(X[i],Y[i]), (X[k],Y[k])])
cells = [[] for i in range(np.max(T)+1)]
for i,triang in enumerate(T):
cells[triang[0]].append([X[i],Y[i]])
cells[triang[1]].append([X[i],Y[i]])
cells[triang[2]].append([X[i],Y[i]])
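# The circumcenters gathered for each seed point are in no particular order;
# the loop below sorts them by angle around the seed so each cell is returned
# as a simple polygon.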
for i,cell in enumerate(cells):
angle = []
for coord in cell:
angle.append(np.arctan2((coord[1]-P[i,1]),(coord[0]-P[i,0])))
id = np.argsort(-np.array(angle))
cells[i] = np.array([cell[j] for j in id])
return cells
# -----------------------------------------------------------------------------
if __name__ == '__main__':
P = np.random.random((2,256))
X,Y = P[0],P[1]
fig = plt.figure(figsize=(10,10))
axes = plt.subplot(1,1,1)
plt.scatter(X,Y, s=5)
#segments = voronoi(X,Y)
cells = voronoi(X,Y)
patches = []
for cell in cells:
polygon = Polygon(cell,True)
patches.append(polygon)
#plt.scatter(cell[:,0],cell[:,1])
colors = 100*np.random.rand(len(patches))
print colors
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
p.set_array(np.array(colors))
axes.add_collection(p)
plt.colorbar(p)
#lines = matplotlib.collections.LineCollection(segments, color='0.75')
#axes.add_collection(lines)
plt.axis([0,1,0,1])
plt.show()
plt.savefig('test.png')
| gpl-2.0 |
annayqho/TheCannon | code/lamost/xcalib_5labels/make_lamost_catalog/debris_exploration.py | 1 | 1553 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import pyfits
a = pyfits.open("table_for_paper_full.fits")
b = a[1].data
feh = b['cannon_m_h']
am = b['cannon_alpha_m']
rv_all = b['rv']
rverr = b['rv_err']
rv = rv_all / rverr
good = rverr < 50
feh = feh[good]
am = am[good]
rv = rv[good]
lowlim = -200
highlim = 180
lowlim = -15
highlim = 13
plt.scatter(feh, am, c=rv, s=5, lw=0, vmin=-15, vmax=5, cmap="viridis")
plt.xlim(-2.5, 0.8)
plt.xlabel("[Fe/H]")
plt.ylabel("[alpha/M]")
plt.colorbar(label="RV/RV_err")
plt.ylim(-0.15, 0.55)
plt.show()
fig, axarr=plt.subplots(1,3, sharex=True, sharey=True)
ax = axarr[0]
choose1 = np.logical_and(rv < highlim, rv > lowlim)
ax.scatter(feh[choose1], am[choose1], c='k', alpha=1, s=5, lw=0, label="%s < RV (km/s) < %s" %(str(lowlim), str(highlim)))
ax.set_ylabel("alpha enhancement, a/M (dex)")
ax.legend()
ax = axarr[1]
choose = rv < lowlim
ax.scatter(feh[choose1], am[choose1], c='k', alpha=1, s=5,
lw=0, label="%s < RV (km/s) < %s" %(str(lowlim), str(highlim)))
ax.scatter(feh[choose], am[choose], c='r', alpha=1, s=5,
lw=0, label="RV (km/s) < %s" %str(lowlim))
ax.set_xlabel("metallicity, Fe/H (dex")
ax.legend()
ax = axarr[2]
choose = rv > highlim
ax.scatter(feh[choose1], am[choose1], c='k', alpha=1, s=5, lw=0, label="%s < RV (km/s) < %s" %(str(lowlim), str(highlim)))
ax.scatter(feh[choose], am[choose], c='r', alpha=1, s=5, lw = 0, label="RV (km/s) > %s" %str(highlim))
ax.legend()
plt.xlim(-2.5, 0.8)
plt.ylim(-0.15, 0.55)
plt.show()
| mit |
Lynn-015/NJU_DMRG | giggleliu/mps/mpslib.py | 1 | 4951 | #!/usr/bin/python
'''
Matrix Product State.
'''
from numpy import *
from matplotlib.pyplot import *
from matplotlib import patches
from matplotlib.collections import LineCollection
from scipy.linalg import svd,qr,rq,norm,block_diag
from mps import MPS,VidalMPS,KMPS,BMPS
from scipy import sparse as sps
import pdb,time
def state2VMPS(state,sitedim,tol=1e-8):
'''
Parse a normal state into a Vidal matrix product state.
Parameters
--------------
state:
The target state, 1D array.
sitedim:
The dimension of a single site, integer.
tol:
The tolerence of singular value, float.
*return*:
A <VidalMPS> instance.
Note
---------------
`svd` method is used in decomposition.
'''
nsite=int(round(log(len(state))/log(sitedim)))
GL,LL=[],[]
ri=1
for i in xrange(nsite):
state=state.reshape([sitedim*ri,-1])
U,S,V=svd(state,full_matrices=False)
#remove zeros from v
kpmask=abs(S)>tol
ri=kpmask.sum()
S=S[kpmask]
LL.append(S)
state=S[:,newaxis]*V[kpmask]
U=U[:,kpmask]
U=U.reshape([-1,sitedim,ri])
ai=swapaxes(U,0,1)
if i==0:
gi=ai
else:
gi=ai/LL[-2][:,newaxis]
GL.append(gi)
S=LL[-1][0]
return VidalMPS(GL,LL[:-1],S)
def state2MPS(state,sitedim,l,method='qr',tol=1e-8):
'''
Parse a normal state into a matrix product state.
state:
The target state, 1D array.
sitedim:
The dimension of a single site, integer.
l:
The division point of left and right canonical scanning, integer between 0 and number of site.
method:
The method to extract A,B matrices.
* 'qr' -> get A,B matrices by the method of QR decomposition, faster, rank revealing in a non-straight-forward way.
* 'svd' -> get A,B matrices by the method of SVD decomposition, slow, rank revealing.
tol:
The tolerence of singular value, float.
*return*:
A <MPS> instance.
'''
nsite=int(round(log(len(state))/log(sitedim)))
AL,BL=[],[]
ri=1
assert(method=='svd' or method=='qr')
assert(l>=0 and l<=nsite)
for i in xrange(l):
state=state.reshape([sitedim*ri,-1])
if method=='svd':
U,S,V=svd(state,full_matrices=False)
#remove zeros from v
kpmask=abs(S)>tol
ri=kpmask.sum()
state=S[kpmask,newaxis]*V[kpmask]
U=U[:,kpmask]
else:
U,state=qr(state,mode='economic')
kpmask=sum(abs(state),axis=1)>tol
ri=kpmask.sum()
state=state[kpmask]
U=U[:,kpmask]
ai=swapaxes(U.reshape([-1,sitedim,ri]),0,1)
AL.append(ai)
ri=1
for i in xrange(nsite-l):
state=state.reshape([-1,sitedim*ri])
if method=='svd':
U,S,V=svd(state,full_matrices=False)
#remove zeros from v
kpmask=abs(S)>tol
ri=kpmask.sum()
state=S[kpmask]*U[:,kpmask]
V=V[kpmask,:]
else:
state,V=rq(state,mode='economic')
kpmask=sum(abs(state),axis=0)>tol
ri=kpmask.sum()
state=state[:,kpmask]
V=V[kpmask]
bi=swapaxes(V.reshape([ri,sitedim,-1]),0,1)
BL.append(bi)
BL=BL[::-1]
S=state.diagonal()
return KMPS(AL,BL,S=S)
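# Hedged usage sketch (illustrative only; KMPS/VidalMPS are the classes imported
# from mps above, and random/norm come from the numpy/scipy star imports):
# psi = random.rand(2**4); psi = psi/norm(psi) # random 4-site spin-1/2 state
# kmps = state2MPS(psi,sitedim=2,l=2,method='qr') # mixed-canonical form, split at site 2
# vmps = state2VMPS(psi,sitedim=2) # Vidal (Gamma-Lambda) form of the same state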
def mps_add(*args):
'''
Add <KMPS>.
Parameters
-----------
args:
<MPS> instances to be added.
'''
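# The sum is assembled site by site: bulk site tensors are stacked block-diagonally
# (so bond dimensions add up), while the first and last site tensors are simply
# concatenated along their single open bond index.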
if len(args)<=1:
raise ValueError('At least 2 args are required.')
AL=[]
BL=[]
hndim=args[0].hndim
na=len(args[0].AL)
nb=len(args[0].BL)
nsite=na+nb
for i in xrange(na):
if i==0:
ai=[concatenate([mps.AL[i][j] for mps in args],axis=1) for j in xrange(hndim)]
elif i==nsite-1:
ai=[concatenate([mps.AL[i][j] for mps in args],axis=0) for j in xrange(hndim)]
else:
ai=[block_diag(*[mps.AL[i][j] for mps in args]) for j in xrange(hndim)]
AL.append(ai)
for i in xrange(nb):
if i+na==0:
bi=[concatenate([mps.BL[i][j] for mps in args],axis=1) for j in xrange(hndim)]
elif i+na==nsite-1:
bi=[concatenate([mps.BL[i][j] for mps in args],axis=0) for j in xrange(hndim)]
else:
bi=[block_diag(*[mps.BL[i][j] for mps in args]) for j in xrange(hndim)]
BL.append(bi)
S=concatenate([mps.S for mps in args])
return args[0].__class__(AL=AL,BL=BL,S=S)
def mps_unique(mps):
'''
View <MPS> as unique.
'''
mps.token=id(mps)
def mps_viewsame(*mpses):
'''
View a set of <MPS> instances as same.
'''
mps0=mpses[0]
for mps in mpses[1:]:
mps.token=mps0.token
def compress(mps):
'''
Compress a MPS to compact form.
'''
raise Exception('Not Implemented')
| mit |
cin/spark | python/pyspark/serializers.py | 1 | 22617 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects, also configurable by SparkContext's C{batchSize} parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class PythonEvalType(object):
NON_UDF = 0
SQL_BATCHED_UDF = 1
SQL_PANDAS_UDF = 2
SQL_PANDAS_GROUPED_UDF = 3
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
"""
Return an iterator of deserialized batches (iterable) of objects from the input stream.
if the serializer does not operate on batches the default implementation returns an
iterator of single element lists.
"""
return map(lambda x: [x], self.load_stream(stream))
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("can not serialize object larger than 2G")
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
class ArrowSerializer(FramedSerializer):
"""
Serializes bytes as Arrow data with the Arrow file format.
"""
def dumps(self, batch):
import pyarrow as pa
import io
sink = io.BytesIO()
writer = pa.RecordBatchFileWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
return sink.getvalue()
def loads(self, obj):
import pyarrow as pa
reader = pa.RecordBatchFileReader(pa.BufferReader(obj))
return reader.read_all()
def __repr__(self):
return "ArrowSerializer"
def _create_batch(series):
"""
Create an Arrow record batch from the given pandas.Series or list of Series, with optional type.
:param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
:return: Arrow RecordBatch
"""
from pyspark.sql.types import _check_series_convert_timestamps_internal
import pyarrow as pa
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
# If a nullable integer series has been promoted to floating point with NaNs, need to cast
# NOTE: this is not necessary with Arrow >= 0.7
def cast_series(s, t):
if type(t) == pa.TimestampType:
# NOTE: convert to 'us' with astype here, unit ignored in `from_pandas` see ARROW-1680
return _check_series_convert_timestamps_internal(s.fillna(0))\
.values.astype('datetime64[us]', copy=False)
# NOTE: can not compare None with pyarrow.DataType(), fixed with Arrow >= 0.7.1
elif t is not None and t == pa.date32():
# TODO: this converts the series to Python objects, possibly avoid with Arrow >= 0.8
return s.dt.date
elif t is None or s.dtype == t.to_pandas_dtype():
return s
else:
return s.fillna(0).astype(t.to_pandas_dtype(), copy=False)
# Some object types don't support masks in Arrow, see ARROW-1721
def create_array(s, t):
casted = cast_series(s, t)
mask = None if casted.dtype == 'object' else s.isnull()
return pa.Array.from_pandas(casted, mask=mask, type=t)
arrs = [create_array(s, t) for s, t in series]
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
class ArrowStreamPandasSerializer(Serializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
"""
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
import pyarrow as pa
writer = None
try:
for series in iterator:
batch = _create_batch(series)
if writer is None:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
from pyspark.sql.types import _check_dataframe_localize_timestamps
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
# NOTE: changed from pa.Columns.to_pandas, timezone issue in conversion fixed in 0.7.1
pdf = _check_dataframe_localize_timestamps(batch.to_pandas())
yield [c for _, c in pdf.iteritems()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
n = len(iterator)
for i in xrange(0, n, self.batchSize):
yield iterator[i: i + self.batchSize]
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
"""
Serializes a stream of list of pairs, split the list of values
which contain more than a certain number of objects to make them
have similar sizes.
"""
def __init__(self, serializer, batchSize=10):
BatchedSerializer.__init__(self, serializer, batchSize)
def _batched(self, iterator):
n = self.batchSize
for key, values in iterator:
for i in range(0, len(values), n):
yield key, values[i:i + n]
def load_stream(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
"""
Choose the size of batch automatically based on the size of object
"""
def __init__(self, serializer, bestSize=1 << 16):
BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
self.bestSize = bestSize
def dump_stream(self, iterator, stream):
batch, best = 1, self.bestSize
iterator = iter(iterator)
while True:
vs = list(itertools.islice(iterator, batch))
if not vs:
break
bytes = self.serializer.dumps(vs)
write_int(len(bytes), stream)
stream.write(bytes)
size = len(bytes)
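# Grow the batch geometrically while serialized frames stay under the target
# size, and halve it once a frame overshoots the target by more than 10x.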
if size < best:
batch *= 2
elif size > best * 10 and batch > 1:
batch //= 2
def __repr__(self):
return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD cartesian,
we additionally need to do the cartesian within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# for correctness with repeated cartesian/zip this must be returned as one batch
yield product(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "CartesianDeserializer(%s, %s)" % \
(str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD zip,
we additionally need to do the zip within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
# instead of lists. We need to convert them to lists if needed.
key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
if len(key_batch) != len(val_batch):
raise ValueError("Can not deserialize PairRDD with different number of items"
" in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
# for correctness with repeated cartesian/zip this must be returned as one batch
yield zip(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
def loads(self, obj):
return obj
def dumps(self, obj):
return obj
# Hook namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
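# Hedged illustration (Point is a hypothetical namedtuple, not defined here): once
# the hijack has run at import time, Point = collections.namedtuple("Point", "x y")
# reduces through _restore, i.e. Point(1, 2).__reduce__() yields
# (_restore, ('Point', ('x', 'y'), (1, 2))), which is what makes it picklable.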
def _hijack_namedtuple():
""" Hack namedtuple() to make it picklable """
# hijack only one time
if hasattr(collections.namedtuple, "__hijack"):
return
global _old_namedtuple # or it will put in closure
global _old_namedtuple_kwdefaults # or it will put in closure too
def _copy_func(f):
return types.FunctionType(f.__code__, f.__globals__, f.__name__,
f.__defaults__, f.__closure__)
def _kwdefaults(f):
# __kwdefaults__ contains the default values of keyword-only arguments which are
# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
# are as below:
#
# - Does not exist in Python 2.
# - Returns None in <= Python 3.5.x.
# - Returns a dictionary containing the default values to the keys from Python 3.6.x
# (See https://bugs.python.org/issue25628).
kargs = getattr(f, "__kwdefaults__", None)
if kargs is None:
return {}
else:
return kargs
_old_namedtuple = _copy_func(collections.namedtuple)
_old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
def namedtuple(*args, **kwargs):
for k, v in _old_namedtuple_kwdefaults.items():
kwargs[k] = kwargs.get(k, v)
cls = _old_namedtuple(*args, **kwargs)
return _hack_namedtuple(cls)
# replace namedtuple with new one
collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
collections.namedtuple.__code__ = namedtuple.__code__
collections.namedtuple.__hijack = 1
# hack the cls already generated by namedtuple
# those created in other module can be pickled as normal,
# so only hack those in __main__ module
for n, o in sys.modules["__main__"].__dict__.items():
if (type(o) is type and o.__base__ is tuple
and hasattr(o, "_fields")
and "__reduce__" not in o.__dict__):
_hack_namedtuple(o) # hack inplace
_hijack_namedtuple()
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, protocol)
if sys.version >= '3':
def loads(self, obj, encoding="bytes"):
return pickle.loads(obj, encoding=encoding)
else:
def loads(self, obj, encoding=None):
return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj):
return cloudpickle.dumps(obj, 2)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
def dumps(self, obj):
return marshal.dumps(obj)
def loads(self, obj):
return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
"""
Choose marshal or pickle as serialization protocol automatically
"""
def __init__(self):
FramedSerializer.__init__(self)
self._type = None
def dumps(self, obj):
if self._type is not None:
return b'P' + pickle.dumps(obj, -1)
try:
return b'M' + marshal.dumps(obj)
except Exception:
self._type = b'P'
return b'P' + pickle.dumps(obj, -1)
def loads(self, obj):
_type = obj[0]
if _type == b'M':
return marshal.loads(obj[1:])
elif _type == b'P':
return pickle.loads(obj[1:])
else:
raise ValueError("invalid sevialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
"""
Compress the serialized data
"""
def __init__(self, serializer):
FramedSerializer.__init__(self)
assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
self.serializer = serializer
def dumps(self, obj):
return zlib.compress(self.serializer.dumps(obj), 1)
def loads(self, obj):
return self.serializer.loads(zlib.decompress(obj))
def __repr__(self):
return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def __init__(self, use_unicode=True):
self.use_unicode = use_unicode
def loads(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
s = stream.read(length)
return s.decode("utf-8") if self.use_unicode else s
def load_stream(self, stream):
try:
while True:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def __repr__(self):
return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
length = stream.read(8)
if not length:
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
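# Rough round-trip sketch of the framing (io.BytesIO stands in for the JVM stream;
# io is not imported by this module):
# buf = io.BytesIO(); write_with_length(b"abc", buf); buf.seek(0)
# read_int(buf) -> 3, followed by the 3 payload bytes.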
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
| apache-2.0 |
sandeepkbhat/pylearn2 | pylearn2/expr/tests/test_probabilistic_max_pooling.py | 44 | 24662 | from __future__ import print_function
import numpy as np
import warnings
from theano.compat.six.moves import xrange
from theano import config
from theano import function
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from pylearn2.expr.probabilistic_max_pooling import max_pool_python
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels_python
from pylearn2.expr.probabilistic_max_pooling import max_pool
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels
from pylearn2.expr.probabilistic_max_pooling import max_pool_b01c
from pylearn2.expr.probabilistic_max_pooling import max_pool_c01b
from pylearn2.expr.probabilistic_max_pooling import max_pool_unstable
from pylearn2.expr.probabilistic_max_pooling import max_pool_softmax_op
from pylearn2.expr.probabilistic_max_pooling import \
max_pool_softmax_with_bias_op
from pylearn2.testing import no_debug_mode
def check_correctness_channelwise(f):
"""
Tests that the theano expression emitted by f computes the same values
as the ground truth python function
Note: to keep the python version as dead simple as possible (i.e., to make
sure there are no bugs in the ground truth) it uses the numerically
unstable version of softmax, so this test does not work with very large
numbers.
"""
rng = np.random.RandomState([2012, 7, 19])
batch_size = 5
pool_size = 4
n = 3 * pool_size
zv = rng.randn(batch_size, n).astype(config.floatX) * 1. - 1.5
top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)
p_np, h_np = max_pool_channels_python(zv, pool_size, top_down_v)
z_th = T.matrix()
z_th.name = 'z_th'
top_down_th = T.matrix()
top_down_th.name = 'top_down_th'
p_th, h_th = f(z_th, pool_size, top_down_th)
func = function([z_th, top_down_th], [p_th, h_th])
pv, hv = func(zv, top_down_v)
assert p_np.shape == pv.shape
assert h_np.shape == hv.shape
if not np.allclose(h_np, hv):
print((h_np.min(), h_np.max()))
print((hv.min(), hv.max()))
assert False
if not np.allclose(p_np, pv):
diff = abs(p_np - pv)
print('max diff ', diff.max())
print('min diff ', diff.min())
print('ave diff ', diff.mean())
assert False
def check_correctness_sigmoid_channelwise(f):
"""
Tests that f is equivalent to the sigmoid function when the pool size is 1
"""
rng = np.random.RandomState([2012, 7, 19])
batch_size = 5
pool_size = 1
n = 3 * pool_size
zv = rng.randn(batch_size, n).astype(config.floatX) * 1. - 1.5
top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)
z_th = T.matrix()
z_th.name = 'z_th'
top_down_th = T.matrix()
top_down_th.name = 'top_down_th'
p_th, h_th = f(z_th, pool_size, top_down_th)
h_s = T.nnet.sigmoid(z_th + top_down_th)
func = function([z_th, top_down_th], [p_th, h_th, h_s])
pv, hv, h_s = func(zv, top_down_v)
p_s = h_s
assert p_s.shape == pv.shape
assert h_s.shape == hv.shape
if not np.allclose(h_s, hv):
print((h_s.min(), h_s.max()))
print((hv.min(), hv.max()))
assert False
if not np.allclose(p_s, pv):
diff = abs(p_s - pv)
print('max diff ', diff.max())
print('min diff ', diff.min())
print('ave diff ', diff.mean())
assert False
def check_correctness(f):
rng = np.random.RandomState([2012, 7, 19])
batch_size = 5
rows = 32
cols = 30
channels = 3
pool_rows = 2
pool_cols = 3
zv = rng.randn(batch_size, rows, cols,
channels).astype(config.floatX) * 2. - 3.
p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols))
z_th = T.TensorType(broadcastable=(False, False, False, False),
dtype=config.floatX)()
z_th.name = 'z_th'
p_th, h_th = f(z_th, (pool_rows, pool_cols))
func = function([z_th], [p_th, h_th])
pv, hv = func(zv)
assert p_np.shape == pv.shape
assert h_np.shape == hv.shape
if not np.allclose(h_np, hv):
print((h_np.min(), h_np.max()))
print((hv.min(), hv.max()))
assert False
assert np.allclose(p_np, pv)
def check_correctness_bc01(f):
"""
Tests that the theano expression emitted by f computes the same values
as the ground truth python function
Note: to keep the python version as dead simple as possible (i.e., to make
sure there are no bugs in the ground truth) it uses the numerically
unstable version of softmax, so this test does not work with very large
numbers.
"""
rng = np.random.RandomState([2012, 7, 19])
batch_size = 5
rows = 32
cols = 30
channels = 3
pool_rows = 2
pool_cols = 3
zv = rng.randn(batch_size, rows, cols,
channels).astype(config.floatX) * 1. - 1.5
top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,
channels).astype(config.floatX)
p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)
z_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
z_th.name = 'z_th'
zr = z_th.dimshuffle(0, 3, 1, 2)
top_down_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
top_down_th.name = 'top_down_th'
top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)
p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)
func = function([z_th, top_down_th], [p_th.dimshuffle(0, 2, 3, 1),
h_th.dimshuffle(0, 2, 3, 1)])
pv, hv = func(zv, top_down_v)
assert p_np.shape == pv.shape
assert h_np.shape == hv.shape
if not np.allclose(h_np, hv):
print((h_np.min(), h_np.max()))
print((hv.min(), hv.max()))
assert False
if not np.allclose(p_np, pv):
diff = abs(p_np - pv)
print('max diff ', diff.max())
print('min diff ', diff.min())
print('ave diff ', diff.mean())
assert False
def check_correctness_c01b(f):
"""
Tests that the theano expression emitted by f computes the same values
as the ground truth python function
Note: to keep the python version as dead simple as possible (i.e., to make
sure there are no bugs in the ground truth) it uses the numerically
unstable version of softmax, so this test does not work with very large
numbers.
"""
rng = np.random.RandomState([2013, 5, 6])
batch_size = 5
rows = 32
cols = 30
channels = 3
pool_rows = 2
pool_cols = 3
# Do the python ground truth in b01c format
zv = rng.randn(batch_size, rows, cols,
channels).astype(config.floatX) * 1. - 1.5
top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,
channels).astype(config.floatX)
p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)
# Dimshuffle the inputs into c01b for the theano implementation
z_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
z_th.tag.test_value = zv
z_th.name = 'z_th'
zr = z_th.dimshuffle(3, 1, 2, 0)
top_down_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
top_down_th.name = 'top_down_th'
top_down_th.tag.test_value = top_down_v
top_down_r = top_down_th.dimshuffle(3, 1, 2, 0)
p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)
func = function([z_th, top_down_th], [p_th.dimshuffle(3, 1, 2, 0),
h_th.dimshuffle(3, 1, 2, 0)])
pv, hv = func(zv, top_down_v)
if not p_np.shape == pv.shape:
raise AssertionError(str((p_np.shape, pv.shape)))
assert h_np.shape == hv.shape
if not np.allclose(h_np, hv):
print((h_np.min(), h_np.max()))
print((hv.min(), hv.max()))
assert False
if not np.allclose(p_np, pv):
diff = abs(p_np - pv)
print('max diff ', diff.max())
print('min diff ', diff.min())
print('ave diff ', diff.mean())
assert False
warnings.warn("TODO: make sampling tests run on c01b format of pooling.")
@no_debug_mode
def check_sample_correctishness_b01c(f):
batch_size = 5
rows = 32
cols = 30
channels = 3
pool_rows = 2
pool_cols = 3
rng = np.random.RandomState([2012, 9, 26])
zv = rng.randn(batch_size, rows, cols,
channels).astype(config.floatX) * 2. - 3.
top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,
channels).astype(config.floatX)
z_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
z_th.name = 'z_th'
top_down_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
top_down_th.name = 'top_down_th'
theano_rng = MRG_RandomStreams(rng.randint(2147462579))
p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,
theano_rng)
prob_func = function([z_th, top_down_th], [p_th, h_th])
pv, hv = prob_func(zv, top_down_v)
sample_func = function([z_th, top_down_th], [p_sth, h_sth])
acc_p = 0. * pv
acc_h = 0. * hv
# make sure the test gets good coverage, ie, that it includes many
# different activation probs for both detector and pooling layer
buckets = 10
bucket_width = 1. / float(buckets)
for i in xrange(buckets):
lower_lim = i * bucket_width
upper_lim = (i+1) * bucket_width
assert np.any((pv >= lower_lim) * (pv < upper_lim))
assert np.any((hv >= lower_lim) * (hv < upper_lim))
assert upper_lim == 1.
for i in xrange(10000):
ps, hs = sample_func(zv, top_down_v)
assert ps.shape == pv.shape
assert hs.shape == hv.shape
acc_p += ps
acc_h += hs
est_p = acc_p / float(i+1)
est_h = acc_h / float(i+1)
pd = np.abs(est_p-pv)
hd = np.abs(est_h-hv)
"""
# plot maps of the estimation error, this is to see if it has
# some spatial pattern this is useful for detecting bugs like
# not handling the border correctly, etc.
from pylearn2.gui.patch_viewer import PatchViewer
pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),
is_color = False)
for i in xrange(pd.shape[0]):
for j in xrange(pd.shape[3]):
pv.add_patch((pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)
pv.show()
pv = PatchViewer((hd.shape[0],hd.shape[3]),(hd.shape[1],hd.shape[2]),
is_color = False)
for i in xrange(hd.shape[0]):
for j in xrange(hd.shape[3]):
pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)
pv.show()
"""
"""
plot expectation to estimate versus error in estimation
expect bigger errors for values closer to 0.5
from matplotlib import pyplot as plt
#nelem = reduce( lambda x, y : x*y, pd.shape)
#plt.scatter( pv.reshape(nelem), pd.reshape(nelem))
#plt.show()
nelem = reduce( lambda x, y : x*y, hd.shape)
plt.scatter( hv.reshape(nelem), hd.reshape(nelem))
plt.show()
"""
# don't really know how tight this should be
# but you can try to pose an equivalent problem
# and implement it in another way
# using a numpy implementation in softmax_acc.py
# I got a max error of .17
assert max(pd.max(), hd.max()) < .17
# Do exhaustive checks on just the last sample
assert np.all((ps == 0) + (ps == 1))
assert np.all((hs == 0) + (hs == 1))
for k in xrange(batch_size):
for i in xrange(ps.shape[1]):
for j in xrange(ps.shape[2]):
for l in xrange(channels):
p = ps[k, i, j, l]
h = hs[k, i*pool_rows:(i+1)*pool_rows,
j*pool_cols:(j+1)*pool_cols, l]
assert h.shape == (pool_rows, pool_cols)
assert p == h.max()
""" If you made it to here, it's correctish
(can't tell if samples are perfectly "correct") """
@no_debug_mode
def check_sample_correctishness_c01b(f):
batch_size = 5
rows = 32
cols = 30
channels = 3
pool_rows = 2
pool_cols = 3
rng = np.random.RandomState([2012, 9, 26])
zv = rng.randn(channels, rows, cols,
batch_size).astype(config.floatX) * 2. - 3.
top_down_v = rng.randn(channels, rows / pool_rows, cols / pool_cols,
batch_size).astype(config.floatX)
z_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
z_th.name = 'z_th'
z_th.tag.test_value = zv
top_down_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
top_down_th.name = 'top_down_th'
top_down_th.tag.test_value = top_down_v
theano_rng = MRG_RandomStreams(rng.randint(2147462579))
p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,
theano_rng)
prob_func = function([z_th, top_down_th], [p_th, h_th])
pv, hv = prob_func(zv, top_down_v)
sample_func = function([z_th, top_down_th], [p_sth, h_sth])
acc_p = 0. * pv
acc_h = 0. * hv
# make sure the test gets good coverage, ie, that it includes
# many different activation probs for both detector and pooling layer
buckets = 10
bucket_width = 1. / float(buckets)
for i in xrange(buckets):
lower_lim = i * bucket_width
upper_lim = (i+1) * bucket_width
assert np.any((pv >= lower_lim) * (pv < upper_lim))
assert np.any((hv >= lower_lim) * (hv < upper_lim))
assert upper_lim == 1.
for i in xrange(10000):
ps, hs = sample_func(zv, top_down_v)
assert ps.shape == pv.shape
assert hs.shape == hv.shape
acc_p += ps
acc_h += hs
est_p = acc_p / float(i+1)
est_h = acc_h / float(i+1)
pd = np.abs(est_p-pv)
hd = np.abs(est_h-hv)
# don't really know how tight this should be
# but you can try to pose an equivalent problem
# and implement it in another way
# using a numpy implementation in softmax_acc.py
# I got a max error of .17
assert max(pd.max(), hd.max()) < .17
# Do exhaustive checks on just the last sample
assert np.all((ps == 0) + (ps == 1))
assert np.all((hs == 0) + (hs == 1))
for k in xrange(batch_size):
for i in xrange(ps.shape[1]):
for j in xrange(ps.shape[2]):
for l in xrange(channels):
p = ps[l, i, j, k]
h = hs[l, i*pool_rows:(i+1)*pool_rows,
j*pool_cols:(j+1)*pool_cols, k]
assert h.shape == (pool_rows, pool_cols)
assert p == h.max()
""" If you made it to here, it's correctish
(can't tell if samples are perfectly "correct") """
@no_debug_mode
def check_sample_correctishness_bc01(f):
"""
Tests that the sample mean converges to the conditional
expectation given by the function
Tests that p really is the max of the samples
Tests that at most one h in a group is on
"""
batch_size = 5
rows = 32
cols = 30
channels = 3
pool_rows = 2
pool_cols = 3
rng = np.random.RandomState([2012, 9, 26])
zv = rng.randn(batch_size, channels, rows,
cols).astype(config.floatX) * 2. - 3.
top_down_v = rng.randn(batch_size, channels, rows / pool_rows,
cols / pool_cols).astype(config.floatX)
z_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
z_th.tag.test_value = zv
z_th.name = 'z_th'
top_down_th = T.TensorType(broadcastable=(False, False, False, False),
dtype = config.floatX)()
top_down_th.tag.test_value = top_down_v
top_down_th.name = 'top_down_th'
theano_rng = MRG_RandomStreams(rng.randint(2147462579))
p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,
theano_rng)
prob_func = function([z_th, top_down_th], [p_th, h_th])
pv, hv = prob_func(zv, top_down_v)
sample_func = function([z_th, top_down_th], [p_sth, h_sth])
acc_p = 0. * pv
acc_h = 0. * hv
# make sure the test gets good coverage, ie, that it includes many
# different activation probs for both detector and pooling layer
buckets = 10
bucket_width = 1. / float(buckets)
for i in xrange(buckets):
lower_lim = i * bucket_width
upper_lim = (i+1) * bucket_width
assert np.any((pv >= lower_lim) * (pv < upper_lim))
assert np.any((hv >= lower_lim) * (hv < upper_lim))
assert upper_lim == 1.
for i in xrange(10000):
ps, hs = sample_func(zv, top_down_v)
assert ps.shape == pv.shape
assert hs.shape == hv.shape
acc_p += ps
acc_h += hs
est_p = acc_p / float(i+1)
est_h = acc_h / float(i+1)
pd = np.abs(est_p-pv)
hd = np.abs(est_h-hv)
"""
# plot maps of the estimation error, this is to see if it has some
# spatial pattern this is useful for detecting bugs like not handling
# the border correctly, etc.
from pylearn2.gui.patch_viewer import PatchViewer
pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),
is_color = False)
for i in xrange(pd.shape[0]):
for j in xrange(pd.shape[3]):
pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)
pv.show()
pv = PatchViewer((hd.shape[0],hd.shape[3]), (hd.shape[1],hd.shape[2]),
is_color = False)
for i in xrange(hd.shape[0]):
for j in xrange(hd.shape[3]):
pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)
pv.show()
"""
"""
plot expectation to estimate versus error in estimation
expect bigger errors for values closer to 0.5
from matplotlib import pyplot as plt
#nelem = reduce( lambda x, y : x*y, pd.shape)
#plt.scatter( pv.reshape(nelem), pd.reshape(nelem))
#plt.show()
nelem = reduce( lambda x, y : x*y, hd.shape)
plt.scatter( hv.reshape(nelem), hd.reshape(nelem))
plt.show()
"""
# don't really know how tight this should be
# but you can try to pose an equivalent problem
# and implement it in another way
# using a numpy implementation in softmax_acc.py
# I got a max error of .17
assert max(pd.max(), hd.max()) < .17
# Do exhaustive checks on just the last sample
assert np.all((ps == 0) + (ps == 1))
assert np.all((hs == 0) + (hs == 1))
for k in xrange(batch_size):
for i in xrange(ps.shape[2]):
for j in xrange(ps.shape[3]):
for l in xrange(channels):
p = ps[k, l, i, j]
h = hs[k, l, i*pool_rows:(i+1)*pool_rows,
j*pool_cols:(j+1)*pool_cols]
assert h.shape == (pool_rows, pool_cols)
assert p == h.max()
assert h.sum() <= 1
""" If you made it to here, it's correctish
(can't tell if samples are perfectly "correct") """
@no_debug_mode
def check_sample_correctishness_channelwise(f):
"""
Tests that the sample mean converges to the conditional expectation given
by the function. Tests that p really is the max of the samples. Tests that
at most one h in a group is on.
"""
batch_size = 27
pool_size = 4
n = pool_size * 21
rng = np.random.RandomState([2012, 9, 26])
zv = rng.randn(batch_size, n).astype(config.floatX) * 3.5 - 5.
top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)
z_th = T.matrix()
z_th.tag.test_value = zv
z_th.name = 'z_th'
top_down_th = T.matrix()
top_down_th.tag.test_value = top_down_v
top_down_th.name = 'top_down_th'
theano_rng = MRG_RandomStreams(rng.randint(2147462579))
p_th, h_th, p_sth, h_sth = f(z_th, pool_size, top_down_th, theano_rng)
prob_func = function([z_th, top_down_th], [p_th, h_th])
pv, hv = prob_func(zv, top_down_v)
sample_func = function([z_th, top_down_th], [p_sth, h_sth])
acc_p = 0. * pv
acc_h = 0. * hv
# make sure the test gets good coverage, ie, that it includes
# many different activation probs for both detector and pooling layer
buckets = 10
bucket_width = 1. / float(buckets)
print(pv.min(), pv.max())
print(hv.min(), hv.max())
for i in xrange(buckets):
lower_lim = i * bucket_width
upper_lim = (i+1) * bucket_width
print(lower_lim, upper_lim)
assert np.any((pv >= lower_lim) * (pv < upper_lim))
assert np.any((hv >= lower_lim) * (hv < upper_lim))
assert upper_lim == 1.
for i in xrange(10000):
ps, hs = sample_func(zv, top_down_v)
assert ps.shape == pv.shape
assert hs.shape == hv.shape
acc_p += ps
acc_h += hs
est_p = acc_p / float(i+1)
est_h = acc_h / float(i+1)
pd = np.abs(est_p-pv)
hd = np.abs(est_h-hv)
"""
# plot maps of the estimation error, this is to see if it has some
# spatial pattern this is useful for detecting bugs like not handling
# the border correctly, etc.
# from pylearn2.gui.patch_viewer import PatchViewer
pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),
is_color = False)
for i in xrange(pd.shape[0]):
for j in xrange(pd.shape[3]):
pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)
pv.show()
pv = PatchViewer((hd.shape[0],hd.shape[3]),(hd.shape[1],hd.shape[2]),
is_color = False)
for i in xrange(hd.shape[0]):
for j in xrange(hd.shape[3]):
pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)
pv.show()
"""
"""
plot expectation to estimate versus error in estimation
expect bigger errors for values closer to 0.5
from matplotlib import pyplot as plt
#nelem = reduce( lambda x, y : x*y, pd.shape)
#plt.scatter( pv.reshape(nelem), pd.reshape(nelem))
#plt.show()
nelem = reduce( lambda x, y : x*y, hd.shape)
plt.scatter( hv.reshape(nelem), hd.reshape(nelem))
plt.show()
"""
# don't really know how tight this should be
# but you can try to pose an equivalent problem
# and implement it in another way
# using a numpy implementation in softmax_acc.py
# I got a max error of .17
assert max(pd.max(), hd.max()) < .17
# Do exhaustive checks on just the last sample
assert np.all((ps == 0) + (ps == 1))
assert np.all((hs == 0) + (hs == 1))
for k in xrange(batch_size):
for i in xrange(ps.shape[1]):
p = ps[k, i]
h = hs[k, i*pool_size:(i+1)*pool_size]
assert h.shape == (pool_size,)
assert p == h.max()
assert h.sum() <= 1
""" If you made it to here, it's correctish
(can't tell if samples are perfectly "correct") """
def test_max_pool_channels():
check_correctness_channelwise(max_pool_channels)
def test_max_pool_channels_sigmoid():
check_correctness_sigmoid_channelwise(max_pool_channels)
def test_max_pool_channels_samples():
check_sample_correctishness_channelwise(max_pool_channels)
def test_max_pool():
check_correctness_bc01(max_pool)
def test_max_pool_c01b():
check_correctness_c01b(max_pool_c01b)
def test_max_pool_samples():
check_sample_correctishness_bc01(max_pool)
def test_max_pool_b01c_samples():
check_sample_correctishness_b01c(max_pool_b01c)
def test_max_pool_c01b_samples():
check_sample_correctishness_c01b(max_pool_c01b)
def test_max_pool_b01c():
check_correctness(max_pool_b01c)
def test_max_pool_unstable():
check_correctness(max_pool_unstable)
def test_max_pool_softmax_op():
check_correctness(max_pool_softmax_op)
def test_max_pool_softmax_with_bias_op():
check_correctness(max_pool_softmax_with_bias_op)
| bsd-3-clause |
IntelLabs/hpat | examples/series/series_take.py | 1 | 1703 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_take():
series = pd.Series([5, 4, 3, 2, 1])
return series.take([4, 1]) # Expect series of 4, 1
print(series_take())
| bsd-2-clause |
juharris/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 6 | 4216 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using convolutional networks over characters
for the DBpedia dataset to predict the class from the description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(x, y):
"""Character level convolutional neural network model to predict classes."""
y = tf.one_hot(y, 15, 1, 0)
byte_list = tf.reshape(learn.ops.one_hot_matrix(x, 256),
[-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(byte_list, N_FILTERS,
FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,
FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
prediction, loss = learn.models.logistic_regression(pool2, y)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true'
)
FLAGS = parser.parse_args()
tf.app.run()
| apache-2.0 |
jaidevd/scikit-learn | sklearn/feature_selection/tests/test_mutual_info.py | 30 | 6881 | from __future__ import division
import numpy as np
from numpy.testing import run_module_suite
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_array_equal, assert_almost_equal,
assert_false, assert_raises, assert_equal,
assert_allclose, assert_greater)
from sklearn.feature_selection.mutual_info_ import (
mutual_info_regression, mutual_info_classif, _compute_mi)
def test_compute_mi_dd():
# In discrete case computations are straightforward and can be done
# by hand on given vectors.
x = np.array([0, 1, 1, 0, 0])
y = np.array([1, 0, 0, 0, 1])
H_x = H_y = -(3/5) * np.log(3/5) - (2/5) * np.log(2/5)
H_xy = -1/5 * np.log(1/5) - 2/5 * np.log(2/5) - 2/5 * np.log(2/5)
I_xy = H_x + H_y - H_xy
assert_almost_equal(_compute_mi(x, y, True, True), I_xy)
def test_compute_mi_cc():
# For two continuous variables a good approach is to test on bivariate
# normal distribution, where mutual information is known.
# Mean of the distribution, irrelevant for mutual information.
mean = np.zeros(2)
# Setup covariance matrix with correlation coeff. equal 0.5.
sigma_1 = 1
sigma_2 = 10
corr = 0.5
cov = np.array([
[sigma_1**2, corr * sigma_1 * sigma_2],
[corr * sigma_1 * sigma_2, sigma_2**2]
])
# True theoretical mutual information.
I_theory = (np.log(sigma_1) + np.log(sigma_2) -
0.5 * np.log(np.linalg.det(cov)))
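    # Equivalently, since det(cov) = sigma_1**2 * sigma_2**2 * (1 - corr**2),
    # this expression reduces to the closed form -0.5 * np.log(1 - corr**2).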
np.random.seed(0)
Z = np.random.multivariate_normal(mean, cov, size=1000)
x, y = Z[:, 0], Z[:, 1]
# Theory and computed values won't be very close, assert that the
# first figures after decimal point match.
for n_neighbors in [3, 5, 7]:
I_computed = _compute_mi(x, y, False, False, n_neighbors)
assert_almost_equal(I_computed, I_theory, 1)
def test_compute_mi_cd():
# To test define a joint distribution as follows:
# p(x, y) = p(x) p(y | x)
# X ~ Bernoulli(p)
# (Y | x = 0) ~ Uniform(-1, 1)
# (Y | x = 1) ~ Uniform(0, 2)
# Use the following formula for mutual information:
# I(X; Y) = H(Y) - H(Y | X)
# Two entropies can be computed by hand:
# H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2)
# H(Y | X) = ln(2)
    # Now we need to implement sampling from our distribution, which is
# done easily using conditional distribution logic.
n_samples = 1000
np.random.seed(0)
for p in [0.3, 0.5, 0.7]:
x = np.random.uniform(size=n_samples) > p
y = np.empty(n_samples)
mask = x == 0
y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))
I_theory = -0.5 * ((1 - p) * np.log(0.5 * (1 - p)) +
p * np.log(0.5 * p) + np.log(0.5)) - np.log(2)
# Assert the same tolerance.
for n_neighbors in [3, 5, 7]:
I_computed = _compute_mi(x, y, True, False, n_neighbors)
assert_almost_equal(I_computed, I_theory, 1)
def test_compute_mi_cd_unique_label():
# Test that adding unique label doesn't change MI.
n_samples = 100
x = np.random.uniform(size=n_samples) > 0.5
y = np.empty(n_samples)
mask = x == 0
y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))
mi_1 = _compute_mi(x, y, True, False)
x = np.hstack((x, 2))
y = np.hstack((y, 10))
mi_2 = _compute_mi(x, y, True, False)
assert_equal(mi_1, mi_2)
# We are going to test that feature ordering by MI matches our expectations.
def test_mutual_info_classif_discrete():
X = np.array([[0, 0, 0],
[1, 1, 0],
[2, 0, 1],
[2, 0, 1],
[2, 0, 1]])
y = np.array([0, 1, 2, 2, 1])
# Here X[:, 0] is the most informative feature, and X[:, 1] is weakly
# informative.
mi = mutual_info_classif(X, y, discrete_features=True)
assert_array_equal(np.argsort(-mi), np.array([0, 2, 1]))
def test_mutual_info_regression():
    # We generate a sample from a multivariate normal distribution, using a
    # transformation from initially uncorrelated variables. The zeroth
    # variable after the transformation is selected as the target vector;
    # it has the strongest correlation with variable 2, and
    # the weakest correlation with variable 1.
T = np.array([
[1, 0.5, 2, 1],
[0, 1, 0.1, 0.0],
[0, 0.1, 1, 0.1],
[0, 0.1, 0.1, 1]
])
cov = T.dot(T.T)
mean = np.zeros(4)
np.random.seed(0)
Z = np.random.multivariate_normal(mean, cov, size=1000)
X = Z[:, 1:]
y = Z[:, 0]
mi = mutual_info_regression(X, y, random_state=0)
assert_array_equal(np.argsort(-mi), np.array([1, 2, 0]))
def test_mutual_info_classif_mixed():
# Here the target is discrete and there are two continuous and one
# discrete feature. The idea of this test is clear from the code.
np.random.seed(0)
X = np.random.rand(1000, 3)
X[:, 1] += X[:, 0]
y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int)
X[:, 2] = X[:, 2] > 0.5
mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3,
random_state=0)
assert_array_equal(np.argsort(-mi), [2, 0, 1])
for n_neighbors in [5, 7, 9]:
mi_nn = mutual_info_classif(X, y, discrete_features=[2],
n_neighbors=n_neighbors, random_state=0)
        # Check that the continuous values have a higher MI with greater
# n_neighbors
assert_greater(mi_nn[0], mi[0])
assert_greater(mi_nn[1], mi[1])
# The n_neighbors should not have any effect on the discrete value
# The MI should be the same
assert_equal(mi_nn[2], mi[2])
def test_mutual_info_options():
X = np.array([[0, 0, 0],
[1, 1, 0],
[2, 0, 1],
[2, 0, 1],
[2, 0, 1]], dtype=float)
y = np.array([0, 1, 2, 2, 1], dtype=float)
X_csr = csr_matrix(X)
for mutual_info in (mutual_info_regression, mutual_info_classif):
        assert_raises(ValueError, mutual_info, X_csr, y,
                      discrete_features=False)
mi_1 = mutual_info(X, y, discrete_features='auto', random_state=0)
mi_2 = mutual_info(X, y, discrete_features=False, random_state=0)
mi_3 = mutual_info(X_csr, y, discrete_features='auto',
random_state=0)
mi_4 = mutual_info(X_csr, y, discrete_features=True,
random_state=0)
assert_array_equal(mi_1, mi_2)
assert_array_equal(mi_3, mi_4)
assert_false(np.allclose(mi_1, mi_3))
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
algorithmic-music-exploration/amen | tests/test_feature.py | 1 | 5779 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import librosa
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from amen.audio import Audio
from amen.feature import Feature
from amen.feature import FeatureCollection
from amen.timing import TimeSlice
from amen.utils import example_audio_file
from amen.exceptions import FeatureError
EXAMPLE_FILE = example_audio_file()
audio = Audio(EXAMPLE_FILE)
test_times = np.linspace(0, 10, num=1000)
test_index = pd.to_timedelta(test_times, unit='s')
test_dataframe = pd.DataFrame(data=audio.analysis_samples[:1000], index=test_index)
test_feature = Feature(test_dataframe)
def test_data_validation():
with pytest.raises(AssertionError):
f = Feature([1, 2, 3])
def test_data():
assert_frame_equal(test_feature.data, test_dataframe)
def test_default_aggregate():
assert test_feature.aggregate == np.mean
def test_default_base():
assert test_feature.base == None
def test_default_name():
assert test_feature.name == test_dataframe.keys()[0]
def test_aggregate():
test_feature = Feature(test_dataframe, aggregate=np.median)
assert test_feature.aggregate == np.median
def test_base():
base_feature = Feature(test_dataframe)
test_feature = Feature(test_dataframe, base=base_feature)
assert test_feature.base == base_feature
def test_base_validation():
with pytest.raises(AssertionError):
f = Feature(test_dataframe, np.mean, [1, 2, 3])
# Test list wrappers
def test_iter():
looped_data = []
for d in test_feature:
looped_data.append(d)
assert looped_data == test_feature.data[test_feature.name].tolist()
def test_getitem():
assert test_feature[0] == test_feature.data[test_feature.name][0]
# Test __repr__
def test_repr():
repr_string = '<Feature, {0}>'.format((test_feature.name))
assert test_feature.__repr__() == repr_string
# Test at()
time_slices = [TimeSlice(0, 0.5, audio), TimeSlice(1, 0.5, audio)]
feature_at = test_feature.at(time_slices)
test_slice = time_slices[0]
slice_index = (test_slice.time <= test_feature.data.index) & (
test_feature.data.index < test_slice.time + test_slice.duration
)
target_data = test_feature.aggregate(test_feature.data[slice_index], axis=0)
def test_default_aggregate():
assert feature_at.aggregate == test_feature.aggregate
def test_default_base():
assert feature_at.base == test_feature
def test_default_data():
assert feature_at.data.loc[test_slice.time].all() == target_data.all()
def test_default_length():
assert len(feature_at.data) == len(time_slices)
def test_base_with_second_resample():
feature_again = feature_at.at(time_slices[0])
assert feature_at.base == test_feature
def test_base_with_second_resample():
feature_again = feature_at.at(time_slices[0])
assert feature_again.data.loc[test_slice.time].all() == target_data.all()
def test_with_single_slice():
feature_at = test_feature.at(time_slices[0])
assert len(feature_at.data) == 1
# Test with_time
def test_with_time_raises():
def test():
with pytest.raises(FeatureError):
for beat, feature in test_feature.with_time():
pass
def test_with_time_beats():
beats = []
for beat, feature in feature_at.with_time():
beats.append(beat)
assert beats == time_slices
def test_with_time_features():
looped_features = []
for feature in feature_at:
looped_features.append(feature)
features = []
for beat, feature in feature_at.with_time():
features.append(feature)
assert features == looped_features
# Test FeatureCollection
feature_collection = FeatureCollection()
feature_collection['test'] = test_feature
feature_collection['another_test'] = test_feature
def test_iter():
looped_data = []
for data in feature_collection:
looped_data.append(data)
test_data = []
length = len(test_feature)
for i in range(length):
res = {}
for key, feature in feature_collection.items():
res[key] = feature.data[feature.name][i]
test_data.append(res)
assert res == looped_data[i]
def test_len():
key = list(feature_collection.keys())[0]
length = len(feature_collection[key])
assert len(feature_collection) == length
def test_at():
feature_collection_at = feature_collection.at(time_slices)
assert (
feature_collection_at['test'].data.loc[test_slice.time].all()
== target_data.all()
)
def test_get():
# Casting to list for Python 3
new_feature_collection = feature_collection.get('another_test')
assert list(new_feature_collection.keys()) == ['another_test']
def test_get_with_list():
# Casting to list for Python 3
new_feature_collection = feature_collection.get(['another_test'])
assert list(new_feature_collection.keys()) == ['another_test']
# Test with_time
def test_feature_collection_with_time_raises():
def test():
with pytest.raises(FeatureError):
for beat, feature in feature_collection.with_time():
pass
def test_feature_collection_with_time_beats():
feature_collection_at = feature_collection.at(time_slices)
beats = []
for beat, feature in feature_collection_at.with_time():
beats.append(beat)
assert beats == time_slices
def test_feature_collection_with_time_features():
feature_collection_at = feature_collection.at(time_slices)
looped_features = []
for feature in feature_collection_at:
looped_features.append(feature)
features = []
for beat, feature in feature_collection_at.with_time():
features.append(feature)
assert features == looped_features
| bsd-2-clause |
jperla/happynews | model/ppc.py | 1 | 11242 | #!/usr/bin/env python
"""
Functions useful in doing Posterior Predictive Checks of models.
See Gelman et al. 1996.
Uses graphlib and topiclib.
Copyright (C) 2011 Joseph Perla
GNU Affero General Public License. See <http://www.gnu.org/licenses/>.
"""
import math
from itertools import izip
import numpy as np
import jsondata
import graphlib
import topiclib
class PosteriorPredictiveChecks(object):
def __init__(self):
pass
def simulate(self, posterior, observed):
"""Accepts the posterior model parameters.
Also accepts the observed value,
but only to figure out the auxiliary variables
(like number of words to generate in this document).
Returns a new simulated observation from the posterior e.g. using MCMC.
"""
raise NotImplementedError
def observed_norm(self, observed):
"""Accepts the observed data.
Returns a real number.
e.g. in Linear regression, return the observed y
"""
raise NotImplementedError
def posterior_norm(self, posterior):
"""Accepts the observed data.
Returns a real number.
e.g. in Linear regression, return the Ax+b predicted value without noise
"""
raise NotImplementedError
def discrepancy(self, posterior, observed):
"""Accepts the posterior model parameters, and observed values.
These may be real observed values, or simulated.
Returns a real number.
e.g. in Linear regression, would be standardized residual
"""
raise NotImplementedError
def iterpost(self, global_params, local_params, observed_values):
for (local, observed) in izip(local_params, observed_values):
posterior = {}
posterior.update(global_params)
posterior.update(local)
yield posterior, local, observed
def scatterplot(self, global_params, local_params, observed_values):
"""Accepts global parameters dictionary.
Also accepts a list of dictionaries of local parameters.
Finally a list of observed values which should be same size as local parameters.
Generates a list of 2-tuples (D(y, theta), D(yrep, theta)).
Can be used to generate p-value graph to check model fit.
"""
output = []
for posterior, local, observed in self.iterpost(global_params, local_params, observed_values):
Dy = self.discrepancy(posterior, observed)
Dyrep = self.discrepancy(posterior, self.simulate(posterior, observed))
output.append((Dy, Dyrep))
return list(sorted(output))
def min_discrepancy_histogram(self, global_params, local_params, observed_values):
        # NOTE: I misunderstood this graph; this is wrong.
# Computing this is much harder, and requires fitting the simulation.
"""Accepts global parameters dictionary.
Also accepts a list of dictionaries of local parameters.
Finally a list of observed values which should be same size as local parameters.
Generates a histogram of discrepancies for Dmin
See Gelman et al. 1996
Returns 2-tuple of (float, [floats]).
Representing the minimum discrepancy of observation to its posterior,
and a list of the simulated discrepancies from that same minimum posterior.
"""
# first find Dmin
minimum_discrepancy = float('inf')
minimum = (None, None)
for posterior, local, observed in self.iterpost(global_params, local_params, observed_values):
d = self.discrepancy(posterior, observed)
if d < minimum_discrepancy:
minimum_discrepancy = d
minimum = (local, observed)
# now simulate 1000 tosses
simulations = 1000
min_posterior = {}
min_posterior.update(global_params)
min_posterior.update(minimum[0])
simulated = [self.simulate(min_posterior, minimum[1]) for i in xrange(simulations)]
# now return all discrepancies
return (minimum_discrepancy, [self.discrepancy(min_posterior, s) for s in simulated])
def simulated_lines(self, global_params, local_params, observed_values, num_points, num_lines):
"""Accepts global parameters dictionary.
Also accepts a list of dictionaries of local parameters.
Finally a list of observed values which should be same size as local parameters.
See Gelman et al. 1996
Generates a series of plots which can be graphed to see
over which types of data the model may go wrong.
"""
def generate_linegraph(global_params, local_params, observed_values):
line = [(self.posterior_norm(p), self.observed_norm(o))
for p,l,o in self.iterpost(global_params, local_params, observed_values)]
return list(sorted(line))
# shrink number of points
p = zip(local_params, observed_values)
np.random.shuffle(p)
p = p[:num_points]
local_params,observed_values = [a[0] for a in p], [a[1] for a in p]
real_line = generate_linegraph(global_params, local_params, observed_values)
simulated = [[self.simulate(p, o)
for p,l,o in self.iterpost(global_params, local_params, observed_values)]
for i in xrange(num_lines)]
simulated_lines = [generate_linegraph(global_params, local_params, s) for s in simulated]
return (real_line, simulated_lines)
class TLCPPC(PosteriorPredictiveChecks):
def simulate(self, posterior, observed):
"""Accepts posterior, which is dictionary of phi, beta, eta, sigma squared.
Observed is a sparse vector of word, list of (word int,count) 2-tuples.
Returns observation in same sparse vector type.
"""
# number of words to generate
No = sum(o[1] for o in observed)
beta = posterior['beta']
phi = posterior['phi']
N,K = phi.shape
assert No == N
assert K == beta.shape[0]
K,W = beta.shape
topics = np.sum(np.array([np.random.multinomial(1, phi[n]) for n in xrange(N)]), axis=0)
assert len(topics) == K
words = np.sum(np.array([np.random.multinomial(count, beta[k]) for k,count in enumerate(topics)]), axis=0)
assert len(words) == W
return [(w,c) for w,c in enumerate(words)]
def posterior_norm(self, posterior):
"""Accepts posterior, which is dictionary of phi, beta, eta, sigma squared.
Returns real number.
"""
eta = posterior['eta']
phi = posterior['phi']
# partial slda, so only use first few topics
N,K = phi.shape
Ks = len(eta)
partial_phi = phi[:,:Ks]
EZ = np.sum(partial_phi, axis=0) / N
assert len(EZ) == len(eta)
return np.dot(eta, EZ)
def observed_norm(self, observed):
"""How to generate a statistic based on text is
probably different for each application.
"""
raise NotImplementedError
def discrepancy(self, posterior, observed):
"""Accepts posterior, which is dictionary of phi, beta, eta, sigma squared.
Observed is a sparse vector of word, list of (word int,count) 2-tuples.
Returns a real number.
Just uses observed and posterior norm divided by sigma squared.
"""
#TODO: jperla: maybe can generalize, sigma is a def standardizer() ?
s = np.sqrt(posterior['sigma_squared'])
return abs(self.posterior_norm(posterior) - self.observed_norm(observed)) / s
vocab = dict((w,i) for i,w in enumerate(jsondata.read('../data/nytimes_med_common_vocab.json')))
pos = jsondata.read('../data/liu_pos_words.json')
neg = jsondata.read('../data/liu_neg_words.json')
posi = set([vocab[w] for w in pos if w in vocab])
negi = set([vocab[w] for w in neg if w in vocab])
class YelpSentimentPartialSLDAPPC(TLCPPC):
def simulate(self, posterior, observed):
"""Accepts posterior vars which include phi and eta.
As well as observed value which is just a real number.
Returns a new observation.
Observation is from a normal from expected mean, like regression.
"""
s = np.sqrt(posterior['sigma_squared'])
mean = self.posterior_norm(posterior)
return np.random.normal(mean, s)
def observed_norm(self, observed):
"""Accepts a real value between -2 and 2.
Returns real number between -2 and 2.
Itself. This is just like regression.
"""
return observed
class YelpSentimentTLCPPC(TLCPPC):
def posterior_norm(self, posterior):
"""Posterior contains phiC, whose *last* topics contain the sentiment topics."""
p = posterior.copy()
phi = p['phi']
eta = p['eta']
Ks = len(eta)
p['phi'] = phi[:,-Ks:]
return TLCPPC.posterior_norm(self, p)
def observed_norm(self, observed):
"""Accepts a sparse vector of word, list of (word int,count) 2-tuples.
Returns real number between -2 and 2.
"""
numpos = 0
numneg = 0
for n,word,count in topiclib.iterwords(observed):
if word in posi:
numpos += 1
if word in negi:
numneg += 1
ratio = 1.0
normratio = 0.0
if numpos == 0 and numneg == 0:
return 0.0
elif numneg == 0:
return 2.0
elif numpos == 0:
return -2.0
else:
if numpos >= numneg:
ratio = float(numpos) / numneg
normratio = (ratio - 1)
else:
ratio = -1.0 * float(numneg) / numpos
normratio = (ratio + 1)
o = graphlib.logistic_sigmoid(normratio)
return (4 * o) - 2 # norm to -2 to 2
import matplotlib.pyplot as plot
def save_figures(name, scatterplot, lines):
#TODO: jperla: this needs to be better; cant hardcode everything
first = lambda x: list(a[0] for a in x)
second = lambda x: list(a[1] for a in x)
# make graphs
plot.figure(1)
plot.grid(True)
plot.scatter(first(scatterplot), second(scatterplot), 20, 'k', '.')
plot.axis([0, 3.5, 0, 3.5])
plot.plot([0, 3.5], [0, 3.5], ':')
plot.xlabel(r'D(y,$\theta$)')
plot.ylabel(r'D($y^{rep}$,$\theta$)')
plot.title('Posterior Predictive Check Scatterplot')
plot.savefig(name + '-ppc-scatterplot.png')
#plot.legend(('sample1','sample2'))
plot.figure(2)
plot.axis([-2, 2, -2, 2])
for p in lines[1]:
plot.plot(first(p), second(p), 'b-.')
plot.plot(first(lines[0]), second(lines[0]), 'k-')
plot.xlabel(r'$\eta^T E[\bar z]$')
#plot.xlabel(r'Positive vs Negative words')
plot.ylabel(r'Observed Rating')
plot.title('Posterior Predictive Check Simulated Draws')
plot.savefig(name + '-ppc-lines.png')
| agpl-3.0 |
daemonmaker/pylearn2 | pylearn2/scripts/plot_monitor.py | 37 | 10204 | #!/usr/bin/env python
"""
usage:
plot_monitor.py model_1.pkl model_2.pkl ... model_n.pkl
Loads any number of .pkl files produced by train.py. Extracts
all of their monitoring channels and prompts the user to select
a subset of them to be plotted.
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow, Harm Aarts"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import gc
import numpy as np
import sys
from theano.compat.six.moves import input, xrange
from pylearn2.utils import serial
from theano.printing import _TagGenerator
from pylearn2.utils.string_utils import number_aware_alphabetical_key
from pylearn2.utils import contains_nan, contains_inf
import argparse
channels = {}
def unique_substring(s, other, min_size=1):
"""
.. todo::
WRITEME
"""
size = min(len(s), min_size)
while size <= len(s):
for pos in xrange(0,len(s)-size+1):
rval = s[pos:pos+size]
fail = False
for o in other:
if o.find(rval) != -1:
fail = True
break
if not fail:
return rval
size += 1
# no unique substring
return s
def unique_substrings(l, min_size=1):
"""
.. todo::
WRITEME
"""
return [unique_substring(s, [x for x in l if x is not s], min_size)
for s in l]
def main():
"""
.. todo::
WRITEME
"""
parser = argparse.ArgumentParser()
parser.add_argument("--out")
parser.add_argument("model_paths", nargs='+')
parser.add_argument("--yrange", help='The y-range to be used for plotting, e.g. 0:1')
options = parser.parse_args()
model_paths = options.model_paths
if options.out is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
print('generating names...')
model_names = [model_path.replace('.pkl', '!') for model_path in
model_paths]
model_names = unique_substrings(model_names, min_size=10)
model_names = [model_name.replace('!','') for model_name in
model_names]
print('...done')
for i, arg in enumerate(model_paths):
try:
model = serial.load(arg)
except Exception:
if arg.endswith('.yaml'):
                print(arg + " is a yaml config file, " +
                      "you need to load a trained model.", file=sys.stderr)
quit(-1)
raise
this_model_channels = model.monitor.channels
if len(sys.argv) > 2:
postfix = ":" + model_names[i]
else:
postfix = ""
for channel in this_model_channels:
channels[channel+postfix] = this_model_channels[channel]
del model
gc.collect()
while True:
# Make a list of short codes for each channel so user can specify them
# easily
tag_generator = _TagGenerator()
codebook = {}
sorted_codes = []
for channel_name in sorted(channels,
key = number_aware_alphabetical_key):
code = tag_generator.get_tag()
codebook[code] = channel_name
codebook['<'+channel_name+'>'] = channel_name
sorted_codes.append(code)
x_axis = 'example'
print('set x_axis to example')
if len(channels.values()) == 0:
print("there are no channels to plot")
break
# If there is more than one channel in the monitor ask which ones to
# plot
prompt = len(channels.values()) > 1
if prompt:
# Display the codebook
for code in sorted_codes:
print(code + '. ' + codebook[code])
print()
print("Put e, b, s or h in the list somewhere to plot " +
"epochs, batches, seconds, or hours, respectively.")
response = input('Enter a list of channels to plot ' + \
'(example: A, C,F-G, h, <test_err>) or q to quit' + \
' or o for options: ')
if response == 'o':
print('1: smooth all channels')
print('any other response: do nothing, go back to plotting')
response = input('Enter your choice: ')
if response == '1':
for channel in channels.values():
k = 5
new_val_record = []
for i in xrange(len(channel.val_record)):
new_val = 0.
count = 0.
for j in xrange(max(0, i-k), i+1):
new_val += channel.val_record[j]
count += 1.
new_val_record.append(new_val / count)
channel.val_record = new_val_record
continue
if response == 'q':
break
#Remove spaces
response = response.replace(' ','')
#Split into list
codes = response.split(',')
final_codes = set([])
for code in codes:
if code == 'e':
x_axis = 'epoch'
continue
elif code == 'b':
x_axis = 'batche'
elif code == 's':
x_axis = 'second'
elif code == 'h':
x_axis = 'hour'
elif code.startswith('<'):
assert code.endswith('>')
final_codes.add(code)
elif code.find('-') != -1:
#The current list element is a range of codes
rng = code.split('-')
if len(rng) != 2:
print("Input not understood: "+code)
quit(-1)
found = False
for i in xrange(len(sorted_codes)):
if sorted_codes[i] == rng[0]:
found = True
break
if not found:
print("Invalid code: "+rng[0])
quit(-1)
found = False
for j in xrange(i,len(sorted_codes)):
if sorted_codes[j] == rng[1]:
found = True
break
if not found:
print("Invalid code: "+rng[1])
quit(-1)
final_codes = final_codes.union(set(sorted_codes[i:j+1]))
else:
#The current list element is just a single code
final_codes = final_codes.union(set([code]))
# end for code in codes
else:
            final_codes = set(codebook.keys())
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
styles = list(colors)
styles += [color+'--' for color in colors]
styles += [color+':' for color in colors]
fig = plt.figure()
ax = plt.subplot(1,1,1)
# plot the requested channels
for idx, code in enumerate(sorted(final_codes)):
            channel_name = codebook[code]
channel = channels[channel_name]
y = np.asarray(channel.val_record)
if contains_nan(y):
print(channel_name + ' contains NaNs')
if contains_inf(y):
                print(channel_name + ' contains infinite values')
if x_axis == 'example':
x = np.asarray(channel.example_record)
elif x_axis == 'batche':
x = np.asarray(channel.batch_record)
elif x_axis == 'epoch':
try:
x = np.asarray(channel.epoch_record)
except AttributeError:
# older saved monitors won't have epoch_record
x = np.arange(len(channel.batch_record))
elif x_axis == 'second':
x = np.asarray(channel.time_record)
elif x_axis == 'hour':
x = np.asarray(channel.time_record) / 3600.
else:
assert False
ax.plot( x,
y,
styles[idx % len(styles)],
                     marker = '.', # add point markers to lines
label = channel_name)
plt.xlabel('# '+x_axis+'s')
ax.ticklabel_format( scilimits = (-3,3), axis = 'both')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc = 'upper left',
bbox_to_anchor = (1.05, 1.02))
# Get the axis positions and the height and width of the legend
plt.draw()
ax_pos = ax.get_position()
pad_width = ax_pos.x0 * fig.get_size_inches()[0]
pad_height = ax_pos.y0 * fig.get_size_inches()[1]
dpi = fig.get_dpi()
lgd_width = ax.get_legend().get_frame().get_width() / dpi
lgd_height = ax.get_legend().get_frame().get_height() / dpi
# Adjust the bounding box to encompass both legend and axis. Axis should be 3x3 inches.
# I had trouble getting everything to align vertically.
ax_width = 3
ax_height = 3
total_width = 2*pad_width + ax_width + lgd_width
total_height = 2*pad_height + np.maximum(ax_height, lgd_height)
fig.set_size_inches(total_width, total_height)
ax.set_position([pad_width/total_width, 1-6*pad_height/total_height, ax_width/total_width, ax_height/total_height])
if(options.yrange is not None):
ymin, ymax = map(float, options.yrange.split(':'))
plt.ylim(ymin, ymax)
if options.out is None:
plt.show()
else:
plt.savefig(options.out)
if not prompt:
break
if __name__ == "__main__":
main()
| bsd-3-clause |
tedunderwood/fiction | code/logisticpredict.py | 1 | 36904 | # logisticpredict.py
#
# Based on logisticleave1out.py which was based on
# parallel_crossvalidate.py from the paceofchange repo.
#
# Reads all volumes meeting a given set of criteria,
# and uses a leave-one-out strategy to distinguish
# reviewed volumes (class 1) from random
# (class 0). In cases where an author occurs more
# than once in the dataset, it leaves out all
# volumes by that author whenever making a prediction
# about one of them.
#
# This version differs from parallel_crossvalidate
# in using a different metadata structure, and
# especially a multi-tag folksonomic system for
# identifying the positive and negative classes.
# In other words, volumes aren't explicitly divided
# into positive and negative classes in the metadata;
# they can carry any number of tags; you decide, when
# you run the model, which tags you want to group as
# positive and negative classes. The code will ensure
# that no volumes with a positive tag are present in
# the negative class, and also ensure that the two
# groups have roughly similar distributions across
# the timeline.
#
# The main class here is create_model().
# It accepts three parameters, each of which is a tuple
# that gets unpacked.
#
# There are unfortunately a lot of details in those tuples,
# because I've written this script to be very flexible and
# permit a lot of different kinds of modeling.
#
# paths unpacks into
# sourcefolder, extension, metadatapath, outputpath, vocabpath
# where
# sourcefolder is the directory with data files
# extension is the extension those files end with
# metadatapath is the path to a metadata csv
# outputpath is the path to a csv of results to be written
# and vocabpath is the path to a file of words to be used
# as features for all models
#
# exclusions unpacks into
# excludeif, excludeifnot, excludebelow, excludeabove, sizecap
# where
# all the "excludes" are dictionaries pairing a key (the name of a metadata
# column) with a value that should be excluded -- if it's present,
# absent, lower than this, or higher than this.
# sizecap limits the number of vols in the positive class; randomly
# sampled if greater.
#
# classifyconditions unpacks into:
# positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions
# where
# positive_tags is a list of tags to be included in the positive class
# negative_tags is a list of tags to be selected for the negative class
# (unless volume also has a positive_tag, and note that the negative class
# is always selected to match the chronological distribution of the positive
# as closely as possible)
# datetype is the date column to be used for chronological distribution
# numfeatures can be used to limit the features in this model to top N;
# it is in practice not functional right now because I'm using all
# features in the vocab file -- originally selected by doc frequency in
# the whole corpus
# regularization is a constant to be handed to scikit-learn (I'm using one
# established in previous experiments on a different corpus)
# and testconditions ... is complex.
#
# The variable testconditions will be a set of tags. It may contain tags for classes
# that are to be treated as a test set. Positive volumes will be assigned to
# this set if they have no positive tags that are *not* in testconditions.
# A corresponding group of negative volumes will at the same time
# be assigned. It can also contain two integers to be interpreted as dates, a
# pastthreshold and futurethreshold. Dates outside these thresholds will not
# be used for training. If date thresholds are provided they must be provided
# as a pair to clarify which one is the pastthreshold and which the future.
# If you're only wanting to exclude volumes in the future, provide a past
# threshold like "1."
# All of these conditions exclude volumes from the training set, and place them
# in a set that is used only for testing. But also note that these
# exclusions are always IN ADDITION TO leave-one-out crossvalidation by author.
# In other words, if an author w/ multiple volumes has only some of them excluded
# from training by testconditions, it is *still* the case that the author will never
# be in a training set when her own volumes are being predicted.
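# A minimal illustrative sketch of how these three tuples fit together (the tag
# names, date limits, and output filename below are hypothetical examples, not
# values prescribed by this script):
#
#     paths = ('../newdata/', '.fic.tsv', '../meta/finalmeta.csv',
#              '../results/examplemodel.csv', '../lexicon/new10k.csv')
#     exclusions = (dict(), dict(), {'firstpub': 1850}, {'firstpub': 1930}, 160)
#     classifyconditions = (['detective'], ['random', 'chirandom'], 'firstpub',
#                           10000, .000075, {'1880', '1920'})
#     accuracy, allvolumes, coefficients = create_model(paths, exclusions,
#                                                       classifyconditions)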
import numpy as np
import pandas as pd
import csv, os, random, sys, datetime
from collections import Counter
from multiprocessing import Pool
from sklearn.linear_model import LogisticRegression
# from scipy.stats import norm
import matplotlib.pyplot as plt
import modelingprocess
import metafilter
import metautils
usedate = False
# Leave this flag false unless you plan major
# surgery to reactivate the currently-deprecated
# option to use "date" as a predictive feature.
# There are three different date types we can use.
# Choose which here.
# FUNCTIONS GET DEFINED BELOW.
def get_features(wordcounts, wordlist):
numwords = len(wordlist)
wordvec = np.zeros(numwords)
for idx, word in enumerate(wordlist):
if word in wordcounts:
wordvec[idx] = wordcounts[word]
return wordvec
# In an earlier version of this script, we sometimes used
# "publication date" as a feature, to see what would happen.
# In the current version, we don't. Some of the functions
# and features remain, but they are deprecated. E.g.:
def get_features_with_date(wordcounts, wordlist, date, totalcount):
numwords = len(wordlist)
wordvec = np.zeros(numwords + 1)
for idx, word in enumerate(wordlist):
if word in wordcounts:
wordvec[idx] = wordcounts[word]
wordvec = wordvec / (totalcount + 0.0001)
wordvec[numwords] = date
return wordvec
def sliceframe(dataframe, yvals, excludedrows, testrow):
numrows = len(dataframe)
newyvals = list(yvals)
for i in excludedrows:
del newyvals[i]
# NB: This only works if we assume that excluded rows
# has already been sorted in descending order !!!!!!!
trainingset = dataframe.drop(dataframe.index[excludedrows])
newyvals = np.array(newyvals)
testset = dataframe.iloc[testrow]
return trainingset, newyvals, testset
def normalizearray(featurearray, usedate):
'''Normalizes an array by centering on means and
scaling by standard deviations. Also returns the
means and standard deviations for features, so that
they can be pickled.
'''
numinstances, numfeatures = featurearray.shape
means = list()
stdevs = list()
lastcolumn = numfeatures - 1
for featureidx in range(numfeatures):
thiscolumn = featurearray.iloc[ : , featureidx]
thismean = np.mean(thiscolumn)
thisstdev = np.std(thiscolumn)
if (not usedate) or featureidx != lastcolumn:
# If we're using date we don't normalize the last column.
means.append(thismean)
stdevs.append(thisstdev)
featurearray.iloc[ : , featureidx] = (thiscolumn - thismean) / thisstdev
else:
print('FLAG')
means.append(thismean)
thisstdev = 0.1
stdevs.append(thisstdev)
featurearray.iloc[ : , featureidx] = (thiscolumn - thismean) / thisstdev
# We set a small stdev for date.
return featurearray, means, stdevs
def binormal_select(vocablist, positivecounts, negativecounts, totalpos, totalneg, k):
''' A feature-selection option, not currently in use.
'''
all_scores = np.zeros(len(vocablist))
for idx, word in enumerate(vocablist):
# For each word we create a vector the length of vols in each class
# that contains real counts, plus zeroes for all those vols not
# represented.
positives = np.zeros(totalpos, dtype = 'int64')
if word in positivecounts:
positives[0: len(positivecounts[word])] = positivecounts[word]
negatives = np.zeros(totalneg, dtype = 'int64')
if word in negativecounts:
negatives[0: len(negativecounts[word])] = negativecounts[word]
featuremean = np.mean(np.append(positives, negatives))
tp = sum(positives > featuremean)
fp = sum(positives <= featuremean)
tn = sum(negatives > featuremean)
fn = sum(negatives <= featuremean)
tpr = tp/(tp+fn) # true positive ratio
fpr = fp/(fp+tn) # false positive ratio
bns_score = abs(norm.ppf(tpr) - norm.ppf(fpr))
# See Forman
if np.isinf(bns_score) or np.isnan(bns_score):
bns_score = 0
all_scores[idx] = bns_score
zipped = [x for x in zip(all_scores, vocablist)]
zipped.sort(reverse = True)
with open('bnsscores.tsv', mode='w', encoding = 'utf-8') as f:
for score, word in zipped:
f.write(word + '\t' + str(score) + '\n')
return [x[1] for x in zipped[0:k]]
def confirm_testconditions(testconditions, positive_tags):
for elem in testconditions:
if elem in positive_tags or elem.isdigit():
# that's fine
continue
elif elem == '':
# also okay
continue
elif elem == 'donotmatch':
print("You have instructed me that positive volumes matching only a")
print("positive tag in the test-but-not-train group should not be matched")
print("with negative volumes.")
elif elem.startswith('limit=='):
limit = elem.replace('limit==', '')
print()
print("You have instructed me to allow only "+ limit)
print("volumes in the do-not-train set.")
print()
else:
print('Illegal element in testconditions.')
sys.exit(0)
def get_thresholds(testconditions):
''' The testconditions are a set of elements that may include dates
(setting an upper and lower limit for training, outside of which
volumes are only to be in the test set), or may include genre tags.
This function only identifies the dates, if present. If not present,
it returns 0 and 3000. Do not use this code for predicting volumes
dated after 3000 AD. At that point, the whole thing is deprecated.
'''
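    # For example, a hypothetical testconditions of {'1895', '1923', 'stangothic'}
    # would yield pastthreshold = 1895 and futurethreshold = 1923; non-numeric
    # tags are simply ignored here.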
thresholds = []
for elem in testconditions:
if elem.isdigit():
thresholds.append(int(elem))
thresholds.sort()
if len(thresholds) == 2:
pastthreshold = thresholds[0]
futurethreshold = thresholds[1]
else:
pastthreshold = 0
futurethreshold = 3000
# we are unlikely to have any volumes before or after
# those dates
return pastthreshold, futurethreshold
def get_volume_lists(volumeIDs, volumepaths, IDsToUse):
'''
This function creates an ordered list of volume IDs included in this
modeling process, and an ordered list of volume-path tuples.
It also identifies positive volumes that are not to be included in a training set,
because they belong to a category that is being tested.
'''
volspresent = []
orderedIDs = []
for volid, volpath in zip(volumeIDs, volumepaths):
if volid not in IDsToUse:
continue
else:
volspresent.append((volid, volpath))
orderedIDs.append(volid)
return volspresent, orderedIDs
def first_and_last(idset, metadict, datetype):
min = 3000
max = 0
for anid in idset:
date = metadict[anid][datetype]
if date < min:
min = date
if date > max:
max = date
return min, max
def describe_donttrainset(donttrainset, classdictionary, metadict, datetype):
positivedonts = []
negativedonts = []
for anid in donttrainset:
posneg = classdictionary[anid]
if posneg == 0:
negativedonts.append(anid)
elif posneg == 1:
positivedonts.append(anid)
else:
print('Anomaly in classdictionary.')
min, max = first_and_last(positivedonts, metadict, datetype)
if min > 0:
print("The set of volumes not to be trained on includes " + str(len(positivedonts)))
print("positive volumes, ranging from " + str(min) + " to " + str(max) + ".")
print()
min, max = first_and_last(negativedonts, metadict, datetype)
if min > 0:
print("And also includes " + str(len(negativedonts)))
print("negative volumes, ranging from " + str(min) + " to " + str(max) + ".")
print()
def record_trainflags(metadict, donttrainset):
''' This function records, for each volume, whether it is or is not
to be used in training. Important to run it after add_matching_negs so
that we know which volumes in the negative set were or weren't used
in training.
'''
for docid, metadata in metadict.items():
if docid in donttrainset:
metadata['trainflag'] = 0
else:
metadata['trainflag'] = 1
def make_vocablist(sourcedir, n, vocabpath):
'''
Makes a list of the top n words in sourcedir, and writes it
to vocabpath.
'''
sourcefiles = [x for x in os.listdir(sourcedir) if not x.startswith('.')]
wordcounts = Counter()
for afile in sourcefiles:
path = sourcedir + afile
with open(path, encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
if len(fields) > 2 or len(fields) < 2:
continue
word = fields[0]
if len(word) > 0 and word[0].isalpha():
count = int(fields[1])
wordcounts[word] += 1
with open(vocabpath, mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(['word', 'docfreq'])
for word, count in wordcounts.most_common(n):
writer.writerow([word, count])
vocabulary = [x[0] for x in wordcounts.most_common(n)]
return vocabulary
def get_vocablist(vocabpath, sourcedir, wordcounts, useall, n):
'''
Gets the vocablist stored in vocabpath or, alternately, if that list
doesn't yet exist, it creates a vocablist and puts it there.
'''
vocablist = []
ctr = 0
if not os.path.isfile(vocabpath):
vocablist = make_vocablist(sourcedir, n, vocabpath)
else:
with open(vocabpath, encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
ctr += 1
if ctr > n:
break
# this allows us to limit how deep we go
word = row['word'].strip()
if wordcounts[word] > 2 or useall:
vocablist.append(word)
if len(vocablist) > n:
vocablist = vocablist[0: n]
return vocablist
def get_docfrequency(volspresent, donttrainset):
'''
This function counts words in volumes. These wordcounts don't necessarily define
a feature set for modeling: at present, the limits of that set are defined primarily
by a fixed list shared across all models (top10k).
'''
wordcounts = Counter()
for volid, volpath in volspresent:
if volid in donttrainset:
continue
else:
with open(volpath, encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
if len(fields) > 2 or len(fields) < 2:
# this is a malformed line; there are a few of them,
# but not enough to be important -- ignore
continue
word = fields[0]
if len(word) > 0 and word[0].isalpha():
wordcounts[word] += 1
# We're getting docfrequency (the number of documents that
# contain this word), not absolute number of word occurrences.
# So just add 1 no matter how many times the word occurs.
return wordcounts
def create_model(paths, exclusions, classifyconditions):
''' This is the main function in the module.
It can be called externally; it's also called
if the module is run directly.
'''
sourcefolder, extension, metadatapath, outputpath, vocabpath = paths
excludeif, excludeifnot, excludebelow, excludeabove, sizecap = exclusions
positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions = classifyconditions
verbose = False
holdout_authors = True
# If you want reliable results, always run this with holdout_authors
# set to True. The only reason to set it to False is to confirm that
# this flag is actually making a difference. If you do that, it
# disables the code that keeps other works by the author being predicted
# out of the training set.
# The following function confirms that the testconditions are legal.
confirm_testconditions(testconditions, positive_tags)
if not sourcefolder.endswith('/'):
sourcefolder = sourcefolder + '/'
# This just makes things easier.
# Get a list of files.
allthefiles = os.listdir(sourcefolder)
# random.shuffle(allthefiles)
volumeIDs = list()
volumepaths = list()
for filename in allthefiles:
if filename.endswith(extension):
volID = filename.replace(extension, "")
# The volume ID is basically the filename minus its extension.
# Extensions are likely to be long enough that there is little
# danger of accidental occurrence inside a filename. E.g.
# '.fic.tsv'
path = sourcefolder + filename
volumeIDs.append(volID)
volumepaths.append(path)
metadict = metafilter.get_metadata(metadatapath, volumeIDs, excludeif, excludeifnot, excludebelow, excludeabove)
# Now that we have a list of volumes with metadata, we can select the groups of IDs
# that we actually intend to contrast.
IDsToUse, classdictionary, donttrainset = metafilter.label_classes(metadict, "tagset", positive_tags, negative_tags, sizecap, datetype, excludeif, testconditions)
print()
min, max = first_and_last(IDsToUse, metadict, datetype)
if min > 0:
print("The whole corpus involved here includes " + str(len(IDsToUse)))
print("volumes, ranging in date from " + str(min) + " to " + str(max) + ".")
print()
# We now create an ordered list of id-path tuples for later use, and identify a set of
# positive ids that should never be used in training.
volspresent, orderedIDs = get_volume_lists(volumeIDs, volumepaths, IDsToUse)
# Extend the set of ids not to be used in training by identifying negative volumes that match
# the distribution of positive volumes.
describe_donttrainset(donttrainset, classdictionary, metadict, datetype)
# Create a flag for each volume that indicates whether it was used in training
record_trainflags(metadict, donttrainset)
# Get a count of docfrequency for all words in the corpus. This is probably not needed and
# might be deprecated later.
wordcounts = get_docfrequency(volspresent, donttrainset)
# The feature list we use is defined by the top 10,000 words (by document
# frequency) in the whole corpus, and it will be the same for all models.
vocablist = get_vocablist(vocabpath, sourcefolder, wordcounts, useall = True, n = numfeatures)
# This function either gets the vocabulary list already stored in vocabpath, or
# creates a list of the top 10k words in all files, and stores it there.
# N is a parameter that could be altered right here.
# Useall is a parameter that you basically don't need to worry about unless
# you're changing / testing code. If you set it to false, the vocablist will
# exclude words that occur very rarely. This shouldn't be necessary; the
# crossvalidation routine is designed not to include features that occur
# zero times in the training set. But if you get div-by-zero errors in the
# training process, you could fiddle with this parameter as part of a
# troubleshooting process.
numfeatures = len(vocablist)
# For each volume, we're going to create a list of volumes that should be
# excluded from the training set when it is to be predicted. More precisely,
# we're going to create a list of their *indexes*, so that we can easily
# remove rows from the training matrix.
# This list will include for ALL volumes, the indexes of vols in the donttrainset.
donttrainon = [orderedIDs.index(x) for x in donttrainset]
authormatches = [list(donttrainon) for x in range(len(orderedIDs))]
# Now we proceed to enlarge that list by identifying, for each volume,
# a set of indexes that have the same author. Obvs, there will always be at least one.
# We exclude a vol from it's own training set.
if holdout_authors:
for idx1, anid in enumerate(orderedIDs):
thisauthor = metadict[anid]['author']
for idx2, anotherid in enumerate(orderedIDs):
otherauthor = metadict[anotherid]['author']
if thisauthor == otherauthor and not idx2 in authormatches[idx1]:
authormatches[idx1].append(idx2)
else:
# This code only runs if we're testing the effect of
# holdout_authors by disabling it.
for idx1, anid in enumerate(orderedIDs):
if idx1 not in authormatches[idx1]:
authormatches[idx1].append(idx1)
# The purpose of everything that follows is to
# balance negative and positive instances in each
# training set.
trainingpositives = set()
trainingnegatives = set()
for anid, thisclass in classdictionary.items():
if anid in donttrainset:
continue
if thisclass == 1:
trainingpositives.add(orderedIDs.index(anid))
else:
trainingnegatives.add(orderedIDs.index(anid))
print('Training positives: ' + str(len(trainingpositives)))
print('Training negatives: ' + str(len(trainingnegatives)))
# The code below was intended to balance the size of positive and
# negative in spite of same-author exclusions. But it could
# have grossly unintended effects when there were many donttrainon
# exclusions.
# for alist in authormatches:
# numpositive = 0
# numnegative = 0
# for anidx in alist:
# anid = orderedIDs[anidx]
# thisclass = classdictionary[anid]
# if thisclass == 1:
# numpositive += 1
# else:
# numnegative += 1
# if numpositive > numnegative:
# difference = numpositive - numnegative
# remaining = trainingnegatives - set(alist)
# alist.extend(random.sample(remaining, difference))
# elif numpositive < numnegative:
# difference = numnegative - numpositive
# remaining = trainingpositives - set(alist)
# alist.extend(random.sample(remaining, difference))
# else:
# difference = 0
# Let's record, for each volume, the size of its training set.
trainingsizes = []
numvolumes = len(orderedIDs)
for idx, anid in enumerate(orderedIDs):
excluded = len(authormatches[idx])
metadict[anid]['trainsize'] = numvolumes - excluded
trainingsizes.append(metadict[anid]['trainsize'])
averagetrainingsize = sum(trainingsizes) / len(trainingsizes)
for alist in authormatches:
alist.sort(reverse = True)
# I am reversing the order of indexes so that I can delete them from
# back to front, without changing indexes yet to be deleted.
# This will become important in the modelingprocess module.
volsizes = dict()
voldata = list()
classvector = list()
for volid, volpath in volspresent:
with open(volpath, encoding = 'utf-8') as f:
voldict = dict()
totalcount = 0
for line in f:
fields = line.strip().split('\t')
if len(fields) > 2 or len(fields) < 2:
continue
word = fields[0]
count = int(fields[1])
voldict[word] = count
totalcount += count
date = metautils.infer_date(metadict[volid], datetype)
date = date - 1700
if date < 0:
date = 0
if usedate:
features = get_features_with_date(voldict, vocablist, date, totalcount)
voldata.append(features)
else:
features = get_features(voldict, vocablist)
if totalcount == 0:
totalcount = .00001
voldata.append(features / totalcount)
volsizes[volid] = totalcount
classflag = classdictionary[volid]
classvector.append(classflag)
data = pd.DataFrame(voldata)
sextuplets = list()
for i, volid in enumerate(orderedIDs):
listtoexclude = authormatches[i]
asixtuple = data, classvector, listtoexclude, i, usedate, regularization
sextuplets.append(asixtuple)
# Now do leave-one-out predictions.
print('Beginning multiprocessing.')
pool = Pool(processes = 11)
res = pool.map_async(modelingprocess.model_one_volume, sextuplets)
# After all files are processed, write metadata, errorlog, and counts of phrases.
res.wait()
resultlist = res.get()
assert len(resultlist) == len(orderedIDs)
logisticpredictions = dict()
for i, volid in enumerate(orderedIDs):
logisticpredictions[volid] = resultlist[i]
pool.close()
pool.join()
print('Multiprocessing concluded.')
truepositives = 0
truenegatives = 0
falsepositives = 0
falsenegatives = 0
allvolumes = list()
with open(outputpath, mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
header = ['volid', 'dateused', 'pubdate', 'birthdate', 'firstpub', 'gender', 'nation', 'allwords', 'logistic', 'realclass', 'trainflag', 'trainsize', 'author', 'title', 'genretags']
writer.writerow(header)
for volid in IDsToUse:
metadata = metadict[volid]
dateused = metadata[datetype]
pubdate = metadata['pubdate']
birthdate = metadata['birthdate']
firstpub = metadata['firstpub']
gender = metadata['gender']
nation = metadata['nation']
author = metadata['author']
title = metadata['title']
allwords = volsizes[volid]
logistic = logisticpredictions[volid]
realclass = classdictionary[volid]
trainflag = metadata['trainflag']
trainsize = metadata['trainsize']
genretags = ' | '.join(metadata['tagset'])
outrow = [volid, dateused, pubdate, birthdate, firstpub, gender, nation, allwords, logistic, realclass, trainflag, trainsize, author, title, genretags]
writer.writerow(outrow)
allvolumes.append(outrow)
if logistic == 0.5:
print("equals!")
predictedpositive = random.sample([True, False], 1)[0]
elif logistic > 0.5:
predictedpositive = True
elif logistic < 0.5:
predictedpositive = False
else:
print('Oh, joy. A fundamental floating point error.')
predictedpositive = random.sample([True, False], 1)[0]
if predictedpositive and classdictionary[volid] > 0.5:
truepositives += 1
elif not predictedpositive and classdictionary[volid] < 0.5:
truenegatives += 1
elif not predictedpositive and classdictionary[volid] > 0.5:
falsenegatives += 1
elif predictedpositive and classdictionary[volid] < 0.5:
falsepositives += 1
else:
print("Wait a second, boss.")
donttrainon.sort(reverse = True)
trainingset, yvals, testset = sliceframe(data, classvector, donttrainon, 0)
trainingset, testset = modelingprocess.remove_zerocols(trainingset, testset)
newmodel = LogisticRegression(C = regularization)
trainingset, means, stdevs = normalizearray(trainingset, usedate)
newmodel.fit(trainingset, yvals)
coefficients = newmodel.coef_[0] * 100
coefficientuples = list(zip(coefficients, (coefficients / np.array(stdevs)), vocablist + ['pub.date']))
coefficientuples.sort()
if verbose:
for coefficient, normalizedcoef, word in coefficientuples:
print(word + " : " + str(coefficient))
print()
totalevaluated = truepositives + truenegatives + falsepositives + falsenegatives
if totalevaluated != len(IDsToUse):
print("Total evaluated = " + str(totalevaluated))
print("But we've got " + str(len(IDsToUse)))
accuracy = (truepositives + truenegatives) / totalevaluated
print('True positives ' + str(truepositives))
print('True negatives ' + str(truenegatives))
print('False positives ' + str(falsepositives))
print('False negatives ' + str(falsenegatives))
print()
print('The average size of the training set was ' + str(averagetrainingsize))
print()
precision = truepositives / (truepositives + falsepositives)
recall = truepositives / (truepositives + falsenegatives)
F1 = 2 * (precision * recall) / (precision + recall)
print("F1 : " + str(F1))
coefficientpath = outputpath.replace('.csv', '.coefs.csv')
with open(coefficientpath, mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
for triple in coefficientuples:
coef, normalizedcoef, word = triple
writer.writerow([word, coef, normalizedcoef])
return accuracy, allvolumes, coefficientuples
def diachronic_tilt(allvolumes, modeltype, datelimits):
''' Takes a set of predictions produced by a model that knows nothing about date,
and divides it along a line with a diachronic tilt. We need to do this in a way
that doesn't violate crossvalidation. I.e., we shouldn't "know" anything
that the model didn't know. We tried a couple of different ways to do this, but
the simplest and actually most reliable is to divide the whole dataset along a
linear central trend line for the data!
'''
listofrows = list()
classvector = list()
# DEPRECATED
# if modeltype == 'logistic' and len(datelimits) == 2:
# # In this case we construct a subset of data to model on.
# tomodeldata = list()
# tomodelclasses = list()
# pastthreshold, futurethreshold = datelimits
for volume in allvolumes:
date = volume[1]
logistic = volume[8]
realclass = volume[9]
listofrows.append([logistic, date])
classvector.append(realclass)
# DEPRECATED
# if modeltype == 'logistic' and len(datelimits) == 2:
# if date >= pastthreshold and date <= futurethreshold:
# tomodeldata.append([logistic, date])
# tomodelclasses.append(realclass)
y, x = [a for a in zip(*listofrows)]
plt.axis([min(x) - 2, max(x) + 2, min(y) - 0.02, max(y) + 0.02])
reviewedx = list()
reviewedy = list()
randomx = list()
randomy = list()
for idx, reviewcode in enumerate(classvector):
if reviewcode == 1:
reviewedx.append(x[idx])
reviewedy.append(y[idx])
else:
randomx.append(x[idx])
randomy.append(y[idx])
plt.plot(reviewedx, reviewedy, 'ro')
plt.plot(randomx, randomy, 'k+')
if modeltype == 'logistic':
# all this is DEPRECATED
print("Hey, you're attempting to use the logistic-tilt option")
print("that we deactivated. Go in and uncomment the code.")
# if len(datelimits) == 2:
# data = pd.DataFrame(tomodeldata)
# responsevariable = tomodelclasses
# else:
# data = pd.DataFrame(listofrows)
# responsevariable = classvector
# newmodel = LogisticRegression(C = 100000)
# newmodel.fit(data, responsevariable)
# coefficients = newmodel.coef_[0]
# intercept = newmodel.intercept_[0] / (-coefficients[0])
# slope = coefficients[1] / (-coefficients[0])
# p = np.poly1d([slope, intercept])
elif modeltype == 'linear':
# what we actually do
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
slope = z[0]
intercept = z[1]
plt.plot(x,p(x),"b-")
plt.show()
x = np.array(x, dtype='float64')
y = np.array(y, dtype='float64')
classvector = np.array(classvector)
dividingline = intercept + (x * slope)
predicted_as_reviewed = (y > dividingline)
really_reviewed = (classvector == 1)
accuracy = sum(predicted_as_reviewed == really_reviewed) / len(classvector)
return accuracy
if __name__ == '__main__':
# If this class is called directly, it creates a single model using the default
# settings set below.
## PATHS.
# sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/texts/'
# extension = '.fic.tsv'
# metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/masterficmeta.csv'
# outputpath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/predictions.csv'
sourcefolder = '../newdata/'
extension = '.fic.tsv'
metadatapath = '../meta/finalmeta.csv'
vocabpath = '../lexicon/new10k.csv'
modelname = input('Name of model? ')
outputpath = '../results/' + modelname + str(datetime.date.today()) + '.csv'
# We can simply exclude volumes from consideration on the basis of any
# metadata category we want, using the dictionaries defined below.
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
daterange = input('Range of dates to use in the model? ')
if ',' in daterange:
dates = [int(x.strip()) for x in daterange.split(',')]
dates.sort()
if len(dates) == 2:
assert dates[0] < dates[1]
excludebelow['firstpub'] = dates[0]
excludeabove['firstpub'] = dates[1]
# allstewgenres = {'cozy', 'hardboiled', 'det100', 'chimyst', 'locdetective', 'lockandkey', 'crime', 'locdetmyst', 'blcrime', 'anatscifi', 'locscifi', 'chiscifi', 'femscifi', 'stangothic', 'pbgothic', 'lochorror', 'chihorror', 'locghost'}
# excludeif['negatives'] = allstewgenres
sizecap = 160
# CLASSIFY CONDITIONS
# We ask the user for a list of categories to be included in the positive
# set, as well as a list for the negative set. Default for the negative set
# is to include all the "random"ly selected categories. Note that random volumes
# can also be tagged with various specific genre tags; they are included in the
# negative set only if they lack tags from the positive set.
tagphrase = input("Comma-separated list of tags to include in the positive class: ")
positive_tags = [x.strip() for x in tagphrase.split(',')]
tagphrase = input("Comma-separated list of tags to include in the negative class: ")
# An easy default option.
if tagphrase == 'r':
negative_tags = ['random', 'grandom', 'chirandom']
else:
negative_tags = [x.strip() for x in tagphrase.split(',')]
# We also ask the user to specify categories of texts to be used only for testing.
# These exclusions from training are in addition to ordinary crossvalidation.
print()
print("You can also specify positive tags to be excluded from training, and/or a pair")
print("of integer dates outside of which vols should be excluded from training.")
print("If you add 'donotmatch' to the list of tags, these volumes will not be")
print("matched with corresponding negative volumes.")
print()
testphrase = input("Comma-separated list of such tags: ")
testconditions = set([x.strip() for x in testphrase.split(',') if len(x) > 0])
datetype = "firstpub"
numfeatures = 10000
regularization = .000075
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
rawaccuracy, allvolumes, coefficientuples = create_model(paths, exclusions, classifyconditions)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
tiltaccuracy = diachronic_tilt(allvolumes, 'linear', [])
print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy))
| mit |
shervinea/enzynet | enzynet/real_time.py | 1 | 2275 | 'Plot in real-time'
# Authors: Afshine Amidi <[email protected]>
# Shervine Amidi <[email protected]>
# MIT License
import time, random
import math
from collections import deque
from matplotlib import pyplot as plt
start = time.time()
# Adapted from https://gist.github.com/Uberi/283a13b8a71a46fb4dc8
class RealTimePlot(object):
def __init__(self, max_entries=200, x_label=r'Epochs', y_label=r'Accuracy'):
# TeX friendly
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# Store
self.fig, self.axes = plt.subplots()
self.max_entries = max_entries
# x-axis
self.axis_x = deque(maxlen=max_entries)
# Training accuracy
self.axis_y_tr = deque(maxlen=max_entries)
self.lineplot_tr, = self.axes.plot([], [], "ro-")
# Validation accuracy
self.axis_y_val = deque(maxlen=max_entries)
self.lineplot_val, = self.axes.plot([], [], "bo-")
# Autoscale
self.axes.set_autoscaley_on(True)
# Set label names
self.axes.set_xlabel(x_label)
self.axes.set_ylabel(y_label)
def add(self, x, y_tr, y_val=None):
# Add new point
self.axis_x.append(x)
self.axis_y_tr.append(y_tr)
self.lineplot_tr.set_data(self.axis_x, self.axis_y_tr)
if y_val is not None: # Validation accuracy is specified
self.axis_y_val.append(y_val)
self.lineplot_val.set_data(self.axis_x, self.axis_y_val)
# Change axis limits
self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
self.axes.relim(); self.axes.autoscale_view() # Rescale the y-axis
def animate(self, figure, callback, interval=50):
import matplotlib.animation as animation
def wrapper(frame_index):
self.add(*callback(frame_index))
self.axes.relim(); self.axes.autoscale_view() # Rescale the y-axis
return self.lineplot_tr, self.lineplot_val
animation.FuncAnimation(figure, wrapper, interval=interval)
if __name__ == "__main__":
# Initialization
display = RealTimePlot(max_entries=100)
i = 0
# Update in real-time
while True:
display.add(time.time() - start, i, i/2)
plt.pause(5)
i = i+1
| mit |
anurag313/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
ycaihua/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
herilalaina/scikit-learn | examples/calibration/plot_calibration_curve.py | 113 | 5904 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
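# Reference sketch (not part of the original test-suite): the "balanced"
# heuristic weights each class by n_samples / (n_classes * bincount), so for
# y = [2, 2, 2, 3, 3, 4] the counts (3, 2, 1) give weights
# (6/9, 6/6, 6/3) = (0.667, 1.0, 2.0); their dot product with the class
# counts therefore recovers n_samples = 6, as asserted above.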
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| mit |
oneandoneis2/GNU-parallel | src/optional/python/tests/test_loader.py | 4 | 1533 | import pandas as pd
import unittest
from gnuparallel import load
result_dir = '../../testresults'
class TestLoader(unittest.TestCase):
def test_basics(self):
df = load(result_dir)
self.assertEqual(set(df.columns), set(['a', 'b', 'resfile', '_stream']))
self.assertEqual(df.shape[0], 4)
def test_filters(self):
df = load(result_dir, a=2)
self.assertEqual(df.shape[0], 2)
self.assertEqual(df.a.sum(), 4)
df = load(result_dir, a=[2])
self.assertEqual(df.shape[0], 2)
self.assertEqual(df.a.sum(), 4)
df = load(result_dir, a=[1,2])
self.assertEqual(df.shape[0], 4)
self.assertEqual(df.a.sum(), 6)
df = load(result_dir, a=1000)
self.assertTrue(df.empty)
def test_infer_types(self):
df = load(result_dir)
self.assertEqual(df.a.dtype, pd.np.int64)
df = load(result_dir, _infer_types=False)
self.assertEqual(df.a.dtype, pd.np.object_)
def test_format(self):
df = load(result_dir, b=0.3)
self.assertTrue(df.empty)
df = load(result_dir, b=0.3, _format={'b': '%.2f'})
self.assertEqual(df.shape[0], 2)
def test_stream(self):
df = load(result_dir, _stream='stderr')
self.assertTrue((df._stream == 'stderr').all())
def test_process(self):
df = load(result_dir, a=1, _process=lambda x: pd.np.loadtxt(x).sum())
self.assertAlmostEqual(df.sum()['res'], 2.7)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
kuntzer/SALSA-public | 18_plot_transit_proba.py | 1 | 8321 | ''' 18-plot-transit-proba.py
=========================
AIM: Plots transit probabilities according to 17-treat-ephemerids.py.
A probability of 100% corresponds to being able to observe the target for
its whole period.
INPUT: files: - <orbit_id>_misc/ephemerids_obs<transit_duration>h_<max_interruptions>inter_V<mag_max><_SAA?>.npz
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_figures/ with the following format:
proba_<orbit_id>_<exoplanet_period>obs_<max_interruptions>inter_V%3.1f.png/.eps/.pdf
CMD: python 18-plot-transit-proba.py
ISSUES: <none known>
REQUIRES:
- Latex
- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/maps/ --> figures
* <orbit_id>_misc/ --> storages of data
REMARKS: Not with real catalogue.
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import matplotlib.cm as cm
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.constants as const
import resources.figures as figures
from resources.targets import *
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
from mpl_toolkits.basemap import Basemap
###########################################################################
### PARAMETERS
# Altitude of the orbit in km
orbit_id = 702
apogee = 700
perigee = 700
# First minute in data set !
minute_ini = 0
# Last minute to look for
minute_end = 1440*365/12
# File name for the list of orbit file
orbits_file = 'orbits.dat'
typep = 'Neptunes'
if typep == 'Super-Earths':
# Orbit period [d]
exoplanet_period = 50
# Minimum observable time for plots [h]
transit_duration = 6
elif typep == 'Neptunes':
# Orbit period [d]
exoplanet_period = 13
# Minimum observable time for plots [h]
transit_duration = 3
# Maximum visible magnitude
mag_max = 10.
# Plot a few stars as well ?
stars= False
targets_exo=False
# Maximum interruption time tolerated [min]
max_interruptions = 20
# Take SAA into account?
SAA = True
# File name for the input file (in a compressed binary Python format)
if SAA: note = '_SAA'
else: note = ''
# File name for the input file (in a compressed binary Python format)
input_fname = 'ephemerids_obs%dh_%dinter_V%3.1f%s.npz' % (transit_duration,max_interruptions,mag_max,note)
# Print verbose information?
verbose = False
# Nice plots?
fancy=True
# Save plots?
save = True
# Show figures ?
show = True
#####################################################################################################################
# for every region in the sky/worthy target:
# >> Find when you can look with transit_duration [h] with maximal max_interruptions [min]
# >>>> return start and end time of observations with duration of interruptions [min]
# >> can we observe a transit ?
# >>>> Vary the start of transit time by transit_duration [h] until exoplanet_period [h]
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee, perigee)
number_of_transit = exoplanet_period * 24. / transit_duration
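# e.g. with the Neptune defaults above (13 d period, 3 h observation windows)
# this gives 13 * 24 / 3 = 104 possible transit windows per exoplanet period.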
###########################################################################
### INITIALISATION
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
data_grid = np.zeros(np.shape(ra_grid))
if stars:
ra_stars=[101.2833, 95.9875, 213.9167, 219.9, 279.2333, 78.6333, 114.8250, 88.7917]
dec_stars=[-16.7161, -52.6956, 19.1822, -60.8339, 38.7836, -8.2014, 5.2250, 7.4069]
y_offset=[0.5e6,0.5e6,-0.8e6,0.5e6,0.5e6,0.5e6,-0.8e6,0.5e6]
labels = ['Sirius','Canopus','Arcturus',r'$\alpha\mathrm{Centauri}$','Vega','Rigel','Procyon','Betelgeuse']
if targets_exo: ra_tar, dec_tar, magn = np.loadtxt('resources/defined-exo.csv', delimiter=';', unpack=True)
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
# loading data
print 'loading %s' % input_fname
sys.stdout.write("Loading worthy targets...\t")
sys.stdout.flush()
data = np.load(folder_misc+input_fname)
worthy_targets = data['worthy_targets']
start_obs=data['start_obs']
stop_obs=data['stop_obs']
interruptions_obs=data['interruptions_obs']
print 'Done, %d targets loaded' % len(worthy_targets)
###########################################################################
# cycling through the targets:
density = np.zeros(len(worthy_targets))
for index_target, target in enumerate(worthy_targets):
tar_start = start_obs[index_target,:]
tar_stop = stop_obs[index_target,:]
# print target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
if verbose: print index_target, target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
for i, f in zip(tar_start, tar_stop):
if i >= 0 and f > 0:
if verbose: print i/60/24, f/60/24, (f-i)/60
density[index_target]+=np.floor((f-i)/60 / transit_duration)
if verbose: print '-'*30
density[index_target]=float(density[index_target]) / number_of_transit * 100.
# Associate the density to a grid point
id_ra = np.where(np.abs(ras-target.Coordinates()[0]) < 0.05)[0]
id_dec= np.where(np.abs(decs-target.Coordinates()[1]) < 0.05)[0]
# Transform density into probability of transit:
data_grid[id_dec, id_ra] = density[index_target]
if verbose: print 'obs start | obs end | hours of obs'
#plt.figure()
#for index_target, target in enumerate(worthy_targets):
# c = density[index_target]
# plt.scatter(target.Coordinates()[0]*180./np.pi,target.Coordinates()[1]*180./np.pi,c=c, cmap=cm.jet, vmin=np.amin(density), vmax=np.amax(density), edgecolor='none', s=50)
#plt.xlim([0,360])
#plt.ylim([-90,90])
#plt.grid()
#cb=plt.colorbar()
#cb.set_label('Probabilty of transit of min. %d hours' % transit_duration)
###########################################################################
### Plotting
# transform 0 into no plotting in the data matrix
if fancy: figures.set_fancy()
fig = plt.figure()
m = Basemap(projection='moll',lon_0=180,resolution='c')
extent = (-np.pi,np.pi,-np.pi/2.,np.pi/2.)
ra_grid *= const.RAD
#ra_grid -= 180.
#ra_grid = ra_grid - 180 #= (ra_grid-np.pi) #*180. / np.pi
dec_grid *= const.RAD
m.contour( ra_grid,dec_grid,data_grid,10,colors='k',latlon=True)
CS = m.contourf( ra_grid ,dec_grid,data_grid,100,cmap=plt.cm.gist_stern,latlon=True,vmin=0)
#m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-60.,90.,30.),labels=[1,0,0,0])
m.drawmeridians(np.arange(0.,360.,30.))
ra__ = np.arange(0., 360., 30.)
#print ra__
x, y = m(ra__,ra__*0)
for x,y,ra in zip(x,y,ra__):
plt.text(x, y, figures.format_degree(ra), color='black', ha='center', weight='black', size='small') ##93c6ed
t = np.linspace(0., np.amax(density),5)
labels = ['%3.1f\%%' % a for a in t]
cbar = plt.colorbar(CS, orientation='horizontal',shrink=.8, ticks=t)
cbar.ax.set_xticklabels(labels)
l,b,w,h = plt.gca().get_position().bounds
ll,bb,ww,hh = cbar.ax.get_position().bounds
cbar.ax.set_position([ll, bb+0.1, ww, hh])
cbar.set_label('Probability of seeing a transit of %d hours for V=%3.1f' % (transit_duration,mag_max))
if stars:
x,y = m(ra_stars, dec_stars)
m.plot(x,y, 'w*', markersize=10)
for label, xpt, ypt, y_offset in zip(labels, x, y,y_offset):
plt.text(xpt, ypt+y_offset, label, color='white', size='x-small', ha='center', weight='black') # #93a4ed
if targets_exo:
x,y = m(ra_tar*180./np.pi, dec_tar*180./np.pi)
x,y = m(ra_tar, dec_tar)
m.scatter(x,y, c='white', edgecolor='k', marker='+', s=20,zorder=10, lw=0.5)
# Save plot
if save:
fname = 'proba_%d_%dobs_%dinter_V%3.1f' % (orbit_id, transit_duration, max_interruptions, mag_max)
figures.savefig(folder_figures+fname, fig, fancy)
print 'saved as %s' % folder_figures+fname
if show: plt.show()
| bsd-3-clause |
RobertABT/heightmap | build/scipy/scipy/interpolate/ndgriddata.py | 5 | 7154 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
points : (Npoints, Ndims) ndarray of floats
Data point coordinates.
values : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
self.tree = cKDTree(self.points)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
.. versionadded:: 0.9
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tesselate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
...     return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| mit |
biofilos/cluster_genomics | manual.py | 1 | 22645 | from glob import glob
import pandas as pd
from Bio import SeqIO
# from concurrent import futures
import os
import json
# import sys
import numpy as np
# import matplotlib
# matplotlib.use("Agg")
import matplotlib.pyplot as plt
import networkx as nx
import requests
from networkx.drawing.nx_agraph import graphviz_layout
# matplotlib.use("Agg")
import re
class Ensembl:
"""
This class contains server information and methods to fetch data from
Ensembl
"""
def __init__(self):
"""
Set server details
"""
self.server = "http://rest.ensemblgenomes.org"
self.ref_server = "http://rest.ensembl.org"
def search(self, prefix, query, server, content="application/json"):
"""
Perform generic search on Ensembl
:param content: type of response
:param prefix: extension to be used. It depends on the kind of data
that needs to be fetched
:param query: depending on the service, it can be a taxId, gene name, etc
:return: dictionary
"""
# If query is a dictionary, it will use POST (for sequences)
if type(query) == dict:
header = {"Content-Type": "application/json", "Accept": "application/json"}
query_str = json.dumps(query)
response = requests.post(server + prefix, headers=header, data=query_str)
if not response.ok:
print("Trying reference server")
response = requests.post(self.ref_server + prefix, headers=header, data=query_str)
else:
extension = prefix + query
header = {"Content-type": content}
response = requests.get(server + extension, headers=header)
# try the reference server if the default one doesn't work
if not response.ok:
response = requests.get(self.ref_server + extension, headers=header)
# Fail gracefully
if not response.ok:
print("{} failed".format(query))
# response.raise_for_status()
return None
else:
if content == "application/json":
return response.json()
else:
return response.text
def get_ref_sp(self):
"""
Get species in ensembl reference (ensembl.org), so that the correct
server is used
:return: list of species
"""
prefix = "/info/species?"
response = self.search(prefix, "", self.ref_server)
sp_list = [x['name'] for x in response['species']]
return sp_list
def genomes_info(self):
"""
Get basic information about all the genomes available in Ensembl
:return: data frame
"""
genomes = []
genomes_list = self.search("/info/genomes", "", self.server)
for genome in genomes_list:
taxid = clean_name(genome['taxonomy_id'])
name = clean_name(genome['name'])
species = clean_name(genome['species'])
assembly_level = clean_name(genome['assembly_level'])
assembly_id = clean_name(genome['assembly_id'])
assembly_name = clean_name(genome['assembly_name'])
# Save information to CSV
genomes.append([taxid, name, species, assembly_level, assembly_id, assembly_name])
genomes_df = pd.DataFrame(genomes, columns=["taxonomy_id", "name", "species", "assembly_level",
"assembly_id", "assembly"])
genomes_df.loc[:, "taxonomy_id"] = genomes_df.loc[:, "taxonomy_id"].astype(int)
genomes_df.set_index("taxonomy_id", inplace=True)
return genomes_df
def get_taxonomy(self, taxid):
"""
Get taxonomic lineage
:param taxid: taxonomic id (or species name)
:return: dash-separated lineage (str)
"""
prefix = "/taxonomy/classification/"
tax_id_str = str(taxid)
lineage = []
lineage_list = self.search(prefix, tax_id_str, self.server)
if lineage_list:
for node in lineage_list:
lineage.append(node['name'])
# It was decided to organize it as one string, in case it needs to be
# saved in a file
return "-".join(lineage)
else:
return None
def get_chrom_info(self, species, min_length):
"""
Get list of top-level (the best assembled level: chromosome, scaffold, etc)
genomic features that are greater than a specified size
:param species: species name (lower_case
:param min_length: int
:return: tuple: first element: dictionary of genomic features and their length.
second element: total genome length covered by the selected genomic features
"""
# Initialize variables
c_value = 0
karyotype = {}
# Set assembly information service
prefix = "/info/assembly/"
# Format species name for query
sp_query = "{}?".format(species)
karyotype_raw = self.search(prefix, sp_query, self.server)
# Extract information for top-level genomic regions above the minimum size
for chrom in karyotype_raw['top_level_region']:
chrom_length = chrom['length']
if chrom_length >= min_length:
# Only include chromosome if it has not been already included
if chrom['name'] not in karyotype:
karyotype[chrom['name']] = chrom_length
# increment genomic coverage by the length of the fetched genomic feature
c_value += chrom_length
else:
# If a chromosome name is repeated, it is an unplaced scaffold and will be ignored
del karyotype[chrom['name']]
return karyotype, c_value
def get_region_genes(self, species, chromosome, start, end):
"""
Get annotation for protein coding genes overlapping genomic coordinates
:param species: ensembl species name
:param chromosome: chromosome name
:param start: genomic coordinates start
:param end: genomic coordinates end
:return: dataframe with annotations
"""
prefix = "/overlap/region/"
query = "{}/{}:{}-{}?feature=gene;biotype=protein_coding".format(species, chromosome, start, end)
genes_dict = self.search(prefix=prefix, query=query, server=self.server)
genes_list = []
columns = ['species', 'chromosome', 'acc', 'start', 'end',
'strand', 'symbol']
# Parse response if at least one gene was returned for the region
if genes_dict and len(genes_dict) != 0:
for gene in genes_dict:
if gene['feature_type'] == "gene":
g_id = gene['id']
g_start = gene['start']
g_end = gene['end']
g_strand = int(gene['strand'])
try:
g_symbol = gene['external_name']
except KeyError:
g_symbol = g_id
# g_description = gene['description']
genes_list.append([species, chromosome, g_id, g_start, g_end,
g_strand, g_symbol])
genes_df = pd.DataFrame(genes_list, columns=columns)
# genes_df.set_index('acc', inplace=True)
genes_df.loc[:, 'length'] = genes_df['end'] - genes_df['start']
for col in ['start', 'end', 'strand']:
genes_df.loc[:, col] = genes_df[col].astype(int)
genes_df.loc[:, 'chromosome'] = genes_df['chromosome'].astype(str)
else:
genes_df = pd.DataFrame([], columns=columns)
return genes_df.set_index("acc")
def get_seq(self, genes, seq_type, file_out, server):
"""
Get protein sequence as string
:param server: ensembl server to use (str)
:param file_out: output file (fasta)
:param seq_type: sequence type ('protein' or 'cdna')
:param genes: pd.DataFrame containing genes annotation
:return: protein sequence (str)
"""
assert seq_type in ['protein', 'cdna'], "{} is not a valid type, should be 'protein' or 'cdna'".format(seq_type)
prefix = "/sequence/id?type={}".format(seq_type)
# max number of genes to download at the same time
max_seqs = 50
# Open output file handle
# Collect accessions of sequences already present in the fasta file
dones = set([x.split("|")[2] for x in open(file_out) if x.startswith(">")])
file_o = open(file_out, "a")
genes = genes.loc[~(genes.index.isin(dones))]
# gene accession numbers
gene_list = [x for x in genes.index.values]
downloaded = 0
for genes_chunk in [gene_list[i:i + max_seqs] for i in range(0, len(gene_list), max_seqs)]:
downloaded += max_seqs
# Only work on genes that are not in the fasta file
perc_done = round(((len(dones) + downloaded) / (len(gene_list) + len(dones))) * 100, 3)
species_set = "\n".join(set(genes.loc[genes_chunk, 'species']))
print("{}\nDownloading {} genes ({}%)".format(species_set, len(genes_chunk), perc_done))
# print(genes_chunk)
query = {"ids": genes_chunk}
response = self.search(prefix, query, server)
response_list = response
for acc, seq in zip(genes_chunk, response_list):
gene_record = genes.loc[acc]
chromosome = gene_record['chromosome']
symbol = gene_record['symbol']
species = gene_record['species']
start = gene_record['start']
end = gene_record['end']
strand = gene_record['strand']
name = ">{}|{}|{}|{}|{}|{}|{}\n".format(species, chromosome, acc, symbol,
start, end, strand)
file_o.write(name)
seq_str = seq['seq'] + "\n"
file_o.write(seq_str)
file_o.flush()
file_o.close()
# Formatting functions
def clean_name(str_in, replace_fields=[","]):
"""
Removes undesired fields
:param replace_fields: list of fields to be replaced with underscores
:param str_in:
:return:
"""
str_in = str(str_in)
for field in replace_fields:
str_in = str_in.replace(field, '_')
return str_in
def tax_graph(t_lineage):
"""
Generates a graph from a list of lineages
:param t_lineage: list of lineages. Each lineage is a '-' separated string of OTUs
:return: networkX graph
"""
lin_g = nx.Graph()
for lin in t_lineage:
lin_list = lin.split('-')
lin_g.add_edges_from([(lin_list[i], lin_list[i + 1]) for i in range(0, len(lin_list) - 1)])
return lin_g
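# Sketch (hypothetical lineage string, not from the real data):
# tax_graph(["Eukaryota-Metazoa-Chordata"]) produces the path graph
# Eukaryota -- Metazoa -- Chordata, one edge per consecutive pair of OTUs.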
def md2list(md):
"""
Transform a markdown list to a python list, ignoring headers
:param md: markdown file containing a list (marked with *)
:return: list
"""
result_list = []
for line in open(md):
if not line.startswith("#"):
item = line.strip("* ").strip("\n")
result_list.append(item)
return result_list
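# Sketch (hypothetical file contents, not part of the original script): a
# markdown file holding "# Genera", "* Homo", "* Pan" yields ["Homo", "Pan"];
# header lines starting with '#' are skipped.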
def draw_graph(lin_G):
"""
Draws networkX graph with labels
:param lin_G: Graph
:return: None
"""
# Drawing the phylogram
layout = graphviz_layout(lin_G)
nx.draw(lin_G, pos=layout)
nx.draw_networkx_labels(lin_G, pos=layout)
plt.show()
def chop_chromosome(chrom_length, chunk_size):
"""
Given a chromosome length, return a list of tuples of a fixed size (or to the end)
:param chrom_length: chromosome length
:param chunk_size: size of chunk
:return: list
"""
region_chunks = []
for region in range(0, chrom_length, int(chunk_size)):
if region + chunk_size > chrom_length:
region_chunks.append((int(region), int(chrom_length)))
else:
region_chunks.append((int(region), int(region + chunk_size)))
return region_chunks
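# Sketch with hypothetical numbers (not part of the original script):
# a 25 kb chromosome cut into 10 kb windows keeps the shorter final chunk.
assert chop_chromosome(25000, 10000) == [(0, 10000), (10000, 20000), (20000, 25000)]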
# Remove overlapping genes
def sp_loop(in_table, columns):
"""
Filters a table by a number of columns
:param in_table: Input table
:param columns: Column names to perform the filter
:return: list of tables for each combination of column names
"""
# Initialize the list of tables
list_results = []
for col_set in np.unique(in_table.set_index(columns).index):
filtered_table = in_table
cols_to_filter = {col_name: col_value for col_name, col_value in zip(columns, col_set)}
for var in cols_to_filter.keys():
filtered_table = filtered_table.loc[filtered_table[var] == cols_to_filter[var]]
list_results.append(filtered_table)
return list_results
def remove_overlaps(chrom_only):
"""
Removes overlapping genes according to these rules:
If the second gene (sorted by start coordinate) starts before the preceding gene ends
AND both genes are on the same strand
:param chrom_only: dataframe for each chromosome on a species
:return: Dataframe
"""
# Remove duplicated accession numbers
chrom_only.reset_index(inplace=True)
chrom_only.drop_duplicates('acc', inplace=True)
chrom_only.set_index('acc', inplace=True)
chrom_only.sort_values("start", inplace=True)
first_to_one_to_last = chrom_only.index.values[:-1]
second_to_last = chrom_only.index.values[1:]
overlapping = chrom_only.loc[second_to_last,
'start'].values < chrom_only.loc[first_to_one_to_last, 'end'].values
overlapping_all = np.concatenate(([False], overlapping))
same_strand = chrom_only.loc[second_to_last,
'strand'].values == chrom_only.loc[first_to_one_to_last, 'strand'].values
same_strand_all = np.concatenate(([False], same_strand))
for_removal = same_strand_all & overlapping_all
for_removal_indices = chrom_only.loc[for_removal].index
non_overlapping_table = chrom_only.drop(for_removal_indices)
return non_overlapping_table
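# Minimal sketch of the rule above (hypothetical accessions, not real data):
# g2 starts before g1 ends and shares its strand, so it is dropped; g3 also
# overlaps g1 but sits on the opposite strand and is kept.
_overlap_demo = pd.DataFrame(
    {"start": [100, 150, 160], "end": [200, 210, 230], "strand": [1, 1, -1]},
    index=pd.Index(["g1", "g2", "g3"], name="acc"))
assert list(remove_overlaps(_overlap_demo).index) == ["g1", "g3"]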
def remove_overlaps_sp(sp_table, sp):
"""
Filters the gene table to a single species, removes genes with duplicated
start coordinates (keeping the longest), and strips overlapping genes
chromosome by chromosome until none remain.
:param sp_table: dataframe of gene annotations for all species
:param sp: species name to process
:return: dataframe without overlapping genes for that species
"""
# sp_table_list = []
# Load species table
sp_table = sp_table.loc[sp_table.species == sp]
# Set indices so that a data frame per chromosome with ordered genes by position and length can be easily selected
sp_table.sort_values(['start', 'length'], inplace=True)
# Remove entries that have the same start position and leave the longest one
sp_table.drop_duplicates('start', inplace=True)
# Remove overlapping genes
# Get list of tables per chromosome
chrom_tables = sp_loop(sp_table, ['species', 'chromosome'])
# Get list of tables (one per chromosome) without overlapping genes
non_overlapping = []
for chrom_table in chrom_tables:
before = chrom_table.shape[0]
no_overlaps = remove_overlaps(chrom_table)
after = no_overlaps.shape[0]
while before != after:
before = no_overlaps.shape[0]
no_overlaps = remove_overlaps(no_overlaps)
after = no_overlaps.shape[0]
non_overlapping.append(no_overlaps)
parsed_sp_table = pd.concat(non_overlapping)
return parsed_sp_table
# Clean lineage data
class Lineage:
def __init__(self, lineage_df, genome, name):
self.lineage = lineage_df
self.graph = tax_graph(self.lineage['lineage'].values)
self.genome = genome
self.name = name
self.df = self.genome.loc[self.genome['genus'].isin(self.lineage['genus'])]
# self.included_md = None
# self.remove_list = None
def clean(self, included_md, remove_list):
genera_list = md2list(included_md)
selected_genera = self.df.loc[self.df['genus'].isin(genera_list)]
selected_genera.drop(remove_list, errors="ignore", inplace=True)
selected_genera.loc[:, "big_group"] = self.name
return selected_genera
def clean_graph(self, genes):
clean_lineage = self.lineage.loc[self.lineage['genus'].isin(genes['genus']), 'lineage']
return tax_graph(clean_lineage)
e = Ensembl()
# genome_df = pd.read_csv("genomes.csv")
# genera_df = pd.read_csv("genera.csv")
seq_dict = {}
fasta_dict = {}
chrom_info = {}
sp_cval = []
for fasta in glob("*.fa"):
# Download chromosome information
sp = fasta.split(".")[0]
karyotype, c_val = e.get_chrom_info(sp, 10000)
sp_cval.append([sp, c_val])
chrom_info[sp] = karyotype
species = fasta.split(".")[0]
print(species)
# Parse fasta header annotation
for seq in SeqIO.parse(fasta, "fasta"):
line = seq.description
if "gene_biotype:protein_coding" in line:
data = line.split(" ")
coords = data[2].split(":")
chrom = coords[2]
start, end, strand = coords[3], coords[4], coords[5]
acc = data[3].split(":")[1]
if "gene_symbol" in line:
if "description" in line:
regex = r"gene_symbol:(.*)description"
else:
regex = r"gene_symbol:(.*)"
# symbol = data[7].split(":")[1]
symbol_raw = re.search(regex, line)
symbol = symbol_raw.groups()[0].strip(" ").replace(" ", "_")
else:
symbol = acc
if acc not in seq_dict:
seq_dict[acc] = {"species": species,
"chromosome": chrom.replace(" ", "_").replace("|", "_"),
"acc": acc,
"coords": [int(start), int(end)],
"strand": strand,
"symbol": symbol.replace(" ", "_").replace("|", "_")}
else:
seq_dict[acc]["coords"] += [int(start), int(end)]
if acc in fasta_dict:
if len(fasta_dict[acc]) < len(seq.seq):
fasta_dict[acc] = str(seq.seq)
else:
fasta_dict[acc] = str(seq.seq)
# Convert chromosome data to dataframe
chrom_lengths = []
for species in chrom_info:
for chrom in chrom_info[species]:
chrom_lengths.append([species, chrom, chrom_info[species][chrom]])
chromosomes = pd.DataFrame(chrom_lengths, columns=["species", "chromosome", "length"])
chromosomes.to_csv("chromosomes.csv")
for rec in seq_dict:
seq_dict[rec]["start"] = min(seq_dict[rec]["coords"])
seq_dict[rec]["end"] = max(seq_dict[rec]["coords"])
del seq_dict[rec]["coords"]
genes = pd.DataFrame(seq_dict).T
if not os.path.exists("genes_raw.fa"):
with open("genes_raw.fa", "w") as fileO:
for acc in fasta_dict:
record = genes.loc[acc]
name = ">{}|{}|{}|{}|{}|{}|{}\n{}\n".format(record["species"], record["chromosome"], acc, record["symbol"],
record["start"], record["end"], record["strand"],
fasta_dict[acc])
fileO.write(name)
genes.loc[:, "length"] = genes["end"] - genes["start"]
genes.reset_index(drop=True, inplace=True)
genes = genes[['species', 'chromosome', 'acc', 'symbol', 'start', 'end', 'strand', 'length']]
genes.set_index("acc", inplace=True)
genes.to_csv("genes_raw.csv")
genes_file = "genes_raw.csv"
if os.path.exists('genes_parsed.csv'):
all_sp_table_clean = pd.read_csv('genes_parsed.csv')
all_sp_table_clean.set_index("acc", inplace=True)
else:
# Don't remove overlaps
all_sp_table_clean = genes.loc[:, ['species', 'chromosome', 'symbol',
'start', 'end', 'strand', 'length']]
all_sp_table_clean.to_csv("genes_parsed.csv")
# Remove overlapping genes
# CPUS = 8
# genes_raw = pd.read_csv(genes_file)
# genes_raw.loc[:, "chromosome"] = genes_raw["chromosome"].astype(str)
# genes_raw.loc[:, "start"] = genes_raw["start"].astype(str)
# genes_raw.loc[:, "end"] = genes_raw["end"].astype(str)
# genes_raw.loc[:, "strand"] = genes_raw["strand"].astype(str)
# genes_raw.loc[:, "end"] = genes_raw["end"].astype(str)
# # genes_raw.set_index("acc", inplace=True)
#
# print("Removing overlapping genes")
# with futures.ProcessPoolExecutor(CPUS) as pool:
# sp_tables = []
# for sp in set(genes_raw.species.values):
# one_result = pool.submit(remove_overlaps_sp, genes_raw, sp)
# sp_tables.append(one_result.result())
# all_sp_table = pd.concat(sp_tables)
# all_sp_table_clean = all_sp_table.loc[:, ['species', 'chromosome', 'symbol',
# 'start', 'end', 'strand', 'length']]
# # all_sp_table_clean.set_index('acc')
# all_sp_table_clean.to_csv('genes_parsed.csv')
# del genes_raw
# Keep genes on chromosomes that are at least 10 kb long and carry at least 10 genes
genomes = pd.read_csv("chromosomes.csv")
genomes = genomes.loc[genomes["length"] >= 1e4]
genomes.set_index(["species", "chromosome"], inplace=True)
over_10kb = set(genomes.index)
genes_chrom = all_sp_table_clean.groupby(["species", "chromosome"]).count()
gene_counts = genes_chrom.loc[genes_chrom["symbol"] >= 10, "symbol"]
over_10_genes = set(gene_counts.index)
# Retain chromosomes that meet both conditions
over10_10kb = list(over_10kb.intersection(over_10_genes))
all_sp_table_clean.reset_index(inplace=True)
all_sp_table_clean.set_index(["species", "chromosome"], inplace=True)
genomes = genomes.loc[over10_10kb]
all_sp_table_clean = all_sp_table_clean.loc[over10_10kb]
genomes.reset_index(inplace=True)
genomes.set_index("species", inplace=True)
all_sp_table_clean.reset_index(inplace=True)
all_sp_table_clean.set_index("acc", inplace=True)
# del all_sp_table_clean["Unnamed: 0"]
genomes.to_csv("chromosomes.csv")
all_sp_table_clean.to_csv("genes_parsed.csv")
# Write out only the sequences of genes that survived the chromosome filters
# (i.e. drop genes on chromosomes shorter than 10 kb or with fewer than 10 genes)
fasta_out = open("seqs_hmmer.fa", "a")
for gene in SeqIO.parse("genes_raw.fa","fasta"):
gene_id = gene.description.split("|")[2]
if gene_id in all_sp_table_clean.index:
SeqIO.write(gene, fasta_out, "fasta")
fasta_out.close()
print("DONE")
| gpl-3.0 |
dcelisgarza/phd_excercises | comp_techniques/problem_class1.py | 1 | 2837 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 25 12:19:01 2016
@author: dcg513
"""
#Import useful routines from modules
from scipy.integrate import odeint
from numpy import linspace, exp, sin, cos, array
import matplotlib.pyplot as plt
plt.close("all")
plt.rc("text", usetex = True)
plt.rc("font", family = "serif", size = 16)
# Here we define our function which returns df/dt = -a*sin(f) - b*cos(t)
# The coefficients a and b are passed in through the params array
def dfdt(curF, curT, params):
# params[0] scales sin(f) and params[1] scales cos(t)
return -params[0]*sin(curF) - params[1]*cos(curT)
#Now define the times at which we want to know the result
time=linspace(0,10,400)
#Set the initial condition
f0=10.
params = array([30.,50.])
result = odeint(dfdt,f0,time,args=(params,))
plt.figure(1)
plt.plot(time, result, label = r"$\frac{\mathrm{d}f}{\mathrm{d}t} = -" + str(params[0]) + " \sin(f) -" +str(params[1]) + " \cos(t)$")
plt.xlabel("Time"); plt.ylabel(r"$f$")
plt.legend(loc=0) ; plt.show()
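# Hedged aside (a sketch, assuming SciPy >= 1.0 is installed): the same ODE can be solved
# with the newer solve_ivp interface, whose callback signature is fun(t, y), so we simply
# swap the argument order of dfdt. sol and the printed diagnostic are introduced only here.
from scipy.integrate import solve_ivp
sol = solve_ivp(lambda t, y: dfdt(y, t, params), (time[0], time[-1]), [f0], t_eval=time)
print("max |odeint - solve_ivp| =", abs(result[:, 0] - sol.y[0]).max())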
def spring(f, t, params):
# f = [x, v], params = [k, m]; t is required by odeint but unused (autonomous system)
# Assign position.
x = f[0]
# Assign velocity.
v = f[1]
# Construct derivatives.
dxdt = v
d2xdt2 = -params[0]/params[1] * x
# Return derivatives.
return [dxdt, d2xdt2]
params = array([0.5,0.01])
f = array([0.4,0.])
time = linspace(0,10,1000)
result = odeint(spring,f,time,args=(params,))
plt.figure(2)
plt.plot(time, result[:,0], label = r"$x(t)$.", linestyle = "-")
plt.plot(time, result[:,1], label = r"$v(t)$.", linestyle = "--")
plt.legend(loc=0)
plt.xlabel(r"Time, s"); plt.ylabel(r"Value")
plt.figure(3)
plt.plot(time, params[1]*result[:,1]**2/2, label = r"$K(t)$", linestyle = "--")
plt.plot(time, params[0]*result[:,0]**2/2, label = r"$V(t)$", linestyle = "-.")
plt.plot(time, params[0]*result[:,0]**2/2 + params[1]*result[:,1]**2/2, label = r"$E(t)$", linestyle = "-")
plt.legend(loc=0)
plt.xlabel(r"Time, s"); plt.ylabel(r"Energy")
def dampspring(f, t, params):
# f = [x, v], params = [k, m, c] with damping coefficient c; t is unused
# Assign position.
x = f[0]
# Assign velocity.
v = f[1]
# Construct derivatives.
dxdt = v
d2xdt2 = -params[0]/params[1] * x - params[2]/params[1]*v
# Return derivatives.
return [dxdt, d2xdt2]
params = array([0.5,0.01,0.005])
f = array([0.4,0.])
time = linspace(0,10,1000)
result = odeint(dampspring,f,time,args=(params,))
plt.figure(4)
plt.plot(time, result[:,0], label = r"$x(t)$.", linestyle = "-")
plt.plot(time, result[:,1], label = r"$v(t)$.", linestyle = "--")
plt.legend(loc=0)
plt.xlabel(r"Time, s"); plt.ylabel(r"Value")
plt.figure(5)
plt.plot(time, params[1]*result[:,1]**2/2, label = r"$K(t)$", linestyle = "--")
plt.plot(time, params[0]*result[:,0]**2/2, label = r"$V(t)$", linestyle = "-.")
plt.plot(time, params[0]*result[:,0]**2/2 + params[1]*result[:,1]**2/2, label = r"$E(t)$", linestyle = "-")
plt.legend(loc=0)
plt.xlabel(r"Time, s"); plt.ylabel(r"Energy")
| gpl-3.0 |
zfrenchee/pandas | pandas/tests/test_multilevel.py | 1 | 106146 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101,W0141
from warnings import catch_warnings
import datetime
import itertools
import pytest
import pytz
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notna, isna, Timestamp
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, product as
cart_product, zip)
import pandas as pd
import pandas._libs.index as _index
class Base(object):
def setup_method(self, method):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'], inplace=True)
class TestMultiLevel(Base):
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_append_index(self):
idx1 = Index([1.1, 1.2, 1.3])
idx2 = pd.date_range('2011-01-01', freq='D', periods=3,
tz='Asia/Tokyo')
idx3 = Index(['A', 'B', 'C'])
midx_lv2 = MultiIndex.from_arrays([idx1, idx2])
midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3])
result = idx1.append(midx_lv2)
# see gh-7112
tz = pytz.timezone('Asia/Tokyo')
expected_tuples = [(1.1, tz.localize(datetime.datetime(2011, 1, 1))),
(1.2, tz.localize(datetime.datetime(2011, 1, 2))),
(1.3, tz.localize(datetime.datetime(2011, 1, 3)))]
expected = Index([1.1, 1.2, 1.3] + expected_tuples)
tm.assert_index_equal(result, expected)
result = midx_lv2.append(idx1)
expected = Index(expected_tuples + [1.1, 1.2, 1.3])
tm.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv2)
expected = MultiIndex.from_arrays([idx1.append(idx1),
idx2.append(idx2)])
tm.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv3)
tm.assert_index_equal(result, expected)
result = midx_lv3.append(midx_lv2)
expected = Index._simple_new(
np.array([(1.1, tz.localize(datetime.datetime(2011, 1, 1)), 'A'),
(1.2, tz.localize(datetime.datetime(2011, 1, 2)), 'B'),
(1.3, tz.localize(datetime.datetime(2011, 1, 3)), 'C')] +
expected_tuples), None)
tm.assert_index_equal(result, expected)
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
assert isinstance(multi.index, MultiIndex)
assert not isinstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
assert isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']), np.array(
['x', 'y', 'x', 'y'])])
assert isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'], ['x', 'y', 'x', 'y']])
assert isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
assert isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
tm.assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
tm.assert_series_equal(result, expected, check_names=False)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
tm.assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
tm.assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(level='month').transform(
np.sum)
expected = op(self.ymd['A'], broadcasted)
expected.name = 'A'
tm.assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
unpickled = tm.round_trip_pickle(frame)
tm.assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
expected = self.frame.iloc[[0, 3]]
reindexed = self.frame.loc[[('foo', 'one'), ('bar', 'one')]]
tm.assert_frame_equal(reindexed, expected)
with catch_warnings(record=True):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
tm.assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
assert chunk.index is new_index
chunk = self.ymd.loc[new_index]
assert chunk.index is new_index
with catch_warnings(record=True):
chunk = self.ymd.ix[new_index]
assert chunk.index is new_index
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
assert chunk.columns is new_index
chunk = ymdT.loc[:, new_index]
assert chunk.columns is new_index
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
assert lines[2].startswith('a 0 foo')
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
tm.assert_almost_equal(col.values, df.values[:, 0])
with pytest.raises(KeyError):
df[('foo', 'four')]
with pytest.raises(KeyError):
df['foobar']
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
# TODO(wesm): unused?
# result2 = s.loc[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
tm.assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
assert result == expected
# fancy
expected = s.reindex(s.index[49:51])
result = s.loc[[(2000, 3, 10), (2000, 3, 13)]]
tm.assert_series_equal(result, expected)
with catch_warnings(record=True):
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
tm.assert_series_equal(result, expected)
# key error
pytest.raises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
pytest.raises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
tm.assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
assert isna(s.values[42:65]).all()
assert notna(s.values[:42]).all()
assert notna(s.values[65:]).all()
s[2000, 3, 10] = np.nan
assert isna(s[49])
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
tm.assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
with tm.assert_raises_regex(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.iloc[:4]
expected = self.frame[:4]
tm.assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.iloc[:4] = 0
assert (cp.values[:4] == 0).all()
assert (cp.values[4:] != 0).all()
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.loc[:, 'value']
tm.assert_series_equal(df['value'], result)
with catch_warnings(record=True):
result = df.ix[:, 'value']
tm.assert_series_equal(df['value'], result)
result = df.loc[df.index[1:3], 'value']
tm.assert_series_equal(df['value'][1:3], result)
result = df.loc[:, :]
tm.assert_frame_equal(df, result)
result = df
df.loc[:, 'value'] = 10
result['value'] = 10
tm.assert_frame_equal(df, result)
df.loc[:, :] = 10
tm.assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
tm.assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
tm.assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
tm.assert_frame_equal(cp['a'], cp['b'])
# ---------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
assert (df['A'].values == 0).all()
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
sliced_a1 = df['A', '1']
sliced_a2 = df['A', '2']
sliced_b1 = df['B', '1']
tm.assert_series_equal(sliced_a1, sliced_b1, check_names=False)
tm.assert_series_equal(sliced_a2, sliced_b1, check_names=False)
assert sliced_a1.name == ('A', '1')
assert sliced_a2.name == ('A', '2')
assert sliced_b1.name == ('B', '1')
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.loc[(0, 0), :]
expected = idf.loc[0, 0]
expected2 = idf.xs((0, 0))
with catch_warnings(record=True):
expected3 = idf.ix[0, 0]
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected2)
tm.assert_series_equal(result, expected3)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.loc[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.loc[2000, 1, 6][['A', 'B', 'C']]
tm.assert_series_equal(result, expected)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.loc[('bar', 'two')]
tm.assert_series_equal(xs, xs2)
tm.assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a', 'abcde', 1),
('b', 'bbcde', 2),
('y', 'yzcde', 25),
('z', 'xbcde', 24),
('z', None, 26),
('z', 'zbcde', 25),
('z', 'ybcde', 26),
]
df = DataFrame(acc,
columns=['a1', 'a2', 'cnt']).set_index(['a1', 'a2'])
expected = DataFrame({'cnt': [24, 26, 25, 26]}, index=Index(
['xbcde', np.nan, 'zbcde', 'ybcde'], name='a2'))
result = df.xs('z', level='a1')
tm.assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.loc['foo']
expected = self.frame.T['foo'].T
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.loc[2000, 4]
tm.assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1,
0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.loc['foo', 'one']
tm.assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'), (
'p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
tm.assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
pytest.raises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
tm.assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
pytest.raises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.loc[20111201, :]
tm.assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
assert len(result) == 2
tm.assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
tm.assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.loc[2000, 5]['A']
tm.assert_series_equal(result, expected)
# not implementing this for now
pytest.raises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# tm.assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
result = df['bar']
result2 = df.loc[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.loc[1:2]
exp = frame.reindex(frame.index[2:])
tm.assert_frame_equal(res, exp)
frame.loc[1:2] = 7
assert (frame.loc[1:2] == 7).values.all()
series = Series(np.random.randn(len(index)), index=index)
res = series.loc[1:2]
exp = series.reindex(series.index[2:])
tm.assert_series_equal(res, exp)
series.loc[1:2] = 7
assert (series.loc[1:2] == 7).values.all()
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.loc[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
# raises exception
pytest.raises(KeyError, frame.loc.__getitem__, 3)
# however this will work
result = self.frame.iloc[2]
expected = self.frame.xs(self.frame.index[2])
tm.assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
tm.assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
tm.assert_series_equal(dft['foo', 'two'], s > s.median())
# assert isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
tm.assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.loc[('bar', 'two'), 'B'] = 5
assert self.frame.loc[('bar', 'two'), 'B'] == 5
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.loc[('bar', 'two'), 1] = 7
assert df.loc[('bar', 'two'), 1] == 7
with catch_warnings(record=True):
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
assert df.loc[('bar', 'two'), 1] == 7
def test_fancy_slice_partial(self):
result = self.frame.loc['bar':'baz']
expected = self.frame[3:7]
tm.assert_frame_equal(result, expected)
result = self.ymd.loc[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
tm.assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.loc[('a', 'y'), :]
expected = df.loc[('a', 'y')]
tm.assert_frame_equal(result, expected)
result = df.loc[('a', 'y'), [1, 0]]
expected = df.loc[('a', 'y')][[1, 0]]
tm.assert_frame_equal(result, expected)
with catch_warnings(record=True):
result = df.ix[('a', 'y'), [1, 0]]
tm.assert_frame_equal(result, expected)
pytest.raises(KeyError, df.loc.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_delevel_infer_dtype(self):
tuples = [tuple
for tuple in cart_product(
['foo', 'bar'], [10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples, names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
assert is_integer_dtype(deleveled['prm1'])
assert is_float_dtype(deleveled['prm2'])
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
assert len(deleveled.columns) == len(self.ymd.columns)
deleveled = self.series.reset_index()
assert isinstance(deleveled, DataFrame)
assert len(deleveled.columns) == len(self.series.index.levels) + 1
deleveled = self.series.reset_index(drop=True)
assert isinstance(deleveled, Series)
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count()
expected = expected.reindex_like(result).astype('i8')
tm.assert_frame_equal(result, expected)
self.frame.iloc[1, [1, 2]] = np.nan
self.frame.iloc[7, [0, 1]] = np.nan
self.ymd.iloc[1, [1, 2]] = np.nan
self.ymd.iloc[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
tm.assert_raises_regex(
TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
tm.assert_index_equal(result.columns, Index(list('ABC'), name='exp'))
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'], ['one', 'two',
'three', 'four']],
labels=[[0, 0, 0, 2, 2], [2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
tm.assert_series_equal(
result.astype('f8'), expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
tm.assert_series_equal(
result.astype('f8'), expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0], name='A')
tm.assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_get_level_number_out_of_bounds(self):
with tm.assert_raises_regex(IndexError, "Too many levels"):
self.frame.index._get_level_number(2)
with tm.assert_raises_regex(IndexError,
"not a valid level number"):
self.frame.index._get_level_number(-3)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked.unstack()
# test that ints work
self.ymd.astype(int).unstack()
# test that int32 work
self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0), (
1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
tm.assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert not left.index.is_unique
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(np.arange(12).reshape(4, 3),
index=list('abab'),
columns=['1st', '2nd', '3rd'])
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd', '3rd']],
labels=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
np.arange(3), 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ['1st', '2nd', '1st']
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd']], labels=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ('a', 2), ('b', 1), ('a', 1), ('b', 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(levels=[['a', 'b'], [1, 2], ['1st', '2nd']],
labels=[np.tile(
np.arange(2).repeat(3), 2), np.repeat(
[1, 0, 1], [3, 6, 3]), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sort_index(level=1, axis=1)
stacked = df.stack()
result = df['foo'].stack().sort_index()
tm.assert_series_equal(stacked['foo'], result, check_names=False)
assert result.name is None
assert stacked['bar'].dtype == np.float_
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive', 'activ', 'activ',
'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
tm.assert_series_equal(
restacked, result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
assert unstacked.index.name == 'first'
assert unstacked.columns.names == ['exp', 'second']
restacked = unstacked.stack()
assert restacked.index.names == self.frame.index.names
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
tm.assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
tm.assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
tm.assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sort_index(level=0)
tm.assert_frame_equal(restacked, self.ymd)
assert restacked.index.names == self.ymd.index.names
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
tm.assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])
def test_stack_names_and_numbers(self):
unstacked = self.ymd.unstack(['year', 'month'])
# Can't use mixture of names and numbers to stack
with tm.assert_raises_regex(ValueError, "level should contain"):
unstacked.stack([0, 'month'])
def test_stack_multiple_out_of_bounds(self):
# nlevels == 3
unstacked = self.ymd.unstack(['year', 'month'])
with tm.assert_raises_regex(IndexError, "Too many levels"):
unstacked.stack([2, 3])
with tm.assert_raises_regex(IndexError,
"not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'],
freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02',
'2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10',
'2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(
['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
tm.assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU').mean()
rs = down.stack('ID')
xp = unst.loc[:, ['VAR1']].resample('W-THU').mean().stack('ID')
xp.columns.name = 'Params'
tm.assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = DataFrame({'A': ['a1', 'a2'], 'B': ['b1', 'b2'], 'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
assert len(stacked) > len(stacked.dropna())
stacked = df.unstack().stack(dropna=True)
tm.assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 1
]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
result = applied.reindex(expected.index)
tm.assert_series_equal(result, expected, check_names=False)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
assert len(result.columns) == 4
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_order_with_unsorted_levels(self):
# GH 16323
def manual_compare_stacked(df, df_stacked, lev0, lev1):
assert all(df.loc[row, col] ==
df_stacked.loc[(row, col[lev0]), col[lev1]]
for row in df.index for col in df.columns)
# deep check for 1-row case
for width in [2, 3]:
levels_poss = itertools.product(
itertools.permutations([0, 1, 2], width),
repeat=2)
for levels in levels_poss:
columns = MultiIndex(levels=levels,
labels=[[0, 0, 1, 1],
[0, 1, 0, 1]])
df = DataFrame(columns=columns, data=[range(4)])
for stack_lev in range(2):
df_stacked = df.stack(stack_lev)
manual_compare_stacked(df, df_stacked,
stack_lev, 1 - stack_lev)
# check multi-row case
mi = MultiIndex(levels=[["A", "C", "B"], ["B", "A", "C"]],
labels=[np.repeat(range(3), 3), np.tile(range(3), 3)])
df = DataFrame(columns=mi, index=range(5),
data=np.arange(5 * len(mi)).reshape(5, -1))
manual_compare_stacked(df, df.stack(0), 0, 1)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]],
names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'), (
'f2', 's1'), ('f2', 's2'), ('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.loc(axis=1)[df.columns.map(
lambda u: u[0] in ['f2', 'f3'])]
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
assert (result.columns == ['f2', 'f3']).all()
def test_join(self):
a = self.frame.loc[self.frame.index[:5], ['A']]
b = self.frame.loc[self.frame.index[2:], ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
assert not np.isnan(joined.values).all()
# TODO what should join do with names ?
tm.assert_frame_equal(joined, expected, check_names=False)
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel()
swapped2 = self.frame['A'].swaplevel(0)
swapped3 = self.frame['A'].swaplevel(0, 1)
swapped4 = self.frame['A'].swaplevel('first', 'second')
assert not swapped.index.equals(self.frame.index)
tm.assert_series_equal(swapped, swapped2)
tm.assert_series_equal(swapped, swapped3)
tm.assert_series_equal(swapped, swapped4)
back = swapped.swaplevel()
back2 = swapped.swaplevel(0)
back3 = swapped.swaplevel(0, 1)
back4 = swapped.swaplevel('second', 'first')
assert back.index.equals(self.frame.index)
tm.assert_series_equal(back, back2)
tm.assert_series_equal(back, back3)
tm.assert_series_equal(back, back4)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
tm.assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
with catch_warnings(record=True):
panel = Panel({'ItemA': self.frame, 'ItemB': self.frame * 2})
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
for result in (panel.swaplevel(axis='major'),
panel.swaplevel(0, axis='major'),
panel.swaplevel(0, 1, axis='major')):
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
tm.assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
tm.assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
tm.assert_frame_equal(result, expected)
with tm.assert_raises_regex(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with tm.assert_raises_regex(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
assert isinstance(df.columns, MultiIndex)
assert (df[2000, 1, 10] == df[2000, 1, 7]).all()
def test_alignment(self):
x = Series(data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), (
"A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), (
"Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
assert (df['foo'].values == 0).all()
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sort_index(level=0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
pytest.raises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
assert (df['foo', 'one'] == 0).all()
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
tm.assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
tm.assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
tm.assert_series_equal(result, expect, check_names=False)
assert result.index.name == 'b'
result = series.count(level='a')
expect = self.series.count(level=0)
tm.assert_series_equal(result, expect, check_names=False)
assert result.index.name == 'a'
pytest.raises(KeyError, series.count, 'x')
pytest.raises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
@pytest.mark.parametrize('sort', [True, False])
def test_series_group_min_max(self, sort):
# GH 17537
for op, level, skipna in cart_product(self.AGG_FUNCTIONS, lrange(2),
[False, True]):
grouped = self.series.groupby(level=level, sort=sort)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
if sort:
rightside = rightside.sort_index(level=level)
tm.assert_series_equal(leftside, rightside)
@pytest.mark.parametrize('sort', [True, False])
def test_frame_group_ops(self, sort):
# GH 17537
self.frame.iloc[1, [1, 2]] = np.nan
self.frame.iloc[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis, sort=sort)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
if sort:
rightside = rightside.sort_index(level=level, axis=axis)
frame = frame.sort_index(level=level, axis=axis)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
tm.assert_index_equal(leftside._get_axis(axis), level_index)
tm.assert_index_equal(rightside._get_axis(axis), level_index)
tm.assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
tm.assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
tm.assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
tm.assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10), np.tile(
np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
tm.assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
tm.assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
tm.assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
tm.assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
# TODO groupby with level_values drops names
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.names == self.ymd.index.names[:2]
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
tm.assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'), (
'bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df._consolidate()
def test_ix_preserve_names(self):
result = self.ymd.loc[2000]
result2 = self.ymd['A'].loc[2000]
assert result.index.names == self.ymd.index.names[1:]
assert result2.index.names == self.ymd.index.names[1:]
result = self.ymd.loc[2000, 2]
result2 = self.ymd['A'].loc[2000, 2]
assert result.index.name == self.ymd.index.names[2]
assert result2.index.name == self.ymd.index.names[2]
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.loc[2000, 4] = 0
exp.loc[2000, 4].values[:] = 0
tm.assert_frame_equal(df, exp)
df['A'].loc[2000, 4] = 1
exp['A'].loc[2000, 4].values[:] = 1
tm.assert_frame_equal(df, exp)
df.loc[2000] = 5
exp.loc[2000].values[:] = 5
tm.assert_frame_equal(df, exp)
# this works...for now
df['A'].iloc[14] = 5
assert df['A'][14] == 5
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
assert unstacked['A', 1].dtype == np.float64
assert unstacked['E', 1].dtype == np.object_
assert unstacked['F', 1].dtype == np.float64
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
assert result.shape == (500, 2)
# test roundtrip
stacked = result.stack()
tm.assert_series_equal(s, stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
assert result.shape == (500, 2)
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)] +
[labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
assert result.shape == (500, 2)
def test_getitem_lowerdim_corner(self):
pytest.raises(KeyError, self.frame.loc.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.loc[('bar', 'three'), 'B'] = 0
assert self.frame.sort_index().loc[('bar', 'three'), 'B'] == 0
# ---------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
pytest.skip("skipping for now")
result = self.ymd.loc[2000, 0]
expected = self.ymd.loc[2000]['A']
tm.assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.loc[2000, 0] = 0
# assert (self.ymd.loc[2000]['A'] == 0).all()
# Pretty sure the second (and maybe even the first) is already wrong.
pytest.raises(Exception, self.ymd.loc.__getitem__, (2000, 6))
pytest.raises(Exception, self.ymd.loc.__getitem__, (2000, 6), 0)
# ---------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0), (
'foo', 'qux', 0)], [0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.loc[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
pytest.raises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.loc[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'), (
'foo', 'qux')], [0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.loc[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = frame.loc[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
tm.assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
tm.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('unicode_strings', [True, False])
def test_mixed_depth_get(self, unicode_strings):
# If unicode_strings is True, the column labels in dataframe
# construction will use unicode strings in Python 2 (pull request
# #17099).
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
if unicode_strings:
arrays = [[u(s) for s in arr] for arr in arrays]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', ''].rename('a')
tm.assert_series_equal(result, expected)
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
expected = expected.rename(('routine1', 'result1'))
tm.assert_series_equal(result, expected)
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
tm.assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
tm.assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
tm.assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
tm.assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.loc[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
tm.assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
tm.assert_series_equal(expected, result, check_names=False)
tm.assert_frame_equal(df1, df2)
assert result.name == 'a'
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
tm.assert_frame_equal(expected, result)
tm.assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.iloc[[0, 1, 2, 7, 8, 9]]
tm.assert_frame_equal(result, expected)
result = self.frame.T.reindex(['foo', 'qux'], axis=1, level=0)
tm.assert_frame_equal(result, expected.T)
result = self.frame.loc[['foo', 'qux']]
tm.assert_frame_equal(result, expected)
result = self.frame['A'].loc[['foo', 'qux']]
tm.assert_series_equal(result, expected['A'])
result = self.frame.T.loc[:, ['foo', 'qux']]
tm.assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.loc[['foo', 'bar']] = 0
expected.loc['foo'] = 0
expected.loc['bar'] = 0
tm.assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.loc['foo':'bar'] = 0
expected.loc['foo'] = 0
expected.loc['bar'] = 0
tm.assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.loc[['foo', 'bar']] = 0
expected.loc['foo'] = 0
expected.loc['bar'] = 0
tm.assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.loc['foo':'bar'] = 0
expected.loc['foo'] = 0
expected.loc['bar'] = 0
tm.assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.iloc[[0, 1, 2, 5, 6]]
tm.assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]]
tm.assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.iloc[[0, 1, 2, 5, 6]].T
tm.assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]].T
tm.assert_frame_equal(result, expected)
def test_drop_level_nonunique_datetime(self):
# GH 12701
idx = Index([2, 3, 4, 4, 5], name='id')
idxdt = pd.to_datetime(['201603231400',
'201603231500',
'201603231600',
'201603231600',
'201603231700'])
df = DataFrame(np.arange(10).reshape(5, 2),
columns=list('ab'), index=idx)
df['tstamp'] = idxdt
df = df.set_index('tstamp', append=True)
ts = Timestamp('201603231600')
assert not df.index.is_unique
result = df.drop(ts, level='tstamp')
expected = df.loc[idx != 4]
tm.assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
assert result.index.names == ('one', 'two')
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
Index([0, 1])]
labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, labels=labels)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=[u('\u0394'), 'i1'])
s = Series(lrange(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
mix = MultiIndex.from_tuples([('1a', '2a'), ('1a', '2b'), ('1a', '2c')
])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
assert df['new'].isna().all()
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.loc[subset] = 99
assert (self.frame.loc[subset].values == 99).all()
col = self.frame['B']
col[subset] = 97
assert (self.frame.loc[subset, 'B'] == 97).all()
def test_frame_dict_constructor_empty_series(self):
s1 = Series([
1, 2, 3, 4
], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)]))
s2 = Series([
1, 2, 3, 4
], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
s3 = Series()
# it works!
DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_indexing_ambiguity_bug_1678(self):
columns = MultiIndex.from_tuples([('Ohio', 'Green'), ('Ohio', 'Red'), (
'Colorado', 'Green')])
index = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)
])
frame = DataFrame(np.arange(12).reshape((4, 3)), index=index,
columns=columns)
result = frame.iloc[:, 1]
exp = frame.loc[:, ('Ohio', 'Red')]
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
df = DataFrame([[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]],
columns=list("ABCD"))
df = df.set_index(['A', 'B'])
ix = MultiIndex.from_tuples([(1, 1)])
df.loc[ix, "C"] = '_'
assert (df.xs((1, 1))['C'] == '_').all()
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n),
MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
assert s[("a", 5)] == 5
assert s[("a", 6)] == 6
assert s[("a", 7)] == 7
_index._SIZE_CUTOFF = old_cutoff
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ("faz", "boo")
df.index = l
repr(df)
# this travels an improper code path
l[0] = ["faz", "boo"]
df.index = l
repr(df)
def test_tuples_have_na(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0,
1, 2, 3]])
assert isna(index[4][0])
assert isna(index.values[4][0])
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo', 'demo', 'demo', 'demo']
idx = MultiIndex.from_tuples(idx_tp, names=['STK_ID', 'RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
assert len(result) == 3
def test_duplicate_mi(self):
# GH 4516
df = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['bah', 'bam', 3.0, 3],
['bah', 'bam', 4.0, 4], ['foo', 'bar', 5.0, 5],
['bah', 'bam', 6.0, 6]],
columns=list('ABCD'))
df = df.set_index(['A', 'B'])
df = df.sort_index(level=0)
expected = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['foo', 'bar', 5.0, 5]],
columns=list('ABCD')).set_index(['A', 'B'])
result = df.loc[('foo', 'bar')]
tm.assert_frame_equal(result, expected)
def test_duplicated_drop_duplicates(self):
# GH 4060
idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2]))
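        # keep='first' (the default) flags later occurrences as duplicates,
        # keep='last' flags earlier ones, and keep=False flags every member
        # of a duplicated group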
expected = np.array(
[False, False, False, True, False, False], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(), expected)
expected = np.array([True, False, False, False, False, False])
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep='last'), expected)
expected = np.array([True, False, False, True, False, False])
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df['tuples'] = tuples
index = MultiIndex.from_tuples(df['tuples'])
# it works!
df.set_index(index)
def test_datetimeindex(self):
idx1 = pd.DatetimeIndex(
['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'
] * 2, tz='Asia/Tokyo')
idx2 = pd.date_range('2010/01/01', periods=6, freq='M',
tz='US/Eastern')
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00',
'2013-04-03 9:00'], tz='Asia/Tokyo')
tm.assert_index_equal(idx.levels[0], expected1)
tm.assert_index_equal(idx.levels[1], idx2)
# from datetime combos
# GH 7888
date1 = datetime.date.today()
date2 = datetime.datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product(
[date1, date2, date3], [date1, date2, date3]):
index = MultiIndex.from_product([[d1], [d2]])
assert isinstance(index.levels[0], pd.DatetimeIndex)
assert isinstance(index.levels[1], pd.DatetimeIndex)
def test_constructor_with_tz(self):
index = pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
name='dt1', tz='US/Pacific')
columns = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'],
name='dt2', tz='Asia/Tokyo')
result = MultiIndex.from_arrays([index, columns])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
result = MultiIndex.from_arrays([Series(index), Series(columns)])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
def test_set_index_datetime(self):
# GH 3950
df = DataFrame(
{'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
'value': range(6)})
df.index = pd.to_datetime(df.pop('datetime'), utc=True)
df.index = df.index.tz_convert('US/Pacific')
expected = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], name='datetime')
expected = expected.tz_localize('UTC').tz_convert('US/Pacific')
df = df.set_index('label', append=True)
tm.assert_index_equal(df.index.levels[0], expected)
tm.assert_index_equal(df.index.levels[1],
Index(['a', 'b'], name='label'))
df = df.swaplevel(0, 1)
tm.assert_index_equal(df.index.levels[0],
Index(['a', 'b'], name='label'))
tm.assert_index_equal(df.index.levels[1], expected)
df = DataFrame(np.random.random(6))
idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
tz='US/Eastern')
idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00',
'2012-04-01 09:00', '2012-04-02 09:00',
'2012-04-02 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Eastern')
expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
# GH 7092
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
def test_reset_index_datetime(self):
# GH 3950
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz,
name='idx1')
idx2 = Index(range(5), name='idx2', dtype='int64')
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
idx3 = pd.date_range('1/1/2012', periods=5, freq='MS',
tz='Europe/Paris', name='idx3')
idx = MultiIndex.from_arrays([idx1, idx2, idx3])
df = DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'idx3': [datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1),
datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1),
datetime.datetime(2012, 5, 1)],
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'idx3', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: Timestamp(d, tz=tz))
expected['idx3'] = expected['idx3'].apply(
lambda d: Timestamp(d, tz='Europe/Paris'))
tm.assert_frame_equal(df.reset_index(), expected)
# GH 7793
idx = MultiIndex.from_product([['a', 'b'], pd.date_range(
'20130101', periods=3, tz=tz)])
df = DataFrame(
np.arange(6, dtype='int64').reshape(
6, 1), columns=['a'], index=idx)
expected = DataFrame({'level_0': 'a a a b b b'.split(),
'level_1': [
datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 2),
datetime.datetime(2013, 1, 3)] * 2,
'a': np.arange(6, dtype='int64')},
columns=['level_0', 'level_1', 'a'])
expected['level_1'] = expected['level_1'].apply(
lambda d: Timestamp(d, freq='D', tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_reset_index_period(self):
# GH 7746
idx = MultiIndex.from_product(
[pd.period_range('20130101', periods=3, freq='M'), list('abc')],
names=['month', 'feature'])
df = DataFrame(np.arange(9, dtype='int64').reshape(-1, 1),
index=idx, columns=['a'])
expected = DataFrame({
'month': ([pd.Period('2013-01', freq='M')] * 3 +
[pd.Period('2013-02', freq='M')] * 3 +
[pd.Period('2013-03', freq='M')] * 3),
'feature': ['a', 'b', 'c'] * 3,
'a': np.arange(9, dtype='int64')
}, columns=['month', 'feature', 'a'])
tm.assert_frame_equal(df.reset_index(), expected)
def test_reset_index_multiindex_columns(self):
levels = [['A', ''], ['B', 'b']]
df = DataFrame([[0, 2], [1, 3]],
columns=MultiIndex.from_tuples(levels))
result = df[['B']].rename_axis('A').reset_index()
tm.assert_frame_equal(result, df)
# gh-16120: already existing column
with tm.assert_raises_regex(ValueError,
(r"cannot insert \('A', ''\), "
"already exists")):
df.rename_axis('A').reset_index()
# gh-16164: multiindex (tuple) full key
result = df.set_index([('A', '')]).reset_index()
tm.assert_frame_equal(result, df)
# with additional (unnamed) index level
idx_col = DataFrame([[0], [1]],
columns=MultiIndex.from_tuples([('level_0', '')]))
expected = pd.concat([idx_col, df[[('B', 'b'), ('A', '')]]], axis=1)
result = df.set_index([('B', 'b')], append=True).reset_index()
tm.assert_frame_equal(result, expected)
# with index name which is a too long tuple...
with tm.assert_raises_regex(ValueError,
("Item must have length equal to number "
"of levels.")):
df.rename_axis([('C', 'c', 'i')]).reset_index()
# or too short...
levels = [['A', 'a', ''], ['B', 'b', 'i']]
df2 = DataFrame([[0, 2], [1, 3]],
columns=MultiIndex.from_tuples(levels))
idx_col = DataFrame([[0], [1]],
columns=MultiIndex.from_tuples([('C', 'c', 'ii')]))
expected = pd.concat([idx_col, df2], axis=1)
result = df2.rename_axis([('C', 'c')]).reset_index(col_fill='ii')
tm.assert_frame_equal(result, expected)
# ... which is incompatible with col_fill=None
with tm.assert_raises_regex(ValueError,
("col_fill=None is incompatible with "
r"incomplete column name \('C', 'c'\)")):
df2.rename_axis([('C', 'c')]).reset_index(col_fill=None)
# with col_level != 0
result = df2.rename_axis([('c', 'ii')]).reset_index(col_level=1,
col_fill='C')
tm.assert_frame_equal(result, expected)
def test_set_index_period(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = pd.period_range('2011-01-01', periods=3, freq='M')
idx1 = idx1.append(idx1)
idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
idx2 = idx2.append(idx2).append(idx2)
idx3 = pd.period_range('2005', periods=6, freq='A')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.period_range('2011-01-01', periods=3, freq='M')
expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
def test_repeat(self):
# GH 9361
# fixed by # GH 7891
m_idx = MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6), (7, 8)])
data = ['a', 'b', 'c', 'd']
m_df = Series(data, index=m_idx)
assert m_df.repeat(3).shape == (3 * len(data), )
def test_iloc_mi(self):
# GH 13797
# Test if iloc can handle integer locations in MultiIndexed DataFrame
data = [['str00', 'str01'], ['str10', 'str11'], ['str20', 'srt21'],
['str30', 'str31'], ['str40', 'str41']]
mi = MultiIndex.from_tuples(
[('CC', 'A'), ('CC', 'B'), ('CC', 'B'), ('BB', 'a'), ('BB', 'b')])
expected = DataFrame(data)
df_mi = DataFrame(data, index=mi)
result = DataFrame([[df_mi.iloc[r, c] for c in range(2)]
for r in range(5)])
tm.assert_frame_equal(result, expected)
class TestSorted(Base):
""" everything you wanted to test about sorting """
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
assert result.index.names == self.frame.index.names
def test_sorting_repr_8017(self):
np.random.seed(0)
data = np.random.randn(3, 4)
for gen, extra in [([1., 3., 2., 5.], 4.), ([1, 3, 2, 5], 4),
([Timestamp('20130101'), Timestamp('20130103'),
Timestamp('20130102'), Timestamp('20130105')],
Timestamp('20130104')),
(['1one', '3one', '2one', '5one'], '4one')]:
columns = MultiIndex.from_tuples([('red', i) for i in gen])
df = DataFrame(data, index=list('def'), columns=columns)
df2 = pd.concat([df,
DataFrame('world', index=list('def'),
columns=MultiIndex.from_tuples(
[('red', extra)]))], axis=1)
# check that the repr is good
# make sure that we have a correct sparsified repr
            # e.g. only 1 header of 'red'
assert str(df2).splitlines()[0].split() == ['red']
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
tm.assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[('red', extra)] = 'world'
result = result.sort_index(axis=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level(self):
df = self.frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = self.frame['A'].sort_index(level=0)
# preserve names
assert a_sorted.index.names == self.frame.index.names
# inplace
rs = self.frame.copy()
rs.sort_index(level=0, inplace=True)
tm.assert_frame_equal(rs, self.frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sort_index(level=0)
assert result.index.lexsort_depth == 3
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sort_index(level='second')
expected = self.frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self):
sorted_before = self.frame.sort_index(level=1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before,
sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
assert index.is_lexsorted()
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]])
assert not index.is_lexsorted()
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]])
assert not index.is_lexsorted()
assert index.lexsort_depth == 0
def test_getitem_multilevel_index_tuple_not_sorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.loc[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c'])
xp = Series(['x'], index=xp_idx, name='data')
tm.assert_series_equal(rs, xp)
def test_getitem_slice_not_sorted(self):
df = self.frame.sort_index(level=1).T
# buglet with int typechecking
result = df.iloc[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted2(self):
# 13431
df = DataFrame({'col1': ['b', 'd', 'b', 'a'],
'col2': [3, 1, 1, 2],
'data': ['one', 'two', 'three', 'four']})
df2 = df.set_index(['col1', 'col2'])
df2_original = df2.copy()
df2.index.set_levels(['b', 'd', 'a'], level='col1', inplace=True)
df2.index.set_labels([0, 1, 0, 2], level='col1', inplace=True)
assert not df2.index.is_lexsorted()
assert not df2.index.is_monotonic
assert df2_original.index.equals(df2.index)
expected = df2.sort_index()
assert expected.index.is_lexsorted()
assert expected.index.is_monotonic
result = df2.sort_index(level=0)
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns.values)]
result = df['foo']
result2 = df.loc[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.loc['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index.values)]
result = s['qux']
result2 = s.loc['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
def test_sort_index_and_reconstruction(self):
# 15622
# lexsortedness should be identical
        # across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list('ab'))
expected = DataFrame([[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples([(0.5, 'a'),
(0.5, 'b'),
(0.8, 'a'),
(0.8, 'b')]))
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list('ab')]))
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(levels=[[0.5, 0.8], ['a', 'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
result = result.sort_index()
assert result.index.is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# 14015
df = DataFrame([[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, '20160811 12:00:00'),
(0, '20160809 12:00:00')],
names=['l1', 'Date']))
df.columns.set_levels(pd.to_datetime(df.columns.levels[1]),
level=1,
inplace=True)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
result = df.sort_index(axis=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
result = df.sort_index(axis=1, level=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame({'value': [1, 2, 3, 4]},
index=MultiIndex(
levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame({'value': [2, 1, 4, 3]},
index=MultiIndex(
levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_reorder_on_ops(self):
# 15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[['a', 'b'], ['big', 'small'], ['red', 'blu']],
names=['letter', 'size', 'color']),
columns=['near', 'far'])
df = df.sort_index()
def my_func(group):
group.index = ['newz', 'newa']
return group
result = df.groupby(level=['letter', 'size']).apply(
my_func).sort_index()
expected = MultiIndex.from_product(
[['a', 'b'], ['big', 'small'], ['newa', 'newz']],
names=['letter', 'size', None])
tm.assert_index_equal(result.index, expected)
def test_sort_non_lexsorted(self):
# degenerate case where we sort but don't
# have a satisfying result :<
# GH 15797
idx = MultiIndex([['A', 'B', 'C'],
['c', 'b', 'a']],
[[0, 1, 2, 0, 1, 2],
[0, 2, 1, 1, 0, 2]])
df = DataFrame({'col': range(len(idx))},
index=idx,
dtype='int64')
assert df.index.is_lexsorted() is False
assert df.index.is_monotonic is False
sorted = df.sort_index()
assert sorted.index.is_lexsorted() is True
assert sorted.index.is_monotonic is True
expected = DataFrame(
{'col': [1, 4, 5, 2]},
index=MultiIndex.from_tuples([('B', 'a'), ('B', 'c'),
('C', 'a'), ('C', 'b')]),
dtype='int64')
result = sorted.loc[pd.IndexSlice['B':'C', 'a':'c'], :]
tm.assert_frame_equal(result, expected)
def test_sort_index_nan(self):
# GH 14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4),
index=mi, columns=list('ABCD'))
s = Series(np.arange(4), index=mi)
df2 = DataFrame({
'date': pd.to_datetime([
'20121002', '20121007', '20130130', '20130202', '20130305',
'20121002', '20121207', '20130130', '20130202', '20130305',
'20130202', '20130305'
]),
'user_id': [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
'whole_cost': [1790, np.nan, 280, 259, np.nan, 623, 90, 312,
np.nan, 301, 359, 801],
'cost': [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12]
}).set_index(['date', 'user_id'])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position='last')
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position='first')
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position='last')
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position='first')
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_ascending_list(self):
# GH: 16934
# Set up a Series with a three level MultiIndex
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'],
[4, 3, 2, 1, 4, 3, 2, 1]]
tuples = lzip(*arrays)
mi = MultiIndex.from_tuples(tuples, names=['first', 'second', 'third'])
s = Series(range(8), index=mi)
# Sort with boolean ascending
result = s.sort_index(level=['third', 'first'], ascending=False)
expected = s.iloc[[4, 0, 5, 1, 6, 2, 7, 3]]
tm.assert_series_equal(result, expected)
# Sort with list of boolean ascending
result = s.sort_index(level=['third', 'first'],
ascending=[False, True])
expected = s.iloc[[0, 4, 1, 5, 2, 6, 3, 7]]
tm.assert_series_equal(result, expected)
| bsd-3-clause |
abhishekgahlot/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
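    # for a linear SVM the distance from the decision boundary to the closest
    # support vectors (the geometric margin) is 1 / ||w||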
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
probml/pyprobml | scripts/hbayes_bernoulli_bap_pymc3.py | 1 | 2026 | # From chapter 2 of
# https://github.com/aloctavodia/BAP
import matplotlib.pyplot as plt
import scipy.stats as stats
import numpy as np
import pandas as pd
import seaborn as sns
import pymc3 as pm
import arviz as az
np.random.seed(123)
# Example from BAP
N_samples = np.array([30, 30, 30])
G_samples = np.array([18, 18, 18]) # [3, 3, 3] [18, 3, 3]
group_idx = np.repeat(np.arange(len(N_samples)), N_samples)
data = []
for i in range(0, len(N_samples)):
data.extend(np.repeat([1, 0], [G_samples[i], N_samples[i]-G_samples[i]]))
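# Hierarchical beta-Bernoulli model: mu (population mean) and kappa
# (concentration) are shared hyper-priors, and each group draws its own
# success probability theta from Beta(mu*kappa, (1 - mu)*kappa).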
with pm.Model() as model_h:
μ = pm.Beta('μ', 1., 1.)
κ = pm.HalfNormal('κ', 10)
θ = pm.Beta('θ', alpha=μ*κ, beta=(1.0-μ)*κ, shape=len(N_samples))
y = pm.Bernoulli('y', p=θ[group_idx], observed=data)
trace_h = pm.sample(1000)
az.plot_trace(trace_h)
#plt.savefig('B11197_02_20.png', dpi=300)
az.summary(trace_h)
J = len(N_samples)
post_mean = np.zeros(J)
samples = trace_h['θ']
post_mean = np.mean(samples, axis=0)
post_hyper_mean = trace_h['μ'].mean()
mle = G_samples / N_samples
pooled_mle = np.sum(G_samples) / np.sum(N_samples)
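# compare the per-group MLEs and the pooled MLE with the hierarchical posterior
# means; the hierarchy shrinks each group estimate toward the population mean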
axes = az.plot_forest(
trace_h, var_names='θ', combined=False, colors='cycle')
y_lims = axes[0].get_ylim()
axes[0].vlines(post_hyper_mean, *y_lims)
axes = az.plot_forest(
trace_h, var_names='θ', combined=True, colors='cycle',
kind='ridgeplot')
# Show posterior over hyperparameters
fig, ax= plt.subplots(1,1)
x = np.linspace(0, 1, 100)
for i in np.random.randint(0, len(trace_h), size=100):
u = trace_h['μ'][i]
k = trace_h['κ'][i]
pdf = stats.beta(u*k, (1.0-u)*k).pdf(x)
ax.plot(x, pdf, 'C1', alpha=0.2)
u_mean = trace_h['μ'].mean()
k_mean = trace_h['κ'].mean()
dist = stats.beta(u_mean*k_mean, (1.0-u_mean)*k_mean)
pdf = dist.pdf(x)
mode = x[np.argmax(pdf)]
mean = dist.moment(1)
ax.plot(x, pdf, lw=3, label=f'mode = {mode:.2f}\nmean = {mean:.2f}')
ax.set_yticks([])
ax.legend()
ax.set_xlabel('$θ_{prior}$')
plt.tight_layout()
#plt.savefig('B11197_02_21.png', dpi=300)
| mit |
emmanuelle/scikits.image | doc/examples/plot_canny.py | 2 | 1625 | """
===================
Canny edge detector
===================
The Canny filter is a multi-stage edge detector. It uses a filter based on the
derivative of a Gaussian in order to compute the intensity of the gradients. The
Gaussian reduces the effect of noise present in the image. Then, potential
edges are thinned down to 1-pixel curves by removing non-maximum pixels of the
gradient magnitude. Finally, edge pixels are kept or removed using hysteresis
thresholding on the gradient magnitude.
The Canny filter has three adjustable parameters: the width of the Gaussian (the
noisier the image, the greater the width), and the low and high threshold for
the hysteresis thresholding.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage import filter
# Generate noisy image of a square
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1
im = ndimage.rotate(im, 15, mode='constant')
im = ndimage.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)
# Compute the Canny filter for two values of sigma
edges1 = filter.canny(im)
edges2 = filter.canny(im, sigma=3)
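# A third variant tuning the hysteresis thresholds directly could look like the
# sketch below (assuming this skimage version exposes the low_threshold and
# high_threshold keyword arguments of filter.canny):
# edges3 = filter.canny(im, sigma=3, low_threshold=0.1, high_threshold=0.3)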
# display results
plt.figure(figsize=(8, 3))
plt.subplot(131)
plt.imshow(im, cmap=plt.cm.jet)
plt.axis('off')
plt.title('noisy image', fontsize=20)
plt.subplot(132)
plt.imshow(edges1, cmap=plt.cm.gray)
plt.axis('off')
plt.title('Canny filter, $\sigma=1$', fontsize=20)
plt.subplot(133)
plt.imshow(edges2, cmap=plt.cm.gray)
plt.axis('off')
plt.title('Canny filter, $\sigma=3$', fontsize=20)
plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
| bsd-3-clause |
smenon8/AnimalWildlifeEstimator | script/ImageShareabilityRegressors.py | 1 | 4993 | # python-3
# Author Name : Sreejith Menon ([email protected])
import ClassiferHelperAPI as CH
import numpy as np
import pandas as pd
import plotly.plotly as py
from collections import Counter
import csv
import plotly.graph_objs as go
import RegressionCapsuleClass as RgrCls
def plot(rgrObj,arr,arr_min,title,flNm,range,errorBar = True):
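    # scatter plot of predicted vs. actual share rates with optional asymmetric
    # error bars; returns the plotly handle so callers can reuse its embed code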
trace1 = go.Scatter(
x = list(rgrObj.preds),
y = list(rgrObj.test_y),
error_y = dict(
type='data',
symmetric = False,
array = arr,
arrayminus = arr_min,
visible=errorBar
),
mode = 'markers'
)
layout= go.Layout(
title= title,
xaxis= dict(
title= 'Predicted Share Rate',
ticklen= 5,
zeroline= False,
gridwidth= 2,
range=[-5,110]
),
yaxis=dict(
title= 'Actual Share rate',
ticklen= 5,
gridwidth= 2,
#range=range
)
)
data = [trace1]
fig = dict(data=data,layout=layout)
a = py.iplot(fig,filename=flNm)
return a
def runRgr(methodName,attribType):
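    # fit the requested regressor either on beauty feature vectors or on
    # tag-based attributes, then return the fitted capsule together with the
    # positive/negative residual magnitudes used as asymmetric error bars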
if attribType == "beauty":
inpData = pd.DataFrame.from_csv("../data/BeautyFtrVector_GZC.csv")
inpData.reindex(np.random.permutation(inpData.index))
y = inpData['Proportion']
inpData.drop(['Proportion'],1,inplace=True)
train_x, test_x, train_y, test_y = CH.train_test_split(inpData, y, test_size = 0.4)
rgr = CH.getLearningAlgo(methodName,{'fit_intercept':True})
rgrObj = RgrCls.RegressionCapsule(rgr,methodName,0.8,train_x,train_y,test_x,test_y)
else:
train_data_fl = "../FinalResults/ImgShrRnkListWithTags.csv"
infoGainFl = "../data/infoGainsExpt2.csv"
allAttribs = CH.genAllAttribs(train_data_fl,attribType,infoGainFl)
rgrObj = CH.buildRegrMod(train_data_fl,allAttribs,0.6,methodName,kwargs={'fit_intercept':True})
rgrObj.runRgr(computeMetrics=True,removeOutliers=True)
x = [i for i in range(len(rgrObj.preds))]
errors = [list(rgrObj.test_y)[i] - list(rgrObj.preds)[i] for i in range(len(rgrObj.preds))]
arr = [-1 * errors[i] if errors[i] < 0 else 0 for i in range(len(errors)) ]
arr_min = [errors[i] if errors[i] > 0 else 0 for i in range(len(errors)) ]
return rgrObj,arr,arr_min
def plotWithErrBars(attribTypes, rgrAlgoTypes):
embedCodes = []
for attrib in attribTypes:
code = []
for alg in rgrAlgoTypes:
rgrObj, arr,arr_min = runRgr(alg,attrib)
title = "%s regression results using %s attributes" %(alg,attrib)
flNm = "%s_regession_%s_attributes_%s" %(alg,attrib,str(True))
a = plot(rgrObj,arr,arr_min,title,flNm,[-100,200],errorBar = True)
code.append(a.embed_code)
embedCodes.append(code)
for code in embedCodes:
print(code)
print()
def plotWithoutErrBars(attribTypes, rgrAlgoTypes):
embedCodes = []
for attrib in attribTypes:
code = []
for alg in rgrAlgoTypes:
rgrObj, _, _ = runRgr(alg,attrib)
print("Absolute error for %s using %s : %f" %(alg,attrib,rgrObj.abserr))
print("Mean Squared error for %s using %s : %f" %(alg,attrib,rgrObj.sqerr))
title = "%s regression results using %s attributes" %(alg,attrib)
flNm = "%s_regession_%s_attributes_%s" %(alg,attrib,str(False))
a = plot(rgrObj,[],[],title,flNm,[-10,110],errorBar = False)
code.append(a.embed_code)
embedCodes.append(code)
for code in embedCodes:
print(code)
print()
def plotResiduals(attribTypes, rgrAlgoTypes):
embedCodes = []
for attrib in attribTypes:
code = []
for alg in rgrAlgoTypes:
rgrObj, _, _ = runRgr(alg,attrib)
x_data = rgrObj.preds
y_data = rgrObj.residues
title = "%s regression residues using %s attributes" %(alg,attrib)
flNm = "%s_regession_%s_attributes_%s" %(alg,attrib,"residues")
# Create a trace
trace = go.Scatter(
x = x_data,
y = y_data,
mode = 'markers'
)
layout= go.Layout(
title= title,
xaxis= dict(
title= 'Predicted Share Rate',
ticklen= 5
),
yaxis=dict(
title= 'Residues',
ticklen= 5
)
)
data = [trace]
# Plot and embed in ipython notebook!
fig = dict(data=data,layout=layout)
a = py.iplot(fig,filename=flNm)
code.append(a.embed_code)
embedCodes.append(code)
for code in embedCodes:
print(code)
print()
def __main__():
# attribTypes = ['sparse', 'non_sparse', 'non_zero', 'abv_mean']
attribTypes = ['beauty']
rgrAlgoTypes = ['linear', 'ridge', 'lasso', 'elastic_net', 'svr', 'linear_svr', 'dtree_regressor']
# plotWithErrBars(attribTypes, rgrAlgoTypes)
# print("\n\n\n\n\n")
# plotWithoutErrBars(attribTypes, rgrAlgoTypes)
plotResiduals(attribTypes, rgrAlgoTypes)
if __name__ == "__main__":
__main__() | bsd-3-clause |
josenavas/QiiTa | qiita_pet/handlers/study_handlers/prep_template.py | 1 | 3836 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from os.path import join
from tornado.web import authenticated
from tornado.escape import url_escape
import pandas as pd
from qiita_pet.handlers.util import to_int
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_db.util import (get_files_from_uploads_folders, get_mountpoint,
supported_filepath_types)
from qiita_pet.handlers.api_proxy import (
prep_template_ajax_get_req, new_prep_template_get_req,
prep_template_summary_get_req)
class NewPrepTemplateAjax(BaseHandler):
@authenticated
def get(self):
study_id = to_int(self.get_argument('study_id'))
result = new_prep_template_get_req(study_id)
self.render('study_ajax/add_prep_template.html',
prep_files=result['prep_files'],
data_types=result['data_types'],
ontology=result['ontology'],
study_id=study_id)
class PrepTemplateSummaryAJAX(BaseHandler):
@authenticated
def get(self):
prep_id = to_int(self.get_argument('prep_id'))
res = prep_template_summary_get_req(prep_id, self.current_user.id)
self.render('study_ajax/prep_summary_table.html', pid=prep_id,
stats=res['summary'], editable=res['editable'],
num_samples=res['num_samples'])
class PrepTemplateAJAX(BaseHandler):
@authenticated
def get(self):
"""Send formatted summary page of prep template"""
prep_id = to_int(self.get_argument('prep_id'))
row_id = self.get_argument('row_id', '0')
res = prep_template_ajax_get_req(self.current_user.id, prep_id)
res['prep_id'] = prep_id
res['row_id'] = row_id
        # Escape the message in case it contains javascript-breaking characters
res['alert_message'] = url_escape(res['alert_message'])
self.render('study_ajax/prep_summary.html', **res)
class PrepFilesHandler(BaseHandler):
@authenticated
def get(self):
study_id = self.get_argument('study_id')
prep_file = self.get_argument('prep_file')
prep_type = self.get_argument('type')
# TODO: Get file types for the artifact type
        # FILE TYPE IN POSITION 0 MUST BE DEFAULT FOR SELECTED
file_types = supported_filepath_types(prep_type)
selected = []
not_selected = []
_, base = get_mountpoint("uploads")[0]
uploaded = get_files_from_uploads_folders(study_id)
prep = pd.read_table(join(base, study_id, prep_file), sep='\t')
if 'run_prefix' in prep.columns:
# Use run_prefix column of prep template to auto-select
# per-prefix uploaded files if available.
per_prefix = True
prep_prefixes = set(prep['run_prefix'])
for _, filename in uploaded:
for prefix in prep_prefixes:
if filename.startswith(prefix):
selected.append(filename)
else:
not_selected.append(filename)
else:
per_prefix = False
not_selected = [f for _, f in uploaded]
        # Write out whether this prep template supports per-prefix files,
        # as well as the pre-selected and remaining files
self.write({
'per_prefix': per_prefix,
'file_types': file_types,
'selected': selected,
'remaining': not_selected})
| bsd-3-clause |
hsinhuang/codebase | ntumltwo-001/hw3.py | 1 | 2956 | import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from decision_tree import DecisionTree
from bagging import Bagging
def load(filename):
dataset = np.loadtxt(filename)
return dataset[:, :-1], dataset[:, -1]
def load_train():
return load('hw3_train.dat')
def load_test():
return load('hw3_test.dat')
def q13():
tree = DecisionTree()
tree.fit(*load_train())
print tree.__prepr__()
print tree.node_count
def _q13():
X, y = load_train()
tree = DecisionTreeClassifier(criterion='gini')
tree.fit(X, y)
print tree.tree_.node_count
def q14():
tree = DecisionTree()
tree.fit(*load_train())
print tree.ein
def _q14():
X, y = load_train()
tree = DecisionTreeClassifier(criterion='gini')
tree.fit(X, y)
print 1 - tree.score(X, y)
def q15():
tree = DecisionTree()
tree.fit(*load_train())
print tree.error(*load_test())
def _q15():
X, y = load_train()
tree = DecisionTreeClassifier(criterion='gini')
tree.fit(X, y)
print 1 - tree.score(*load_test())
def q16_bag(param):
def get_decision_tree():
return DecisionTree()
X, y = param
bagging = Bagging(get_decision_tree, T=300)
bagging.fit(X, y)
return bagging
def q16_print_ein(param):
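    # report the mean in-sample error of the individual bagged trees (q16), the
    # in-sample error of the aggregated ensemble G (q17) and its test error (q18)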
bagging = q16_bag(param)
eins = [1 - m.score(*param) for m in bagging.models]
ein_g = sum(eins) * 1.0 / len(eins)
ein_G = 1 - bagging.score(*param)
eout_G = 1 - bagging.score(*load_test())
print '-' * 6
print 'q16:', ein_g
print 'q17:', ein_G
print 'q18:', eout_G
return (ein_g, ein_G, eout_G)
def q16():
X, y = load_train()
import multiprocessing as par
pool = par.Pool(processes=par.cpu_count())
R = 100
res = pool.map(q16_print_ein, [(X, y)] * R)
print '-' * 6
print '- q16:', sum([e[0] for e in res]) * 1.0 / len(res)
print '- q17:', sum([e[1] for e in res]) * 1.0 / len(res)
print '- q18:', sum([e[2] for e in res]) * 1.0 / len(res)
def q19_bag(param):
def get_decision_tree():
return DecisionTree(depth=1)
X, y = param
bagging = Bagging(get_decision_tree, T=300)
bagging.fit(X, y)
return bagging
def q19_print_ein(param):
bagging = q19_bag(param)
ein_G = 1 - bagging.score(*param)
eout_G = 1 - bagging.score(*load_test())
print '-' * 6
print 'q19:', ein_G
print 'q20:', eout_G
return (ein_G, eout_G)
def q19():
X, y = load_train()
import multiprocessing as par
pool = par.Pool(processes=par.cpu_count())
R = 100
res = pool.map(q19_print_ein, [(X, y)] * R)
print '-' * 6
print '- q19:', sum([e[0] for e in res]) * 1.0 / len(res)
print '- q20:', sum([e[1] for e in res]) * 1.0 / len(res)
def run(func):
print '=' * 3 + func.__name__ + '=' * 3
func()
if __name__ == '__main__':
qs = [q16, q19]
for q in qs:
run(q)
| gpl-2.0 |
JohanComparat/pySU | galaxy/bin_stack/plot_results_spectraStacks.py | 1 | 23266 | #! /usr/bin/env python
"""
This script produces the stacks for emission line luminosity limited samples.
"""
import sys
import matplotlib
#matplotlib.use('pdf')
import matplotlib.pyplot as p
import numpy as n
import os
from os.path import join
import astropy.io.fits as fits
G05 = n.loadtxt( join(os.environ['SPECTRASTACKS_DIR'], "biblioPoints", "gallazzi-2005.data"), unpack= True)
G06 = n.loadtxt( join(os.environ['SPECTRASTACKS_DIR'], "biblioPoints", "gallazzi-2006.data"), unpack= True)
path_to_summary_table = join(os.environ['SPECTRASTACKS_DIR'], "results", "table_fullSpecFit_v0.VA.fits")
data =fits.open(path_to_summary_table)[1].data
path_to_summary_table = join(os.environ['SPECTRASTACKS_DIR'], "results", "table_lineSpecFit_v0.VA.fits")
datL =fits.open(path_to_summary_table)[1].data
# first check that line fits are compatible with the luminosity bin
fl = data['L_mean']/(4*n.pi*data['dL']**2.)
O2 = (data['lineWavelength']== 3728.)
O3 = (data['lineWavelength']== 5007.)
Hb = (data['lineWavelength']== 4862.) & (data['H1_4862_flux']>0)
chi2 = (data['H1_4862_flux'][Hb] - fl[Hb])/data['H1_4862_fluxErr'][Hb]
ebv_snr_limit = 5.
chi2_per_dof = 15
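# keep only stacks where each line is detected above ebv_snr_limit sigma and the
# line fit satisfies chi2 < chi2_per_dof * ndof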
detect_O3_5007 = (datL['O3_5007_flux']>ebv_snr_limit*datL['O3_5007_fluxErr']) & (datL['O3_5007_flux']>0) & (datL['O3_5007_fluxErr']>0) &(datL['O3_5007_chi2']<chi2_per_dof*datL['O3_5007_ndof'])
detect_H1_4862 = (datL['H1_4862_flux']>ebv_snr_limit*datL['H1_4862_fluxErr']) & (datL['H1_4862_flux']>0) & (datL['H1_4862_fluxErr']>0) &(datL['H1_4862_chi2']<chi2_per_dof*datL['H1_4862_ndof'])
detect_H1_4341 = (datL['H1_4341_flux']>ebv_snr_limit*datL['H1_4341_fluxErr'])& (datL['H1_4341_flux']>0)&(datL['H1_4341_fluxErr']>0)&(datL['H1_4341_chi2']<chi2_per_dof*datL['H1_4341_ndof'])
detect_H1_4102 = (datL['H1_4102_flux']>ebv_snr_limit*datL['H1_4102_fluxErr'])& (datL['H1_4102_flux']>0)&(datL['H1_4102_fluxErr']>0)&(datL['H1_4102_chi2']<chi2_per_dof*datL['H1_4102_ndof'])
detect_all_H1 = detect_H1_4862 & detect_H1_4341 & detect_H1_4102
def_EBV_4862_4341 = (datL['EBV_4862_4341']!=-9999.99) & (datL['EBV_4862_4341_err']!=-9999.99)
def_EBV_4862_4102 = (datL['EBV_4862_4102']!=-9999.99) & (datL['EBV_4862_4102_err']!=-9999.99)
def_EBV_4102_4341 = (datL['EBV_4102_4341']!=-9999.99) & (datL['EBV_4102_4341_err']!=-9999.99)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.8])
p.errorbar(x=datL['age_lightW_mean'], xerr=[datL['age_lightW_err_minus'], datL['age_lightW_err_plus']], y=datL['metallicity_lightW_mean'], yerr=[datL['metallicity_lightW_mean_err_minus'], datL['metallicity_lightW_mean_err_plus']],fmt='o',elinewidth=1, mfc='none', label='ELG lightW')
y = G06[1]
x = 10**G06[4]
xerr = [10**(G06[4])-10**(G06[5]), 10**(G06[6])-10**(G06[4])]
yerr = [G06[1]-G06[2], G06[3]-G06[1]]
p.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='o',elinewidth=1, mfc='none', label='SDSS G06')
gl=p.legend(loc=0)
gl.set_frame_on(False)
p.xscale('log')
p.xlabel(r'$age/[yr]$')
p.ylabel(r'stellar $\log(Z/[Z_\odot])$')
p.xlim((10**6, 10**11))
p.ylim((-2.5, 0.5))
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "age-metal-lightW.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.8])
p.errorbar(x=datL['age_massW_mean'], xerr=[datL['age_massW_err_minus'], datL['age_massW_err_plus']], y=datL['metallicity_massW_mean'], yerr=[datL['metallicity_massW_mean_err_minus'], datL['metallicity_massW_mean_err_plus']],fmt='o',elinewidth=1, mfc='none', label='ELG massW')
y = G06[1]
x = 10**G06[4]
xerr = [10**(G06[4])-10**(G06[5]), 10**(G06[6])-10**(G06[4])]
yerr = [G06[1]-G06[2], G06[3]-G06[1]]
p.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='o',elinewidth=1, mfc='none', label='SDSS G06')
gl=p.legend(loc=0)
gl.set_frame_on(False)
p.xscale('log')
p.xlabel(r'$age/[yr]$')
p.ylabel(r'stellar $\log(Z/[Z_\odot])$')
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "age-metal-massW.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.8])
ok = def_EBV_4862_4341 & detect_H1_4862 & detect_H1_4341
print len(datL['spm_EBV'][ok])
p.errorbar(datL['spm_EBV'][ok], datL['EBV_4862_4341'][ok], yerr=datL['EBV_4862_4341_err'][ok],fmt='o',elinewidth=1, mfc='none')#,label='4862-4341')
#ok = def_EBV_4862_4102 & detect_H1_4862 & detect_H1_4102
#print len(datL['spm_EBV'][ok])
#p.errorbar(datL['spm_EBV'][ok], datL['EBV_4862_4102'][ok], yerr=datL['EBV_4862_4102_err'][ok],fmt='o',elinewidth=1, mfc='none',label='4862-4102')
ok = def_EBV_4102_4341 & detect_H1_4341 & detect_H1_4102
print len(datL['spm_EBV'][ok])
p.errorbar(datL['spm_EBV'][ok], datL['EBV_4102_4341'][ok], yerr=datL['EBV_4102_4341_err'][ok],fmt='o',elinewidth=1, mfc='none')#,label='4341-4102')
p.plot([-0.1,1.5],[-0.1,1.5],'k--')
#p.legend(loc=0)
p.xlabel('E(B-V) SPM')
p.ylabel(r'E(B-V) Balmer decrement')
p.xlim((-0.1,1.5))
p.ylim((-0.1,1.5))
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "ebv-comparison-1.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.8])
ok = def_EBV_4862_4102 & def_EBV_4862_4341 & detect_all_H1
print len(datL['spm_EBV'][ok])
p.errorbar(datL['EBV_4862_4102'][ok], datL['EBV_4862_4341'][ok], xerr=datL['EBV_4862_4102_err'][ok], yerr=datL['EBV_4862_4341_err'][ok],fmt='o',elinewidth=1, mfc='none',label='x:4862-4102, y:4862-4341')
ok = def_EBV_4862_4102 & def_EBV_4102_4341 & detect_all_H1
print len(datL['spm_EBV'][ok])
p.errorbar(datL['EBV_4862_4102'][ok], datL['EBV_4102_4341'][ok], xerr=datL['EBV_4862_4102_err'][ok], yerr=datL['EBV_4102_4341_err'][ok],fmt='o',elinewidth=1, mfc='none',label='x:4862-4102, y:4341-4102')
ok = def_EBV_4862_4341 & def_EBV_4102_4341 & detect_all_H1
print len(datL['spm_EBV'][ok])
p.errorbar(datL['EBV_4862_4341'][ok], datL['EBV_4102_4341'][ok], xerr=datL['EBV_4862_4341_err'][ok], yerr=datL['EBV_4102_4341_err'][ok],fmt='o',elinewidth=1, mfc='none',label='x:4862-4341, y:4341-4102')
p.plot([-0.1,1.5],[-0.1,1.5],'k--')
gl=p.legend(loc=0)
gl.set_frame_on(False)
p.xlabel(r'E(B-V) Balmer decrement x')#GP $H\beta -H\gamma$')
p.ylabel(r'E(B-V) Balmer decrement y')#GP $H\beta -H\delta$')
p.xlim((-0.1,1.2))
p.ylim((-0.1,1.2))
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "ebv-comparison-2.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.8])
ok = detect_O3_5007 & detect_H1_4862
x = datL['metallicity_lightW_mean'][ok]
y = n.log10(datL['O3_5007_flux'][ok]/datL['H1_4862_flux'][ok])
p.plot(x, y,'b+')
p.ylabel(r'log([OIII]/Hbeta')#GP $H\beta -H\gamma$')
p.xlabel(r'log(Z/Z0)')#GP $H\beta -H\delta$')
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "metal-lineRatio.png"))
p.clf()
sys.exit()
Nst = (data['N_in_stack']==200)#&(data['Survey']==2)
EBvok = (Nst)&(data['H1_4862_flux']>data['H1_4862_fluxErr']) & (data['H1_4862_flux']>0) & (data['H1_4862_fluxErr']>0) & (data['H1_4341_flux']>data['H1_4341_fluxErr'])& (data['H1_4341_flux']>0)&(data['H1_4341_fluxErr']>0)& (data['EBV_4862_4341']!=-9999.99) & (data['EBV_4862_4341_err']!=-9999.99)
O2 = (data['lineWavelength']== 3728.) & (EBvok)
O3 = (data['lineWavelength']== 5007.)& (EBvok)
Hb = (data['lineWavelength']== 4862.) & (EBvok)
data['Redshift'][O2]
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.8])
for zz in set(data['Redshift'][O2]):
ok = (O2)&(data['Redshift']==zz)
p.errorbar(data['L_mean'][ok], data['EBV_4862_4341'][ok], xerr=[data['L_mean'][ok]-data['L_min'][ok],-data['L_mean'][ok]+data['L_max'][ok]],yerr=data['EBV_4862_4341_err'][ok],fmt='o',elinewidth=1, mfc='none',label=str(n.round(zz,3)))
p.legend(loc=0)
p.xlabel('[OII] line Luminosity')
p.ylabel(r'E(B-V) GP $H\beta -H\delta$')
p.xscale('log')
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "ebv-line-o2.png"))
p.clf()
Nst = (datL['N_in_stack']==200)#&(datL['Survey']==2)
EBvok = (Nst)&(datL['H1_4862_flux']>datL['H1_4862_fluxErr']) & (datL['H1_4862_flux']>0) & (datL['H1_4862_fluxErr']>0) & (datL['H1_4341_flux']>datL['H1_4341_fluxErr'])& (datL['H1_4341_flux']>0)&(datL['H1_4341_fluxErr']>0)& (datL['EBV_4862_4341']!=-9999.99) & (datL['EBV_4862_4341_err']!=-9999.99)
O2 = (datL['lineWavelength']== 3728.) & (EBvok)
O3 = (datL['lineWavelength']== 5007.)& (EBvok)
Hb = (datL['lineWavelength']== 4862.) & (EBvok)
datL['Redshift'][O2]
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.8])
for zz in set(datL['Redshift'][O2]):
ok = (O2)&(datL['Redshift']==zz)
p.errorbar(datL['L_mean'][ok], datL['EBV_4862_4341'][ok], xerr=[datL['L_mean'][ok]-datL['L_min'][ok],-datL['L_mean'][ok]+datL['L_max'][ok]],yerr=datL['EBV_4862_4341_err'][ok],fmt='o',elinewidth=1, mfc='none',label=str(n.round(zz,3)))
p.legend(loc=0)
p.xlabel('[OII] line Luminosity')
p.ylabel(r'E(B-V) GP $H\beta -H\delta$')
p.xscale('log')
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "ebv-line-o2-gp.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.8])
ok = (data['logOH_tremonti04']!=-9999.99) & (data['logOH_tremonti04_err']!=-9999.99)
p.errorbar(x=data['spm_light_metallicity'][ok], xerr=[data['spm_light_metallicity_err_plus'][ok], data['spm_light_metallicity_err_minus'][ok]], y=data['logOH_tremonti04'][ok], yerr=data['logOH_tremonti04_err'][ok],fmt='o',elinewidth=2, mfc='none',label='full')
ok = (datL['logOH_tremonti04']!=-9999.99) & (datL['logOH_tremonti04_err']!=-9999.99)
p.errorbar(x=datL['spm_light_metallicity'][ok], xerr=[datL['spm_light_metallicity_err_plus'][ok], datL['spm_light_metallicity_err_minus'][ok]], y=datL['logOH_tremonti04'][ok], yerr=datL['logOH_tremonti04_err'][ok],fmt='o',elinewidth=2, mfc='none',label='GP')
#p.plot([-1,2],[-1,2],'k--')
#p.legend(loc=0)
p.xlabel('log(Z/Zsun) SPM')
p.ylabel(r'12+log(O/H) (line ratios Tremonti 04 estimator)')
#p.xlim((-1,2))
#p.ylim((-1,2))
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "metal-comparison-1.png"))
p.clf()
# SFR comparison
o2_sfr_conv = 10**(0.27) * 10**(-41)
hb_sfr_conv = 10**(0.58) * 10**(-41)
sfrO2 = data['L_mean']*o2_sfr_conv
sfrO2_up = data['L_max']*o2_sfr_conv
sfrO2_low = data['L_min']*o2_sfr_conv
sfrHb = data['L_mean']*hb_sfr_conv
sfrHb_up = data['L_max']*hb_sfr_conv
sfrHb_low = data['L_min']*hb_sfr_conv
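# NB (assumption, not stated in this part of the script): L_mean / L_min / L_max are
# presumably line luminosities in erg/s, so the factors above act as Kennicutt-style
# linear calibrations, e.g.
#   SFR [Msun/yr] ~ 10**0.27 * 1e-41 * L([OII] 3728)   and
#   SFR [Msun/yr] ~ 10**0.58 * 1e-41 * L(Hbeta 4862)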
Nst = (data['N_in_stack']==400)
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ttt = p.scatter(data['spm_stellar_mass'][hb], data['spm_light_metallicity'][hb], marker='s', s=30, c=data['Redshift'][hb],label='Hb')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o2], data['spm_light_metallicity'][o2], marker ='o', s=30, c=data['Redshift'][o2],label='O2')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o3], data['spm_light_metallicity'][o3], marker ='^', s=30, c=data['Redshift'][o3],label='O3')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('redshift')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
#p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "mass-metallicity-redshift-all-hb-o2-o3-400.png"))
p.clf()
Nst = (data['N_in_stack']==100)
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ttt = p.scatter(data['spm_stellar_mass'][hb], data['spm_light_metallicity'][hb], marker='s', s=30, c=data['Redshift'][hb],label='Hb')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o2], data['spm_light_metallicity'][o2], marker ='o', s=30, c=data['Redshift'][o2],label='O2')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o3], data['spm_light_metallicity'][o3], marker ='^', s=30, c=data['Redshift'][o3],label='O3')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('redshift')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
#p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "mass-metallicity-redshift-all-hb-o2-o3-100.png"))
p.clf()
Nst = (data['N_in_stack']==200)
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ttt = p.scatter(data['spm_stellar_mass'][hb], data['spm_light_metallicity'][hb], marker='s', s=30, c=data['Redshift'][hb],label='Hb')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o2], data['spm_light_metallicity'][o2], marker ='o', s=30, c=data['Redshift'][o2],label='O2')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o3], data['spm_light_metallicity'][o3], marker ='^', s=30, c=data['Redshift'][o3],label='O3')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('redshift')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
#p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "mass-metallicity-redshift-all-hb-o2-o3-200.png"))
p.clf()
Nst = (data['N_in_stack']==200)
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ttt = p.scatter(data['spm_stellar_mass'][hb], data['spm_light_metallicity'][hb], marker='s', s=30, c=n.log10(sfrHb[hb]),label='Hb')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o2], data['spm_light_metallicity'][o2], marker ='o', s=30, c=n.log10(sfrO2[o2]),label='O2')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('log(SFR)')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
#p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "mass-metallicity-sfr-all-hb-o2-200.png"))
p.clf()
Nst = (data['N_in_stack']==400)
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ttt = p.scatter(data['spm_stellar_mass'][hb], data['spm_light_metallicity'][hb], marker='s', s=30, c=n.log10(sfrHb[hb]),label='Hb')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o2], data['spm_light_metallicity'][o2], marker ='o', s=30, c=n.log10(sfrO2[o2]),label='O2')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('log(SFR)')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
#p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "mass-metallicity-sfr-all-hb-o2-400.png"))
p.clf()
Nst = (data['N_in_stack']==100)
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ttt = p.scatter(data['spm_stellar_mass'][hb], data['spm_light_metallicity'][hb], marker='s', s=30, c=n.log10(sfrHb[hb]),label='Hb')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o2], data['spm_light_metallicity'][o2], marker ='o', s=30, c=n.log10(sfrO2[o2]),label='O2')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('log(SFR)')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
#p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "mass-metallicity-sfr-all-hb-o2-100.png"))
p.clf()
Nst = (data['N_in_stack']==100)&(data['Redshift']>=0.7)&(data['Redshift']<=0.85)
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ttt = p.scatter(data['spm_stellar_mass'][hb], data['spm_light_metallicity'][hb], marker='s', s=30, c=n.log10(sfrHb[hb]),label='Hb')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o2], data['spm_light_metallicity'][o2], marker ='o', s=30, c=n.log10(sfrO2[o2]),label='O2')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('log(SFR)')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "mass-metallicity-sfr-07z085-hb-o2-100.png"))
p.clf()
Nst = (data['N_in_stack']==200)&(data['Redshift']>=0.7)&(data['Redshift']<=0.85)
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ttt = p.scatter(data['spm_stellar_mass'][hb], data['spm_light_metallicity'][hb], marker='s', s=30, c=n.log10(sfrHb[hb]),label='Hb')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o2], data['spm_light_metallicity'][o2], marker ='o', s=30, c=n.log10(sfrO2[o2]),label='O2')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('log(SFR)')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "mass-metallicity-sfr-07z085-hb-o2-200.png"))
p.clf()
Nst = (data['N_in_stack']==400)&(data['Redshift']>=0.7)&(data['Redshift']<=0.85)
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ttt = p.scatter(data['spm_stellar_mass'][hb], data['spm_light_metallicity'][hb], marker='s', s=30, c=n.log10(sfrHb[hb]),label='Hb')
ttt.set_edgecolor('face')
ttt = p.scatter(data['spm_stellar_mass'][o2], data['spm_light_metallicity'][o2], marker ='o', s=30, c=n.log10(sfrO2[o2]),label='O2')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('log(SFR)')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "mass-metallicity-sfr-07z085-hb-o2-400.png"))
p.clf()
# mass metallicity relation
# Mannucci et al. 2010 :
y_rel = lambda x_rel : 8.90 + 0.39*x_rel - 0.20*x_rel*x_rel - 0.077*x_rel*x_rel*x_rel + 0.064*x_rel*x_rel*x_rel*x_rel
x_rel = n.arange(8.5,11.5,0.05) - 10
y_pr = y_rel(x_rel)
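# The quartic above appears to be the fundamental metallicity relation of
# Mannucci et al. (2010), with y = 12+log(O/H) and x = log(M*) - 0.32*log(SFR) - 10
# (the same combination used for x_obs_1 / x_obs_2 below); x_rel samples the
# model curve over x in [-1.5, 1.5).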
Nst = (data['logOH_tremonti04']!=-9999.99) # (data['Redshift']>=0.7)&(data['Redshift']<=0.85) &(data['N_in_stack']==400)&
hb = (data['lineWavelength']==4862.0) & (Nst)
o2 = (data['lineWavelength']==3728.0) & (Nst)
o3 = (data['lineWavelength']==5007.0) & (Nst)
x_obs_1 = data['spm_stellar_mass'] - 0.32 * n.log10(sfrHb) - 10
x_obs_2 = data['spm_stellar_mass'] - 0.32 * n.log10(sfrO2) - 10
y_obs_1 = 9.185-0.313* data['logOH_tremonti04'] - 0.264 * data['logOH_tremonti04']**2 - 0.321 * data['logOH_tremonti04']**3
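# The coefficients above match the form of the Tremonti et al. (2004) R23 calibration,
# 12+log(O/H) = 9.185 - 0.313*x - 0.264*x**2 - 0.321*x**3 (with x = log R23);
# note that y_obs_1 is only referenced in the commented-out plot call below.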
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
p.plot(x_obs_1[hb], data['logOH_tremonti04'][hb], 'bo',label='Hb')
p.plot(x_obs_2[o2], data['logOH_tremonti04'][o2], 'r*',label='O2')
p.plot(x_rel, y_pr, label='Mannucci 2010')
p.ylabel(r'$12+log(OH)$')
p.xlabel('$\log(M_*)-0.32 log(SFR) - 10$')
p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.ylim((8.2,9.5))
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "manucci-2010-relation-all-hb-o2.png"))
p.clf()
Nst = (datL['logOH_tremonti04']!=-9999.99) # (datL['Redshift']>=0.7)&(datL['Redshift']<=0.85) &(datL['N_in_stack']==400)&
hb = (datL['lineWavelength']==4862.0) & (Nst)
o2 = (datL['lineWavelength']==3728.0) & (Nst)
o3 = (datL['lineWavelength']==5007.0) & (Nst)
x_obs_1 = datL['spm_stellar_mass'] - 0.32 * n.log10(sfrHb) - 10
x_obs_2 = datL['spm_stellar_mass'] - 0.32 * n.log10(sfrO2) - 10
y_obs_1 = 9.185-0.313* datL['logOH_tremonti04'] - 0.264 * datL['logOH_tremonti04']**2 - 0.321 * datL['logOH_tremonti04']**3
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
p.plot(x_obs_1[hb], datL['logOH_tremonti04'][hb], 'bo',label='Hb')
p.plot(x_obs_2[o2], datL['logOH_tremonti04'][o2], 'r*',label='O2')
p.plot(x_rel, y_pr, label='Mannucci 2010')
p.ylabel(r'$12+log(OH)$')
p.xlabel('$\log(M_*)-0.32 log(SFR) - 10$')
p.title(r'$0.7<z<0.85$ ELG')
p.ylim((8.2,9.5))
p.legend(loc=0)
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "manucci-2010-GP-relation-all-hb-o2.png"))
p.clf()
sys.exit()
print(L_MEAN)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.8])
p.errorbar(x=gp_SFR_O2_3728[ok], xerr = gp_SFR_O2_3728_err[ok], y=gp_SFR_H1_4862[ok], yerr=gp_SFR_H1_4862_err[ok],fmt='o',elinewidth=2, mfc='none')
p.plot([-1,2],[-1,2],'k--')
p.legend(loc=0)
p.xlabel('SFR GP [OII]')
p.ylabel(r'SFR GP H$\beta$')
#p.xlim((-1,2))
#p.ylim((-1,2))
p.grid()
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "sfr-comparison-2.png"))
p.clf()
# mass metallicity relation
# Mannucci et al. 2010 :
y_rel = lambda x_rel : 8.90 + 0.39*x_rel - 0.20*x_rel*x_rel - 0.077*x_rel*x_rel*x_rel + 0.064*x_rel*x_rel*x_rel*x_rel
x_obs_1 = spm_stellar_mass - 0.32 * n.log10(gp_SFR_O2_3728) - 10
x_obs_2 = spm_stellar_mass - 0.32 * n.log10(gp_SFR_H1_4862) - 10
y_obs_1 = 9.185-0.313*spm_light_metallicity - 0.264 *spm_light_metallicity**2 - 0.321 *spm_light_metallicity**3
x_rel = n.arange(8.5,11.5,0.05) - 10
y_pr = y_rel(x_rel)
#base = (gp_SFR_O2_3728>0)& (gp_SFR_O2_3728_err>0)&(spm_stellar_mass >0)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
p.plot(G05[0],G05[1], 'k',label ="G05")
p.plot(G05[0],G05[2], 'k--')
p.plot(G05[0],G05[3], 'k--')
#p.plot(x_obs_1, y_obs_1, )
ok = (lineWavelength==4862.)
ttt = p.scatter(spm_stellar_mass[ok], spm_light_metallicity[ok], marker='s', s=30, c=n.log10(L_MEAN[ok]*10**(0.58) * 10**(-41) ),label='Hb')
ttt.set_edgecolor('face')
#ok = (base) &(lineWavelength==5007.)
#ttt = p.scatter(spm_stellar_mass[ok], spm_light_metallicity[ok], marker ='o', s=30, c=n.log10(gp_SFR_H1_4862[ok]),label='O3')
#ttt.set_edgecolor('face')
ok = (lineWavelength==3728.)
ttt = p.scatter(spm_stellar_mass[ok], spm_light_metallicity[ok], marker = '*', s=30, c=n.log10(L_MEAN[ok]*10**(0.27) * 10**(-41) ),label='O2')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('log(SFR)')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
#p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "sfr-mass-z-allredshift.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.8,0.75])
#p.plot(x_obs_1, y_obs_1, )
ok = (lineWavelength==4862.)
ttt = p.scatter(spm_stellar_mass[ok], spm_light_metallicity[ok], marker='s', s=30, c=Redshift[ok],label='Hb')
ttt.set_edgecolor('face')
ok = (lineWavelength==5007.)
ttt = p.scatter(spm_stellar_mass[ok], spm_light_metallicity[ok], marker ='o', s=30, c=Redshift[ok],label='O3')
ttt.set_edgecolor('face')
ok = (lineWavelength==3728.)
ttt = p.scatter(spm_stellar_mass[ok], spm_light_metallicity[ok], marker = '*', s=30, c=Redshift[ok],label='O2')
ttt.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label('redshift')
p.ylabel(r'$log([Z/H])$')
p.xlabel('$\log(M_*)$')
#p.title(r'$0.7<z<0.85$ ELG')
p.legend(loc=0)
p.savefig( join(os.environ['SPECTRASTACKS_DIR'], "plots", "sfr-mass-redshift-allredshift.png"))
p.clf() | cc0-1.0 |
constantinpape/mc_luigi | mc_luigi/customTargets.py | 1 | 17830 | from __future__ import division, print_function
from luigi.target import FileSystemTarget
from luigi.file import LocalFileSystem
import os
import numpy as np
import pickle
import vigra
import h5py
import z5py
from .pipelineParameter import PipelineParameter
# import the proper nifty version
try:
import nifty
import nifty.graph.rag as nrag
except ImportError:
try:
import nifty_with_cplex as nifty
import nifty_with_cplex.graph.rag as nrag
except ImportError:
import nifty_with_gurobi as nifty
import nifty_with_gurobi.graph.rag as nrag
# FIXME this should work ...
#if nifty.Configuration.WITH_HDF5:
#    import nifty.hdf5 as nh5
#
#if nifty.Configuration.WITH_Z5:
#    import nifty.z5 as nz5
# fallback: import the hdf5 bindings directly if they are available, since
# HDF5Target and StackedRagTarget.read reference `nh5`
try:
    import nifty.hdf5 as nh5
except ImportError:
    nh5 = None
import nifty.z5 as nz5
class BaseTarget(FileSystemTarget):
"""
Custom target base class
"""
fs = LocalFileSystem()
def __init__(self, path):
super(BaseTarget, self).__init__(path)
def makedirs(self):
"""
Create all parent folders if they do not exist.
"""
normpath = os.path.normpath(self.path)
parentfolder = os.path.dirname(normpath)
if parentfolder:
try:
os.makedirs(parentfolder)
except OSError:
pass
class VolumeTarget(BaseTarget):
"""
Volume target, can hold n5 or hdf5 backend.
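    A minimal usage sketch (illustrative only: the path, shape, chunking and dtype
    are hypothetical, and PipelineParameter().useN5Backend is assumed to be set):
        target = VolumeTarget('/some/dir/volume' + VolumeTarget.file_ending())
        target.open(key='data', dtype='uint32',
                    shape=(100, 2048, 2048), chunks=(1, 512, 512))
        target.write((0, 0, 0), block)  # block: numpy array with matching dtype
        sub = target.read((0, 0, 0), (1, 512, 512))
        target.close()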
"""
n5_ending = '.n5'
h5_ending = '.h5'
def __init__(self, path):
        # FIXME this does not work as a default argument
use_n5 = PipelineParameter().useN5Backend
super(BaseTarget, self).__init__(path)
self.makedirs()
self._impl = N5Target(self.path) if use_n5 else HDF5Target(self.path)
@staticmethod
def file_ending():
return VolumeTarget.n5_ending if PipelineParameter().useN5Backend else VolumeTarget.h5_ending
def __contains__(self, key):
return key in self._impl
def open(self, key='data', dtype=None, shape=None, chunks=None, **compression_opts):
return self._impl.open(key=key, dtype=dtype, shape=shape, chunks=chunks,
**compression_opts)
def write(self, start, data, key='data'):
self._impl.write(start, data, key)
def read(self, start, stop, key='data'):
return self._impl.read(start, stop, key)
def get(self, key='data'):
return self._impl.get(key)
def shape(self, key='data'):
return self._impl.shape(key)
def chunks(self, key='data'):
return self._impl.chunks(key)
def dtype(self, key='data'):
return self._impl.dtype(key)
def close(self):
self._impl.close()
def keys_on_filesystem(self):
return self._impl.keys_on_filesystem()
def keys(self):
return self._impl.keys()
# TODO interface for the offsets once implemented in z5
# TODO enable zarr format ?!
class N5Target(object):
"""
Target for data in n5 format
"""
def __init__(self, path):
self.path = path
self.datasets = {}
self.n5_file = None
def _open_file(self):
self.n5_file = z5py.File(self.path, use_zarr_format=False)
def __contains__(self, key):
if self.n5_file is None:
self._open_file()
return key in self.n5_file
def keys_on_filesystem(self):
# open the n5 file if it wasn't opened yet
if self.n5_file is None:
self._open_file()
return self.n5_file.keys()
# TODO change compression to blosc as soon as n5 supports it !
# TODO offset handling, need to implement loading with offsets and offsets in z5
def open(self, key='data', dtype=None, shape=None, chunks=None, compression='gzip', **compression_opts):
# open the n5 file if it wasn't opened yet
if self.n5_file is None:
self._open_file()
# if we have already opened the dataset, we don't need to do anything
if key in self.datasets:
return self
# otherwise we need to check, if this dataset exists on file and either open or create it
if key in self.n5_file:
self.datasets[key] = self.n5_file[key]
else:
# if we need to create the dataset, we need to make sure that
# dtype, shape and chunks are actually specified
assert dtype is not None, "Can't open a new dataset if dtype is not specified"
assert shape is not None, "Can't open a new dataset if shape is not specified"
assert chunks is not None, "Can't open a new dataset if chunks are not specified"
self.datasets[key] = self.n5_file.create_dataset(key,
dtype=dtype,
shape=shape,
chunks=chunks,
compressor=compression,
**compression_opts)
# TODO implement offsets in n5
# check if any offsets were added to the array
# if self.has_offsets(key):
# ds = self.datasets[key]
# offset_front = ds.attrs.get('offset_front')
# offset_back = ds.attrs.get('offset_back')
# self.set_offsets(offset_front, offset_back, 'data', serialize_offsets=False)
return self
def write(self, start, data, key='data'):
assert self.n5_file is not None, "Need to open the n5 file first"
assert key in self.datasets, "Can't write to a dataset that has not been opened"
self.datasets[key].write_subarray(start, data)
def read(self, start, stop, key='data'):
assert self.n5_file is not None, "Need to open the n5 file first"
assert key in self.datasets, "Can't read from a dataset that has not been opened"
return self.datasets[key].read_subarray(start, stop)
# get the dataset implementation to pass to c++ code
def get(self, key='data'):
assert self.n5_file is not None, "Need to open the n5 file first"
assert key in self.datasets, "Can't get ds impl for a dataset that has not been opened"
dtype = self.datasets[key].dtype
return nz5.datasetWrapper(dtype, os.path.join(self.path, key))
def shape(self, key='data'):
assert self.n5_file is not None, "Need to open the n5 file first"
assert key in self.datasets, "Can't get shape for a dataset that has not been opened"
return self.datasets[key].shape
def chunks(self, key='data'):
assert self.n5_file is not None, "Need to open the n5 file first"
assert key in self.datasets, "Can't get chunks for a dataset that has not been opened"
return self.datasets[key].chunks
def dtype(self, key='data'):
assert self.n5_file is not None, "Need to open the n5 file first"
assert key in self.datasets, "Can't get chunks for a dataset that has not been opened"
return self.datasets[key].dtype
def keys(self):
assert self.n5_file is not None, "Need to open the n5 file first"
return self.datasets.keys()
    # dummy implementation to stay consistent with HDF5Target
def close(self):
pass
# add offsets to the nh5 array
def set_offsets(self, offset_front, offset_back, key='data', serialize_offsets=True):
assert False, "Offsets not implemented in z5py yet"
assert key in self.datasets, "Can't set offsets for a dataset that has not been opened"
# TODO implement in z5
self.datasets[key].set_offset_front(offset_front)
self.datasets[key].set_offset_back(offset_back)
# serialize the offsets
if serialize_offsets:
self.serialize_offsets(offset_front, offset_back, key)
def serialize_offsets(self, offset_front, offset_back, key='data'):
assert False, "Offsets not implemented in z5py yet"
assert key in self.datasets, "Can't serialize offsets for a dataset that has not been opened"
self.datasets[key].attrs['offset_front'] = offset_front
self.datasets[key].attrs['offset_back'] = offset_back
@staticmethod
def has_offsets(path, key='data'):
assert False, "Offsets not implemented in z5py yet"
f = z5py.File(path)
ds = f[key]
if 'offset_front' in ds.attrs:
assert 'offset_back' in ds.attrs
return True
else:
return False
class HDF5Target(object):
"""
Target for h5 data larger than RAM
"""
def __init__(self, path):
self.path = path
self.datasets = {}
self.h5_file = None
def _open_file(self):
self.h5_file = nh5.openFile(self.path) if os.path.exists(self.path) else \
nh5.createFile(self.path)
def __contains__(self, key):
with h5py.File(self.path) as f:
return key in f
def keys_on_filesystem(self):
with h5py.File(self.path) as f:
return f.keys()
def open(self, key='data', dtype=None, shape=None, chunks=None, compression='gzip', **compression_opts):
        # open the h5 file if it wasn't opened yet
if self.h5_file is None:
self._open_file()
# if we have already opened the dataset, we don't need to do anything
if key in self.datasets:
return self
# otherwise we need to check, if this dataset exists on file and either open or create it
with h5py.File(self.path) as fh5:
has_key = key in fh5
if has_key:
if dtype is None:
with h5py.File(self.path) as f:
dtype = f[key].dtype
self.datasets[key] = nh5.hdf5Array(dtype, self.h5_file, key)
else:
# if we need to create the dataset, we need to make sure that
# dtype, shape and chunks are actually specified
assert dtype is not None, "Can't open a new dataset if dtype is not specified"
assert shape is not None, "Can't open a new dataset if shape is not specified"
assert chunks is not None, "Can't open a new dataset if chunks are not specified"
clevel = compression_opts.get('level', 4)
compression_ = -1 if compression != 'gzip' else clevel
self.datasets[key] = nh5.hdf5Array(dtype, self.h5_file, key,
shape, chunks,
compression=compression_)
        # TODO re-enable support for offsets once we have this in z5
# check if any offsets were added to the array
# if self.has_offsets(key):
# ds = self.datasets[key]
# offset_front = ds.attrs.get('offset_front')
# offset_back = ds.attrs.get('offset_back')
# self.set_offsets(offset_front, offset_back, 'data', serialize_offsets=False)
return self
def close(self):
assert self.h5_file is not None, "Need to open the h5 file first"
nh5.closeFile(self.h5_file)
def write(self, start, data, key='data'):
assert self.h5_file is not None, "Need to open the h5 file first"
assert key in self.datasets, "Can't write to a dataset that has not been opened"
self.datasets[key].writeSubarray(list(start), data)
def read(self, start, stop, key='data'):
assert self.h5_file is not None, "Need to open the h5 file first"
assert key in self.datasets, "Can't read from a dataset that has not been opened"
return self.datasets[key].readSubarray(list(start), list(stop))
def get(self, key='data'):
assert self.h5_file is not None, "Need to open the h5 file first"
assert key in self.datasets, "Can't get ds impl for a dataset that has not been opened"
return self.datasets[key]
def shape(self, key='data'):
assert self.h5_file is not None, "Need to open the h5 file first"
assert key in self.datasets, "Can't get shape for a dataset that has not been opened"
return self.datasets[key].shape
def chunks(self, key='data'):
assert self.h5_file is not None, "Need to open the h5 file first"
assert key in self.datasets, "Can't get chunks for a dataset that has not been opened"
return self.datasets[key].chunkShape
def dtype(self, key='data'):
assert self.h5_file is not None, "Need to open the h5 file first"
with h5py.File(self.path) as f:
return f[key].dtype
def keys(self):
assert self.h5_file is not None, "Need to open the h5 file first"
return self.datasets.keys()
# add offsets to the nh5 array
def set_offsets(self, offset_front, offset_back, key='data', serialize_offsets=True):
assert self.h5_file is not None, "Need to open the n5 file first"
assert key in self.datasets, "Can't set offsets for a dataset that has not been opened"
self.datasets[key].setOffsetFront(offset_front)
self.datasets[key].setOffsetBack(offset_back)
# serialize the offsets
if serialize_offsets:
self.serialize_offsets(offset_front, offset_back, key)
def serialize_offsets(self, offset_front, offset_back, key='data'):
assert self.h5_file is not None, "Need to open the n5 file first"
assert key in self.datasets, "Can't serialize offsets for a dataset that has not been opened"
self.datasets[key].attrs.create('offset_front', offset_front)
self.datasets[key].attrs.create('offset_back', offset_back)
@staticmethod
def has_offsets(path, key='data'):
with h5py.File(path) as f:
ds = f[key]
if 'offset_front' in ds.attrs:
assert 'offset_back' in ds.attrs
return True
else:
return False
class HDF5DataTarget(BaseTarget):
"""
Target for h5 data in RAM
"""
def __init__(self, path):
super(HDF5DataTarget, self).__init__(path)
def open(self, shape, dtype, compression=None, chunks=None, key='data'):
with h5py.File(self.path) as f:
f.create_dataset(key,
shape=shape,
compression=compression,
chunks=chunks,
dtype=dtype)
def write(self, data, key="data", compression=None):
self.makedirs()
if compression is not None:
vigra.writeHDF5(data, self.path, key, compression=compression)
else:
vigra.writeHDF5(data, self.path, key)
def writeVlen(self, data, key='data'):
self.makedirs()
with h5py.File(self.path) as f:
dt = h5py.special_dtype(vlen=np.dtype(data[0].dtype))
f.create_dataset(key, data=data, dtype=dt)
def read(self, key="data"):
return vigra.readHDF5(self.path, key)
def shape(self, key="data"):
with h5py.File(self.path) as f:
shape = f[key].shape
return shape
def writeSubarray(self, start, data, key="data"):
bb = tuple(slice(sta, sta + sha) for sta, sha in zip(start, data.shape))
with h5py.File(self.path) as f:
f[key][bb] = data
class PickleTarget(BaseTarget):
"""
Target for pickle data
"""
def __init__(self, path):
super(PickleTarget, self).__init__(path)
def open(self, mode='r'):
raise AttributeError("Not implemented")
def write(self, data):
self.makedirs()
        with open(self.path, 'wb') as f:
pickle.dump(data, f)
def read(self):
        with open(self.path, 'rb') as f:
return pickle.load(f)
# Folder target that does basically nothing
# we need this for the sklearn random forest,
# that is pickled to different files in a common folder
class FolderTarget(BaseTarget):
"""
Target for multiple files in folder
"""
def __init__(self, path):
super(FolderTarget, self).__init__(path)
self.path = path
def open(self, mode='r'):
raise AttributeError("Not implemented")
def write(self, data):
raise AttributeError("Not implemented")
def read(self):
raise AttributeError("Not implemented")
# serializing the nifty rag
class StackedRagTarget(BaseTarget):
"""
Target for nifty stacked rag
"""
def __init__(self, path):
super(StackedRagTarget, self).__init__(path)
def open(self, mode='r'):
raise AttributeError("Not implemented")
def write(self, rag, labelsPath, labelsKey="data"):
self.makedirs()
nrag.writeStackedRagToHdf5(rag, self.path)
vigra.writeHDF5(labelsPath, self.path, "labelsPath")
vigra.writeHDF5(labelsKey, self.path, "labelsKey")
# read and deserialize the rag
def read(self):
labelsPath = vigra.readHDF5(self.path, "labelsPath")
labelsKey = vigra.readHDF5(self.path, "labelsKey")
with h5py.File(self.path) as f:
dtype = f.attrs['dtype']
if PipelineParameter().useN5Backend:
labels = nz5.datasetWrapper(dtype, os.path.join(labelsPath, labelsKey))
else:
h5_file = nh5.openFile(labelsPath)
labels = nh5.Hdf5Array(dtype, h5_file, labelsKey)
nNodes = vigra.readHDF5(self.path, "numberOfNodes")
return nrag.readStackedRagFromHdf5(labels, nNodes, self.path)
# only read sub-parts
def readKey(self, key):
with h5py.File(self.path, 'r') as f:
if key not in f.keys():
print("The key", key, "is not in", f.keys())
raise KeyError("Key not found!")
return vigra.readHDF5(self.path, key)
| mit |
cloud-fan/spark | python/pyspark/sql/dataframe.py | 4 | 100392 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
import warnings
from functools import reduce
from html import escape as html_escape
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3.0
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation opened.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
.. versionadded:: 1.3.0
Examples
--------
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. deprecated:: 2.0.0
Use :meth:`DataFrame.createOrReplaceTempView` instead.
Examples
--------
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.",
FutureWarning
)
self._jdf.createOrReplaceTempView(name)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
        throws :class:`TempTableAlreadyExistsException` if the view name already exists in the
catalog.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
        throws :class:`TempTableAlreadyExistsException` if the view name already exists in the
catalog.
.. versionadded:: 2.1.0
Examples
--------
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
.. versionadded:: 2.2.0
Examples
--------
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except Exception as e:
raise ValueError(
"Unable to parse datatype from schema. %s" % e) from e
return self._schema
def printSchema(self):
"""Prints out the schema in the tree format.
.. versionadded:: 1.3.0
Examples
--------
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
.. versionadded:: 1.3.0
        Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
When this is a string without specifying the ``mode``, it works as the mode is
specified.
mode : str, optional
specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
Examples
--------
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
"""
if extended is not None and mode is not None:
raise ValueError("extended and mode should not be set together.")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = isinstance(extended, bool) and mode is None
# For the case when extended is mode:
# df.explain("formatted")
is_extended_as_mode = isinstance(extended, str) and mode is None
# For the mode specified:
# df.explain(mode="formatted")
is_mode_case = extended is None and isinstance(mode, str)
if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
argtypes = [
str(type(arg)) for arg in [extended, mode] if arg is not None]
raise TypeError(
"extended (optional) and mode (optional) should be a string "
"and bool; however, got [%s]." % ", ".join(argtypes))
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
elif is_extended_as_mode:
explain_mode = extended
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
As standard in SQL, this function resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
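        Examples
        --------
        >>> df.isStreaming
        False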
"""
return self._jdf.isStreaming()
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
.. versionadded:: 1.3.0
Parameters
----------
n : int, optional
Number of rows to show.
truncate : bool or int, optional
If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
            and aligns cells right.
vertical : bool, optional
If set to ``True``, print output rows vertically (one line
per column value).
Examples
--------
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if not isinstance(n, int) or isinstance(n, bool):
raise TypeError("Parameter 'n' (number of rows) must be an int")
if not isinstance(vertical, bool):
raise TypeError("Parameter 'vertical' must be a bool")
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
try:
int_truncate = int(truncate)
except ValueError:
raise TypeError(
"Parameter 'truncate={}' should be either bool or int.".format(truncate))
print(self._jdf.showString(n, int_truncate, vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
.. versionadded:: 2.1.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
.. versionadded:: 2.3.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
.. versionadded:: 2.1.0
Parameters
----------
eventTime : str
the name of the column that contains the event time of the row.
delayThreshold : str
            the minimum delay to wait for late data to arrive, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
Notes
-----
This API is evolving.
>>> from pyspark.sql.functions import timestamp_seconds
>>> sdf.select(
... 'name',
... timestamp_seconds(sdf.time).alias('time')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
.. versionadded:: 2.2.0
Parameters
----------
name : str
A name of the hint.
parameters : str, list, float or int
Optional parameters.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (str, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.count()
2
"""
return int(self._jdf.count())
def collect(self):
"""Returns all the records as a list of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
.. versionadded:: 2.0.0
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition before it is needed.
Examples
--------
>>> list(df.toLocalIterator())
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
def limit(self, num):
"""Limits the result count to the number specified.
.. versionadded:: 1.3.0
Examples
--------
>>> df.limit(1).collect()
[Row(age=2, name='Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.take(2)
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
return self.limit(num).collect()
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
.. versionadded:: 3.0.0
Examples
--------
>>> df.tail(1)
[Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
        This is a shorthand for ``df.rdd.foreachPartition()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
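        Examples
        --------
        >>> df.cache()
        DataFrame[age: int, name: string]
        >>> df.unpersist()
        DataFrame[age: int, name: string]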
"""
self.is_cached = True
self._jdf.cache()
return self
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK_DESER):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_AND_DISK_DESER`)
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala in 3.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
.. versionadded:: 2.1.0
Examples
--------
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 1.3.0
Notes
-----
`blocking` default has changed to ``False`` to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
.. versionadded:: 1.4.0
Parameters
----------
numPartitions : int
specify the target number of partitions
Examples
--------
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
.. versionadded:: 1.3.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
Examples
--------
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
.. versionadded:: 2.4.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
Notes
-----
Due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
Examples
--------
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
withReplacement : bool, optional
Sample with replacement or not (default ``False``).
fraction : float, optional
Fraction of rows to generate, range [0.0, 1.0].
seed : int, optional
Seed for sampling (default a random seed).
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
`fraction` is required, and `withReplacement` and `seed` are optional.
Examples
--------
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = int(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
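# Summary of the accepted call shapes handled above (added for clarity, not
# part of the original source); `df` is assumed as in the doctests:
#     df.sample(0.5)                          # fraction given positionally
#     df.sample(fraction=0.5, seed=3)         # withReplacement omitted, kwargs
#     df.sample(True, 0.5, 3)                 # withReplacement set explicitly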
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
column that defines strata
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
fractions : dict
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
seed : int, optional
random seed
Returns
-------
a new :class:`DataFrame` that represents the stratified sample
Examples
--------
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
"""
if isinstance(col, str):
col = Column(col)
elif not isinstance(col, Column):
raise TypeError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise TypeError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, str)):
raise TypeError("key must be float, int, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
.. versionadded:: 1.4.0
Parameters
----------
weights : list
list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed : int, optional
The seed for sampling.
Examples
--------
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), int(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
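# Worked example, added for clarity and not part of the original source: the
# weights are normalized before splitting, so randomSplit([1.0, 2.0]) assigns
# each row to the first split with probability 1/3 and to the second with
# probability 2/3 (the doctest counts above match this only approximately,
# since the split is random).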
@property
def dtypes(self):
"""Returns all column names and their data types as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
def columns(self):
"""Returns all column names as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
.. versionadded:: 2.3.0
Parameters
----------
colName : str
string, column name specified as a regex.
Examples
--------
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, str):
raise TypeError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
.. versionadded:: 1.3.0
Parameters
----------
alias : str
an alias name to be set for the :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name='Bob', name='Bob', age=5), Row(name='Alice', name='Alice', age=2)]
"""
assert isinstance(alias, str), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
.. versionadded:: 2.1.0
Parameters
----------
other : :class:`DataFrame`
Right side of the cartesian product.
Examples
--------
>>> df.select("age", "name").collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df2.select("name", "height").collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name='Alice', height=80), Row(age=2, name='Alice', height=85),
Row(age=5, name='Bob', height=80), Row(age=5, name='Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
.. versionadded:: 1.3.0
Parameters
----------
other : :class:`DataFrame`
Right side of the join
on : str, list or :class:`Column`, optional
a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
how : str, optional
default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
Examples
--------
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name='Bob', height=85), Row(name='Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85), Row(name='Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name='Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name='Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], str):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, str), "how should be a string"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
.. versionadded:: 1.6.0
Parameters
----------
cols : str, list or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
.. versionadded:: 1.3.0
Parameters
----------
cols : str, list, or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
.. versionadded:: 1.3.1
This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Use summary for expanded statistics and control over which statistics to compute.
Examples
--------
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
See Also
--------
DataFrame.summary
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. versionadded:: 2.3.0
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See Also
--------
DataFrame.describe
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. versionadded:: 1.3.0
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
n : int, optional
default 1. Number of rows to return.
Returns
-------
If n is None, return a single :class:`Row`.
Otherwise, return a list containing the first ``n`` rows, each as a :class:`Row`.
Examples
--------
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
def first(self):
"""Returns the first row as a :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.first()
Row(age=2, name='Alice')
"""
return self.head()
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name='Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name='Bob')]
"""
if isinstance(item, str):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str, :class:`Column`, or list
column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
Examples
--------
>>> df.select('*').collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.select('name', 'age').collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name='Alice', age=12), Row(name='Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
.. versionadded:: 1.3.0
Examples
--------
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
Examples
--------
>>> df.filter(df.age > 3).collect()
[Row(age=5, name='Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name='Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name='Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name='Alice')]
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
Examples
--------
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name='Alice', age=2, count=1), Row(name='Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregations on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy().agg()``).
.. versionadded:: 1.3.0
Examples
--------
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
def unionByName(self, other, allowMissingColumns=False):
""" Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
.. versionadded:: 2.3.0
Examples
--------
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
When the parameter `allowMissingColumns` is ``True``, the set of column names
in this and other :class:`DataFrame` can differ; missing columns will be filled with null.
Further, the missing columns of this :class:`DataFrame` will be added at the end
in the schema of the union result:
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+
|col0|col1|col2|col3|
+----+----+----+----+
| 1| 2| 3|null|
|null| 4| 5| 6|
+----+----+----+----+
.. versionchanged:: 3.1.0
Added optional argument `allowMissingColumns` to specify whether to allow
missing columns.
"""
return DataFrame(self._jdf.unionByName(other._jdf, allowMissingColumns), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL. As standard in SQL, this function
resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
This is equivalent to `EXCEPT DISTINCT` in SQL.
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be, and the system will limit the state accordingly. In addition, data arriving later than
the watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
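# Illustrative streaming sketch of the watermark note above (added for clarity,
# not part of the original source); `streaming_df` and its `event_time` and
# `id` columns are assumed to exist:
#     streaming_df.withWatermark('event_time', '10 minutes') \
#                 .dropDuplicates(['id', 'event_time'])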
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
how : str, optional
'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
thresh: int, optional
default None
If specified, drop rows that have fewer than `thresh` non-null values.
This overwrites the `how` parameter.
subset : str, tuple or list, optional
optional list of column names to consider.
Examples
--------
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise TypeError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
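# Note on the thresh default above (added for clarity, not part of the original
# source): with how='any' a row survives only if every column in `subset` is
# non-null (thresh == len(subset)), while how='all' keeps any row with at least
# one non-null value in `subset` (thresh == 1). An explicit thresh overrides
# `how` entirely.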
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
value : int, float, string, bool or dict
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
subset : str, tuple or list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, str, bool, dict)):
raise TypeError("value should be a float, int, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, int):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise TypeError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
or strings. `value` can be None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements all values to be replaced should have unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
an arbitrary replacement will be used.
.. versionadded:: 1.4.0
Parameters
----------
to_replace : bool, int, float, string, list or dict
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
value : bool, int, float, string or None, optional
The replacement value must be a bool, int, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
subset : list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(str)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(str)
all_of_numeric = all_of((float, int))
# Validate input types
valid_types = (bool, float, int, str, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise TypeError(
"to_replace should be a bool, float, int, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise TypeError("If to_replace is not a dict, value should be "
"a bool, float, int, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, str))):
raise TypeError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, str)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, str)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, str):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
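# Worked example of the reshaping above (added for clarity, not part of the
# original source): df.na.replace(['Alice', 'Bob'], 'X') first expands the
# scalar value to ['X', 'X'] and then folds both lists into
# rep_dict = {'Alice': 'X', 'Bob': 'X'} before handing it to the JVM side.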
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
present in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
.. versionadded:: 2.0.0
Parameters
----------
col: str, tuple or list
Can be a single column name, or a list of names for multiple columns.
.. versionchanged:: 2.2
Added support for multiple columns.
probabilities : list or tuple
a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
relativeError : float
The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
Returns
-------
list
the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
"""
if not isinstance(col, (str, list, tuple)):
raise TypeError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, str):
raise TypeError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise TypeError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int)):
raise TypeError("relativeError should be numerical (float, int)")
if relativeError < 0:
raise ValueError("relativeError should be >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
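# Worked example of the error bound above (added for clarity, not part of the
# original source): with N = 1000 rows, p = 0.5 and relativeError = 0.01, the
# returned sample x satisfies floor(0.49 * 1000) <= rank(x) <= ceil(0.51 * 1000),
# i.e. its exact rank lies between 490 and 510.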
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
method : str, optional
The correlation method. Currently only supports "pearson"
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column. Distinct items will make the first item of
each row.
col2 : str
The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
cols : list or tuple
Names of the columns to calculate frequent items for as a list or tuple of
strings.
support : float, optional
The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise TypeError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
.. versionadded:: 1.3.0
Parameters
----------
colName : str
string, name of the new column.
col : :class:`Column`
a :class:`Column` expression for the new column.
Notes
-----
This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
Examples
--------
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name='Alice', age2=4), Row(age=5, name='Bob', age2=7)]
"""
if not isinstance(col, Column):
raise TypeError("col should be Column")
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
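# Illustrative sketch of the note above (added for clarity, not part of the
# original source); `df` is assumed as in the doctests:
#     # instead of one projection per loop iteration, which can blow up the plan:
#     for c in df.columns:
#         df = df.withColumn(c + '_copy', df[c])
#     # prefer a single projection that adds all columns at once:
#     df.select('*', *[df[c].alias(c + '_copy') for c in df.columns])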
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
.. versionadded:: 1.3.0
Parameters
----------
existing : str
string, name of the existing column to rename.
new : str
string, new name of the column.
Examples
--------
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name='Alice'), Row(age2=5, name='Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
.. versionadded:: 1.4.0
Parameters
----------
cols: str or :class:`Column`
a name of the column, or the :class:`Column` to drop
Examples
--------
>>> df.drop('age').collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.drop(df.age).collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name='Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name='Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, str):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, str):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def toDF(self, *cols):
"""Returns a new :class:`DataFrame` with new specified column names
Parameters
----------
cols : str
new column names
Examples
--------
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2='Alice'), Row(f1=5, f2='Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def transform(self, func):
"""Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a function that takes and returns a :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
def sameSemantics(self, other):
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
therefore return same results.
.. versionadded:: 3.1.0
Notes
-----
The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
This API can compare both :class:`DataFrame`\\s very fast but can still return
`False` on :class:`DataFrame`\\s that return the same results, for instance, from
different plans. Such false negatives can be acceptable, for example, when the check is used for caching.
This API is a developer API.
Examples
--------
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise TypeError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
def semanticHash(self):
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. versionadded:: 3.1.0
Notes
-----
Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
This API is a developer API.
Examples
--------
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
def inputFiles(self):
"""
Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.
This method simply asks each constituent BaseRelation for its respective files and
takes the union of all results. Depending on the source relations, this may not find
all input files. Duplicates are removed.
.. versionadded:: 3.1.0
Examples
--------
>>> df = spark.read.load("examples/src/main/resources/people.json", format="json")
>>> len(df.inputFiles())
1
"""
return list(self._jdf.inputFiles())
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
def writeTo(self, table):
"""
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
"""
return DataFrameWriterV2(self, table)
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(height=80, name='Tom'), Row(height=85, name='Bob')]).toDF()
globs['df3'] = sc.parallelize([Row(age=2, name='Alice'),
Row(age=5, name='Bob')]).toDF()
globs['df4'] = sc.parallelize([Row(age=10, height=80, name='Alice'),
Row(age=5, height=None, name='Bob'),
Row(age=None, height=None, name='Tom'),
Row(age=None, height=None, name=None)]).toDF()
globs['df5'] = sc.parallelize([Row(age=10, name='Alice', spy=False),
Row(age=5, name='Bob', spy=None),
Row(age=None, name='Mallory', spy=True)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
balister/GNU-Radio | gr-filter/examples/resampler.py | 58 | 4454 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print "Resampling from %f to %f by %f " %(fs_in, fs_out, rerate)
# Creating our own taps
taps = filter.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = analog.sig_source_c(fs_in, analog.GR_SIN_WAVE, fc, 1)
#self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = filter.pfb.arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = filter.pfb.arb_resampler_ccf(rerate)
self.snk_in = blocks.vector_sink_c()
self.snk_0 = blocks.vector_sink_c()
self.snk_1 = blocks.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
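# Note, added for clarity and not part of the original source: with the
# defaults used in main() below (fs_in=8000, fs_out=20000) the resampling rate
# is rerate = 20000 / 8000 = 2.5, so both PFB arbitrary resamplers interpolate
# the 1 kHz tone from 8 kS/s up to 20 kS/s.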
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pylab.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp1.set_xlim([-fs_in/2, fs_in/2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
sp2.set_xlim([-fs_out/2, fs_out/2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0/fs_in
Ts_out = 1.0/fs_out
t_in = scipy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = scipy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pylab.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
r = float(fs_out)/float(fs_in)
sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
seckcoder/lang-learn | python/sklearn/sklearn/datasets/species_distributions.py | 7 | 7758 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: Simplified BSD
from cStringIO import StringIO
from os import makedirs
from os.path import join
from os.path import exists
import urllib2
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6,
dtype=np.int16):
"""
load a coverage file.
This will return a numpy array of the given dtype
"""
try:
header = [F.readline() for i in range(header_length)]
except:
F = open(F)
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header['NODATA_value']
if nodata != -9999:
        M[M == nodata] = -9999  # replace the missing-data sentinel values, not a row index
return M
def _load_csv(F):
"""Load csv file.
    Parameters
    ----------
F : string or file object
file object or name of file
Returns
-------
rec : np.ndarray
record array representing the data
"""
try:
names = F.readline().strip().split(',')
except:
F = open(F)
names = F.readline().strip().split(',')
rec = np.loadtxt(F, skiprows=1, delimiter=',',
dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
       also known as the Forest Small Rice Rat, a rodent that lives in Peru,
       Colombia, Ecuador, and Venezuela.
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print 'Downloading species data from %s to %s' % (SAMPLES_URL,
data_home)
X = np.load(StringIO(urllib2.urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = StringIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print 'Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home)
X = np.load(StringIO(urllib2.urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = StringIO(X[f])
print ' - converting', f
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages,
dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
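# A minimal usage sketch of the loader and grid helper above. It only runs
# as a script (the first call may download data) and relies solely on the
# Bunch attributes documented in fetch_species_distributions.
if __name__ == "__main__":
    data = fetch_species_distributions()
    xgrid, ygrid = construct_grids(data)
    print 'coverage grid shape:', data.coverages.shape
    print 'x range: %.2f to %.2f degrees' % (xgrid[0], xgrid[-1])
    print 'number of training points:', data.train.shape[0]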
| unlicense |
witgo/spark | python/pyspark/sql/tests/test_pandas_udf_typehints.py | 22 | 9603 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from typing import Union, Iterator, Tuple
from pyspark.sql.functions import mean, lit
from pyspark.testing.sqlutils import ReusedSQLTestCase, \
have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType
from pyspark.sql import Row
if have_pandas:
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class PandasUDFTypeHintsTests(ReusedSQLTestCase):
def test_type_annotation_scalar(self):
def func(col: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, col1: pd.Series) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, *args: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *, col2: pd.DataFrame) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def test_type_annotation_scalar_iter(self):
def func(iter: Iterator[pd.Series]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, pd.Series]]) -> Iterator[pd.DataFrame]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, ...]]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(
iter: Iterator[Tuple[Union[pd.DataFrame, pd.Series], ...]]
) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def test_type_annotation_group_agg(self):
def func(col: pd.Series) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, col1: pd.Series) -> int:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, *args: pd.Series) -> Row:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def test_type_annotation_negative(self):
def func(col: str) -> pd.Series:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.DataFrame, col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*int",
infer_eval_type, inspect.signature(func))
def func(col: Union[pd.DataFrame, str], col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series) -> Tuple[pd.DataFrame]:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*Tuple",
infer_eval_type, inspect.signature(func))
def func(col, *args: pd.Series) -> pd.Series:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame):
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *, col2) -> pd.DataFrame:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def test_scalar_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(v: Union[pd.Series, pd.DataFrame]) -> pd.Series:
return v + 1
plus_one = pandas_udf("long")(plus_one)
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_scalar_iter_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(itr: Iterator[pd.Series]) -> Iterator[pd.Series]:
for s in itr:
yield s + 1
plus_one = pandas_udf("long")(plus_one)
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_group_agg_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def weighted_mean(v: pd.Series, w: pd.Series) -> float:
return np.average(v, weights=w)
weighted_mean = pandas_udf("double")(weighted_mean)
actual = df.groupby('id').agg(weighted_mean(df.v, lit(1.0))).sort('id')
expected = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_group_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(v: pd.DataFrame) -> pd.DataFrame:
return v + 1
actual = df.groupby('id').applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_cogroup_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
return left + 1
actual = df.groupby('id').cogroup(
self.spark.range(10).groupby("id")
).applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_map_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
return map(lambda v: v + 1, iter)
actual = df.mapInPandas(pandas_plus_one, schema=df.schema)
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
if __name__ == "__main__":
    from pyspark.sql.tests.test_pandas_udf_typehints import *  # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
joshloyal/scikit-learn | sklearn/linear_model/coordinate_descent.py | 3 | 81536 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not
supported) ``For l1_ratio = 1`` it is an L1 penalty. For
``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
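# Illustrative companion to _alpha_grid: for a dense, already centered,
# single-output problem the largest alpha on the grid is the smallest
# penalty at which every coefficient is zero,
# alpha_max = max_j |x_j . y| / (n_samples * l1_ratio).
# This helper is a sketch for exposition only, not part of the public API.
def _alpha_max_dense(X, y, l1_ratio=1.0):
    """Recompute the alpha_max used by _alpha_grid for dense, centered X."""
    Xy = np.dot(X.T, y)
    return np.max(np.abs(Xy)) / (X.shape[0] * l1_ratio)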
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
check_input : bool, default True
        Skip input validation checks, including the Gram matrix when provided,
        assuming they are handled by the caller when check_input=False.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False)
if alphas is None:
        # No need to normalize or fit an intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1], dtype=X.dtype))
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=X.dtype.type,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations.' +
' Fitting data with very small alpha' +
' may cause precision problems.',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
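# A toy usage sketch of enet_path, mirroring the lasso_path docstring
# example above; the data and the choice of l1_ratio are illustrative only.
def _enet_path_sketch():
    X = np.array([[1., 2., 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1., 2., 3.1])
    alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
    # coefs has shape (n_features, n_alphas): one coefficient vector per
    # alpha, ordered from the largest alpha to the smallest.
    return alphas, coefs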
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the penalty terms. Defaults to 1.0.
See the notes for the exact mathematical meaning of this
parameter.``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
        ----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if isinstance(self.precompute, six.string_types):
raise ValueError('precompute should be one of True, False or'
' array-like. Got %r' % self.precompute)
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc',
order='F', dtype=[np.float64, np.float32],
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
ensure_2d=False)
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or not hasattr(self, "coef_"):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_offset, y_offset, X_scale)
# workaround since _set_intercept will cast self.coef_ into X.dtype
self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted ``coef_`` """
return sparse.csr_matrix(self.coef_)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
else:
return super(ElasticNet, self)._decision_function(X)
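# The ElasticNet docstring notes that a penalty written as a * L1 + b * L2
# corresponds to alpha = a + b and l1_ratio = a / (a + b). A small sketch
# making that conversion explicit (not part of the public API); the keyword
# pass-through is purely illustrative.
def _elastic_net_from_l1_l2(a, b, **kwargs):
    """Build an ElasticNet whose penalty is a * ||w||_1 + 0.5 * b * ||w||^2_2."""
    alpha = a + b
    l1_ratio = a / float(a + b)
    return ElasticNet(alpha=alpha, l1_ratio=l1_ratio, **kwargs)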
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
        calculations. The Gram matrix can also be passed as argument.
        For sparse input this option is always ``True`` to preserve
        sparsity.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
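# A short sketch of the sparse_coef_ property documented above, reusing the
# toy data from the class docstring; illustration only.
def _lasso_sparse_coef_sketch():
    clf = Lasso(alpha=0.1)
    clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    # sparse_coef_ is a 1 x n_features CSR row; nnz counts the coefficients
    # that survived the L1 penalty.
    return clf.sparse_coef_.nnz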
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = check_array(y, copy=False, dtype=[np.float64, np.float32],
ensure_2d=False)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv.split(X))
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=X.dtype.type)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
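# A minimal usage sketch for the cross-validated estimator above, on
# synthetic data; it relies only on the fit method and the attributes
# documented in the class docstring.
def _lasso_cv_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(60)
    model = LassoCV(cv=3).fit(X, y)
    # alpha_ is the penalty chosen by cross-validation; mse_path_ has one
    # row per candidate alpha and one column per fold.
    return model.alpha_, model.mse_path_.shape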
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
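    Examples
    --------
    A minimal usage sketch (illustrative only; outputs depend on the data and
    the scikit-learn version, so they are not doctested). For the
    ``a * L1 + b * L2`` parametrisation described above, choosing for example
    ``a = 0.1`` and ``b = 0.05`` corresponds to ``alpha = a + b = 0.15`` and
    ``l1_ratio = a / (a + b) = 2/3`` in the underlying ElasticNet objective.
    >>> from sklearn.linear_model import ElasticNetCV
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=100, n_features=5, random_state=0)
    >>> reg = ElasticNetCV(l1_ratio=[.1, .5, .9, 1], cv=5).fit(X, y)  # doctest: +SKIP
    >>> reg.alpha_, reg.l1_ratio_  # doctest: +SKIP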
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskElasticNet model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
X = check_array(X, dtype=[np.float64, np.float32], order='F',
copy=self.copy_X and self.fit_intercept)
y = check_array(y, dtype=X.dtype.type, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_offset, y_offset, X_scale)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations',
ConvergenceWarning)
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
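    Examples
    --------
    Minimal sketch, mirroring the MultiTaskElasticNetCV example; the fitted
    values are omitted because they depend on the scikit-learn version.
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLassoCV(cv=3)  # doctest: +SKIP
    >>> clf.fit([[0, 0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])  # doctest: +SKIP
    >>> clf.alpha_  # doctest: +SKIP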
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
beiko-lab/gengis | bin/Lib/site-packages/numpy/core/function_base.py | 1 | 5639 | __all__ = ['logspace', 'linspace']
import numeric as _nx
from numeric import array
def linspace(start, stop, num=50, endpoint=True, retstep=False):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop` ].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float (only if `retstep` is True)
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num <= 0:
return array([], float)
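    # With endpoint=True the spacing uses (num - 1) intervals and the final
    # sample is pinned to `stop` below, so accumulated floating-point error in
    # `arange` cannot shift the endpoint; with endpoint=False, `num` intervals
    # are used and `stop` is excluded.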
if endpoint:
if num == 1:
return array([float(start)])
step = (stop-start)/float((num-1))
y = _nx.arange(0, num) * step + start
y[-1] = stop
else:
step = (stop-start)/float(num)
y = _nx.arange(0, num) * step + start
if retstep:
return y, step
else:
return y
def logspace(start,stop,num=50,endpoint=True,base=10.0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start,stop,num=num,endpoint=endpoint)
return _nx.power(base,y)
| gpl-3.0 |
yl565/statsmodels | statsmodels/imputation/tests/test_mice.py | 4 | 10458 | import numpy as np
import pandas as pd
from statsmodels.imputation import mice
import statsmodels.api as sm
from numpy.testing import assert_equal, assert_allclose, dec
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except Exception:
have_matplotlib = False
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_mice.pdf")
else:
pdf = None
def close_or_save(pdf, fig):
if not have_matplotlib:
return
if pdf_output:
pdf.savefig(fig)
plt.close(fig)
def teardown_module():
if not have_matplotlib:
return
plt.close('all')
if pdf_output:
pdf.close()
def gendat():
"""
Create a data set with missing values.
"""
np.random.seed(34243)
n = 200
p = 5
exog = np.random.normal(size=(n, p))
exog[:, 0] = exog[:, 1] - exog[:, 2] + 2*exog[:, 4]
exog[:, 0] += np.random.normal(size=n)
exog[:, 2] = 1*(exog[:, 2] > 0)
endog = exog.sum(1) + np.random.normal(size=n)
df = pd.DataFrame(exog)
df.columns = ["x%d" % k for k in range(1, p+1)]
df["y"] = endog
df.x1[0:60] = np.nan
df.x2[0:40] = np.nan
df.x3[10:30:2] = np.nan
df.x4[20:50:3] = np.nan
df.x5[40:45] = np.nan
df.y[30:100:2] = np.nan
return df
class TestMICEData(object):
def test_default(self):
"""
Test with all defaults.
"""
df = gendat()
orig = df.copy()
mx = pd.notnull(df)
imp_data = mice.MICEData(df)
nrow, ncol = df.shape
assert_allclose(imp_data.ix_miss['x1'], np.arange(60))
assert_allclose(imp_data.ix_obs['x1'], np.arange(60, 200))
assert_allclose(imp_data.ix_miss['x2'], np.arange(40))
assert_allclose(imp_data.ix_miss['x3'], np.arange(10, 30, 2))
assert_allclose(imp_data.ix_obs['x3'],
np.concatenate((np.arange(10),
np.arange(11, 30, 2),
np.arange(30, 200))))
for k in range(3):
imp_data.update_all()
assert_equal(imp_data.data.shape[0], nrow)
assert_equal(imp_data.data.shape[1], ncol)
assert_allclose(orig[mx], imp_data.data[mx])
fml = 'x1 ~ x2 + x3 + x4 + x5 + y'
assert_equal(imp_data.conditional_formula['x1'], fml)
assert_equal(imp_data._cycle_order, ['x5', 'x3', 'x4', 'y', 'x2', 'x1'])
# Should make a copy
assert(not (df is imp_data.data))
(endog_obs, exog_obs, exog_miss,
predict_obs_kwds, predict_miss_kwds) = imp_data.get_split_data('x3')
assert_equal(len(endog_obs), 190)
assert_equal(exog_obs.shape, [190, 6])
assert_equal(exog_miss.shape, [10, 6])
def test_next_sample(self):
df = gendat()
imp_data = mice.MICEData(df)
all_x = []
for j in range(2):
x = imp_data.next_sample()
assert(isinstance(x, pd.DataFrame))
assert_equal(df.shape, x.shape)
all_x.append(x)
# The returned dataframes are all the same object
assert(all_x[0] is all_x[1])
def test_pertmeth(self):
"""
Test with specified perturbation method.
"""
df = gendat()
orig = df.copy()
mx = pd.notnull(df)
nrow, ncol = df.shape
for pert_meth in "gaussian", "boot":
imp_data = mice.MICEData(df, perturbation_method=pert_meth)
for k in range(2):
imp_data.update_all()
assert_equal(imp_data.data.shape[0], nrow)
assert_equal(imp_data.data.shape[1], ncol)
assert_allclose(orig[mx], imp_data.data[mx])
assert_equal(imp_data._cycle_order, ['x5', 'x3', 'x4', 'y', 'x2', 'x1'])
def test_phreg(self):
np.random.seed(8742)
n = 300
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
event_time = np.random.exponential(size=n) * np.exp(x1)
obs_time = np.random.exponential(size=n)
time = np.where(event_time < obs_time, event_time, obs_time)
status = np.where(time == event_time, 1, 0)
df = pd.DataFrame({"time": time, "status": status, "x1": x1, "x2": x2})
df.loc[10:40, 'time'] = np.nan
df.loc[10:40, 'status'] = np.nan
df.loc[30:50, 'x1'] = np.nan
df.loc[40:60, 'x2'] = np.nan
from statsmodels.duration.hazard_regression import PHReg
idata = mice.MICEData(df)
idata.set_imputer("time", "0 + x1 + x2", model_class=PHReg,
init_kwds={"status": mice.PatsyFormula("status")},
predict_kwds={"pred_type": "hr"})
x = idata.next_sample()
assert(isinstance(x, pd.DataFrame))
def test_set_imputer(self):
"""
Test with specified perturbation method.
"""
from statsmodels.regression.linear_model import RegressionResultsWrapper
from statsmodels.genmod.generalized_linear_model import GLMResultsWrapper
df = gendat()
orig = df.copy()
mx = pd.notnull(df)
nrow, ncol = df.shape
imp_data = mice.MICEData(df)
imp_data.set_imputer('x1', 'x3 + x4 + x3*x4')
imp_data.set_imputer('x2', 'x4 + I(x5**2)')
imp_data.set_imputer('x3', model_class=sm.GLM,
init_kwds={"family": sm.families.Binomial()})
imp_data.update_all()
assert_equal(imp_data.data.shape[0], nrow)
assert_equal(imp_data.data.shape[1], ncol)
assert_allclose(orig[mx], imp_data.data[mx])
for j in range(1, 6):
if j == 3:
assert_equal(isinstance(imp_data.models['x3'], sm.GLM), True)
assert_equal(isinstance(imp_data.models['x3'].family, sm.families.Binomial), True)
assert_equal(isinstance(imp_data.results['x3'], GLMResultsWrapper), True)
else:
assert_equal(isinstance(imp_data.models['x%d' % j], sm.OLS), True)
assert_equal(isinstance(imp_data.results['x%d' % j], RegressionResultsWrapper), True)
fml = 'x1 ~ x3 + x4 + x3*x4'
assert_equal(imp_data.conditional_formula['x1'], fml)
fml = 'x4 ~ x1 + x2 + x3 + x5 + y'
assert_equal(imp_data.conditional_formula['x4'], fml)
assert_equal(imp_data._cycle_order, ['x5', 'x3', 'x4', 'y', 'x2', 'x1'])
@dec.skipif(not have_matplotlib)
def test_plot_missing_pattern(self):
df = gendat()
imp_data = mice.MICEData(df)
for row_order in "pattern", "raw":
for hide_complete_rows in False, True:
for color_row_patterns in False, True:
plt.clf()
fig = imp_data.plot_missing_pattern(row_order=row_order,
hide_complete_rows=hide_complete_rows,
color_row_patterns=color_row_patterns)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_plot_bivariate(self):
df = gendat()
imp_data = mice.MICEData(df)
imp_data.update_all()
plt.clf()
for plot_points in False, True:
fig = imp_data.plot_bivariate('x2', 'x4', plot_points=plot_points)
fig.get_axes()[0].set_title('plot_bivariate')
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_fit_obs(self):
df = gendat()
imp_data = mice.MICEData(df)
imp_data.update_all()
plt.clf()
for plot_points in False, True:
fig = imp_data.plot_fit_obs('x4', plot_points=plot_points)
fig.get_axes()[0].set_title('plot_fit_scatterplot')
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_plot_imputed_hist(self):
df = gendat()
imp_data = mice.MICEData(df)
imp_data.update_all()
plt.clf()
for plot_points in False, True:
fig = imp_data.plot_imputed_hist('x4')
fig.get_axes()[0].set_title('plot_imputed_hist')
close_or_save(pdf, fig)
class TestMICE(object):
def test_MICE(self):
df = gendat()
imp_data = mice.MICEData(df)
mi = mice.MICE("y ~ x1 + x2 + x1:x2", sm.OLS, imp_data)
result = mi.fit(1, 3)
assert(issubclass(result.__class__, mice.MICEResults))
# Smoke test for results
smr = result.summary()
def test_MICE1(self):
df = gendat()
imp_data = mice.MICEData(df)
mi = mice.MICE("y ~ x1 + x2 + x1:x2", sm.OLS, imp_data)
from statsmodels.regression.linear_model import RegressionResultsWrapper
for j in range(3):
x = mi.next_sample()
assert(issubclass(x.__class__, RegressionResultsWrapper))
def test_MICE2(self):
from statsmodels.genmod.generalized_linear_model import GLMResultsWrapper
df = gendat()
imp_data = mice.MICEData(df)
mi = mice.MICE("x3 ~ x1 + x2", sm.GLM, imp_data,
init_kwds={"family": sm.families.Binomial()})
for j in range(3):
x = mi.next_sample()
assert(isinstance(x, GLMResultsWrapper))
assert(isinstance(x.family, sm.families.Binomial))
def test_combine(self):
np.random.seed(3897)
x1 = np.random.normal(size=300)
x2 = np.random.normal(size=300)
y = x1 + x2 + np.random.normal(size=300)
x1[0:100] = np.nan
x2[250:] = np.nan
df = pd.DataFrame({"x1": x1, "x2": x2, "y": y})
idata = mice.MICEData(df)
mi = mice.MICE("y ~ x1 + x2", sm.OLS, idata, n_skip=20)
result = mi.fit(10, 20)
fmi = np.asarray([ 0.1920533 , 0.1587287 , 0.33174032])
assert_allclose(result.frac_miss_info, fmi, atol=1e-5)
params = np.asarray([-0.05397474, 0.97273307, 1.01652293])
assert_allclose(result.params, params, atol=1e-5)
tvalues = np.asarray([ -0.84781698, 15.10491582, 13.59998039])
assert_allclose(result.tvalues, tvalues, atol=1e-5)
if __name__=="__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
| bsd-3-clause |
dmccloskey/matplotlib_utilities | matplotlib_utilities/matplot_heatmap.py | 1 | 11427 | #HEATMAP
"""
This code was adapted from the following recipe:
* http://altanalyze.blogspot.se/2012/06/hierarchical-clustering-heatmaps-in.html
* http://code.activestate.com/recipes/578175/
Which was in turn inspired by many other posts:
* http://stackoverflow.com/questions/7664826
* http://stackoverflow.com/questions/2982929
* http://stackoverflow.com/questions/2455761
Running this with cosine or other distance metrics can often produce negative Z scores during clustering, so adjustments to the clustering may be required. Information about distance measures can be found here:
* http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html
* http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html
The documentation about the custom color gradients can be found here:
* http://matplotlib.sourceforge.net/examples/pylab_examples/custom_cmap.html
"""
# Built-in modules #
import random
# Third party modules #
import numpy, scipy, matplotlib, pandas
from matplotlib import pyplot
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
#import names  # optional dependency: provides get_first_name/get_last_name used by TestHeatmap
###############################################################################
# Create Custom Color Gradients #
red_black_sky = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.9), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0))}
red_black_blue = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0))}
red_black_green = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0))}
yellow_black_blue = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.8), (0.5, 0.1, 0.0), (1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0))}
make_cmap = lambda x: matplotlib.colors.LinearSegmentedColormap('my_colormap', x, 256)
color_gradients = {'red_black_sky' : make_cmap(red_black_sky),
'red_black_blue' : make_cmap(red_black_blue),
'red_black_green' : make_cmap(red_black_green),
'yellow_black_blue' : make_cmap(yellow_black_blue),
'red_white_blue' : pyplot.cm.bwr,
'seismic' : pyplot.cm.seismic,
'green_white_purple' : pyplot.cm.PiYG_r,
'coolwarm' : pyplot.cm.coolwarm,}
###############################################################################
class HiearchicalHeatmap(object):
"""A common use case for biologists analyzing their gene expression data is to cluster and visualize patterns of expression in the form of a heatmap and associated dendrogram."""
row_method = 'single' # Can be: linkage, single, complete, average, weighted, centroid, median, ward
column_method = 'single' # Can be: linkage, single, complete, average, weighted, centroid, median, ward
row_metric = 'braycurtis' # Can be: see scipy documentation
column_metric = 'braycurtis' # Can be: see scipy documentation
gradient_span = 'only_max' # Can be: min_to_max, min_to_max_centered, only_max, only_min
color_gradient = 'yellow_black_blue' # Can be: see color_gradients dictionary
fig_weight = 12
fig_height = 8.5
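    # Typical use (a sketch, mirroring TestHeatmap below): set `frame` to a
    # pandas DataFrame, optionally override the clustering attributes above,
    # then call plot(), e.g.
    #
    #     hm = HiearchicalHeatmap()
    #     hm.frame = df                     # rows/columns label the heatmap axes
    #     fig, axm, axcb, cb = hm.plot()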
def plot(self):
# Names #
row_header = self.frame.index
column_header = self.frame.columns
# What color to use #
cmap = color_gradients[self.color_gradient]
# Scale the max and min colors #
value_min = self.frame.min().min()
value_max = self.frame.max().max()
if self.gradient_span == 'min_to_max_centered':
value_max = max([value_max, abs(value_min)])
value_min = value_max * -1
if self.gradient_span == 'only_max': value_min = 0
if self.gradient_span == 'only_min': value_max = 0
norm = matplotlib.colors.Normalize(value_min, value_max)
# Scale the figure window size #
fig = pyplot.figure(figsize=(self.fig_weight, self.fig_height))
# Calculate positions for all elements #
# ax1, placement of dendrogram 1, on the left of the heatmap
### The second value controls the position of the matrix relative to the bottom of the view
[ax1_x, ax1_y, ax1_w, ax1_h] = [0.05, 0.22, 0.2, 0.6]
width_between_ax1_axr = 0.004
### distance between the top color bar axis and the matrix
height_between_ax1_axc = 0.004
### Sufficient size to show
color_bar_w = 0.015
# axr, placement of row side colorbar #
### second to last controls the width of the side color bar - 0.015 when showing
[axr_x, axr_y, axr_w, axr_h] = [0.31, 0.1, color_bar_w, 0.6]
axr_x = ax1_x + ax1_w + width_between_ax1_axr
axr_y = ax1_y; axr_h = ax1_h
width_between_axr_axm = 0.004
# axc, placement of column side colorbar #
### last one controls the hight of the top color bar - 0.015 when showing
[axc_x, axc_y, axc_w, axc_h] = [0.4, 0.63, 0.5, color_bar_w]
axc_x = axr_x + axr_w + width_between_axr_axm
axc_y = ax1_y + ax1_h + height_between_ax1_axc
height_between_axc_ax2 = 0.004
# axm, placement of heatmap for the data matrix #
[axm_x, axm_y, axm_w, axm_h] = [0.4, 0.9, 2.5, 0.5]
axm_x = axr_x + axr_w + width_between_axr_axm
axm_y = ax1_y; axm_h = ax1_h
axm_w = axc_w
# ax2, placement of dendrogram 2, on the top of the heatmap #
### last one controls hight of the dendrogram
[ax2_x, ax2_y, ax2_w, ax2_h] = [0.3, 0.72, 0.6, 0.15]
ax2_x = axr_x + axr_w + width_between_axr_axm
ax2_y = ax1_y + ax1_h + height_between_ax1_axc + axc_h + height_between_axc_ax2
ax2_w = axc_w
# axcb - placement of the color legend #
[axcb_x, axcb_y, axcb_w, axcb_h] = [0.07, 0.88, 0.18, 0.09]
# Compute and plot top dendrogram #
if self.column_method:
d2 = dist.pdist(self.frame.transpose())
D2 = dist.squareform(d2)
ax2 = fig.add_axes([ax2_x, ax2_y, ax2_w, ax2_h], frame_on=True)
Y2 = sch.linkage(D2, method=self.column_method, metric=self.column_metric)
Z2 = sch.dendrogram(Y2)
ind2 = sch.fcluster(Y2, 0.7*max(Y2[:,2]), 'distance')
ax2.set_xticks([])
ax2.set_yticks([])
### apply the clustering for the array-dendrograms to the actual matrix data
idx2 = Z2['leaves']
self.frame = self.frame.iloc[:,idx2]
### reorder the flat cluster to match the order of the leaves the dendrogram
ind2 = ind2[idx2]
else: idx2 = list(range(self.frame.shape[1]))
# Compute and plot left dendrogram #
if self.row_method:
d1 = dist.pdist(self.frame)
D1 = dist.squareform(d1)
ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=True)
Y1 = sch.linkage(D1, method=self.row_method, metric=self.row_metric)
Z1 = sch.dendrogram(Y1, orientation='right')
ind1 = sch.fcluster(Y1, 0.7*max(Y1[:,2]), 'distance')
ax1.set_xticks([])
ax1.set_yticks([])
### apply the clustering for the array-dendrograms to the actual matrix data
idx1 = Z1['leaves']
self.frame = self.frame.iloc[idx1,:]
### reorder the flat cluster to match the order of the leaves the dendrogram
ind1 = ind1[idx1]
else: idx1 = list(range(self.frame.shape[0]))
# Plot distance matrix #
axm = fig.add_axes([axm_x, axm_y, axm_w, axm_h])
axm.matshow(self.frame, aspect='auto', origin='lower', cmap=cmap, norm=norm)
axm.set_xticks([])
axm.set_yticks([])
# Add text #
new_row_header = []
new_column_header = []
for i in range(self.frame.shape[0]):
axm.text(self.frame.shape[1]-0.5, i, ' ' + row_header[idx1[i]], verticalalignment="center")
new_row_header.append(row_header[idx1[i]] if self.row_method else row_header[i])
for i in range(self.frame.shape[1]):
axm.text(i, -0.9, ' '+column_header[idx2[i]], rotation=90, verticalalignment="top", horizontalalignment="center")
new_column_header.append(column_header[idx2[i]] if self.column_method else column_header[i])
# Plot column side colorbar #
if self.column_method:
axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h])
cmap_c = matplotlib.colors.ListedColormap(['r', 'g', 'b', 'y', 'w', 'k', 'm'])
dc = numpy.array(ind2, dtype=int)
dc.shape = (1,len(ind2))
axc.matshow(dc, aspect='auto', origin='lower', cmap=cmap_c)
axc.set_xticks([])
axc.set_yticks([])
# Plot column side colorbar #
if self.row_method:
axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h])
dr = numpy.array(ind1, dtype=int)
dr.shape = (len(ind1),1)
cmap_r = matplotlib.colors.ListedColormap(['r', 'g', 'b', 'y', 'w', 'k', 'm'])
axr.matshow(dr, aspect='auto', origin='lower', cmap=cmap_r)
axr.set_xticks([])
axr.set_yticks([])
# Plot color legend #
### axes for colorbar
axcb = fig.add_axes([axcb_x, axcb_y, axcb_w, axcb_h], frame_on=False)
cb = matplotlib.colorbar.ColorbarBase(axcb, cmap=cmap, norm=norm, orientation='horizontal')
axcb.set_title("colorkey")
max_cb_ticks = 5
axcb.xaxis.set_major_locator(pyplot.MaxNLocator(max_cb_ticks))
# Render the graphic #
if len(row_header)>50 or len(column_header)>50: pyplot.rcParams['font.size'] = 5
else: pyplot.rcParams['font.size'] = 8
# Return figure #
return fig, axm, axcb, cb
###############################################################################
class TestHeatmap(HiearchicalHeatmap):
short_name = 'test_heatmap'
def data(self, size=20):
"""Create some fake data in a dataframe"""
numpy.random.seed(0)
random.seed(0)
x = scipy.rand(size)
M = scipy.zeros([size,size])
for i in range(size):
for j in range(size): M[i,j] = abs(x[i] - x[j])
df = pandas.DataFrame(M, index=[names.get_last_name() for _ in range(size)],
columns=[names.get_first_name() for _ in range(size)])
df['Mary']['Day'] = 1.5
df['Issac']['Day'] = 1.0
return df
def plot(self):
self.frame = self.data()
self.path = '/tmp/' + self. short_name + '.pdf'
fig, axm, axcb, cb = HiearchicalHeatmap.plot(self)
cb.set_label("Random value")
pyplot.savefig(self.path)
###############################################################################
def test():
graph = TestHeatmap()
graph.plot()
    return graph
| mit |
aymeric-spiga/planetoplot | examples/ppclass_additional/myhisto.py | 1 | 2450 | #! /usr/bin/env python
from ppclass import pp
from netCDF4 import Dataset
from numpy import *
import numpy as np
import matplotlib.pyplot as mpl
## Author: Tanguy Bertrand
## Exemple d'histogramme
#
# INPUT :
#
# file
# nb_dataset: number of dataset that is number of superposed histograms
# Time range
# altitude range
# longitude range
# latitude range
# step: gap on xaxis between two ticks. Ex: step = 1 leads to one bar for each unit of the selected variable
# variable : u, v, w, icetot, tsurf ...
############################
filename="diagfi1.nc"
nb_dataset=3
tint=[["0.25,1.5"],["0.25,5.5"],["0.25,10.5"]] #Time must be as written in the input file
zint=[["0.1,10"],["0.1,10"],["0.1,10"]] #alt in km
xarea="-180,180"
yarea="-90,90"
step=5
var="u" #variable
############################
x=np.zeros(nb_dataset,dtype='object') #object of all datasets
bornemax=0. # initialisation boundary values
bornemin=0.
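# For each dataset: extract the variable with ppclass, flatten it, and widen
# the shared [bornemin, bornemax] range (rounded outward) so that a single set
# of bin edges covers every histogram.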
for i in range(nb_dataset):
myvar = pp(file=filename,var=var,t=tint[i],z=zint[i],x=xarea,y=yarea,compute="nothing").getf() # get data to be changed according to selected variable
data=np.ravel(myvar)
x[i]=np.array(data)
#upper lower bounds:
maxval=np.amax(myvar)
romax=round(maxval,0)
if abs(romax/maxval) < 1: romax=romax+1
minval=np.amin(myvar)
romin=round(minval,0)
if abs(romin/minval) < 1: romin=romin-1
bornemax=np.amax([romax,bornemax])
bornemin=np.amin([romin,bornemin])
#
 print 'minval,maxval=',minval,maxval
print 'romin,romax=',romin,romax
print 'bornemin,bornemax=',bornemin,bornemax
# bins definition:
bins=np.arange(bornemin,bornemax+1,step)
vect=[x[0],x[1],x[2]] #vecteur of datasets to be changed according to the number of desired datasets
## PLOT
# legend:
zelab=['time: '+str(tint[0])+' - alt: '+str(zint[0])+' km','time: '+str(tint[1])+' - alt: '+str(zint[1])+' km','time: '+str(tint[2])+' - alt: '+str(zint[2])+' km']
# Histogram : normed, probability
mpl.hist(vect,bins,normed=1,histtype='bar',align='mid',rwidth=0.8,label=zelab)
mpl.legend(prop={'size':20})
mpl.title('U wind distribution',fontsize=20)
mpl.xlabel('Horizontal Wind (m.s-1)',fontsize=20)
mpl.ylabel('Probability',fontsize=20)
mpl.xticks(bins,fontsize=16)
mpl.yticks(fontsize=16)
mpl.grid(True)
mpl.show()
mpl.figure(5)
| gpl-2.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_NonLinHardShear/Area/A_1e-4/Normal_Stress_Plot.py | 72 | 2800 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time vector and the normal stress/strain histories
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time vector and the normal stress/strain histories
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
hydroshare/hydroshare2 | ga_resources/views/ows.py | 2 | 9987 | import json
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from ga_ows.views import wms, wfs
from ga_resources import models, dispatch
from ga_resources.drivers import shapefile, render, CacheManager
from ga_resources.models import RenderedLayer
from ga_resources.utils import authorize
from hashlib import md5
from osgeo import osr, ogr
class WMSAdapter(wms.WMSAdapterBase):
def layerlist(self):
return [l.slug for l in models.RenderedLayer.objects.all()]
def get_2d_dataset(self, layers, srs, bbox, width, height, styles, bgcolor, transparent, time, elevation, v, filter,
**kwargs):
"""use the driver to render a tile"""
return render(kwargs['format'], width, height, bbox, srs, styles, layers, **kwargs)
def get_feature_info(self, wherex, wherey, layers, callback, format, feature_count, srs, filter, fuzziness=0,
**kwargs): # fuzziness of 30 meters by default
"""use the driver to get feature info"""
if srs.lower().startswith('epsg'):
s = osr.SpatialReference()
s.ImportFromEPSG(int(srs[5:]))
srs = s.ExportToProj4()
feature_info = {
layer: models.RenderedLayer.objects.get(slug=layer).data_resource.driver_instance.get_data_for_point(
wherex, wherey, srs, fuzziness=fuzziness, **kwargs
)
for layer in layers}
return feature_info
def nativesrs(self, layer):
"""Use the resource record to get native SRS"""
resource = models.RenderedLayer.objects.get(slug=layer).data_resource
return resource.native_srs
def nativebbox(self, layer=None):
"""Use the resource record to get the native bounding box"""
if layer:
resource = models.RenderedLayer.objects.get(slug=layer).data_resource
return resource.native_bounding_box.extent
else:
return (-180, -90, 180, 90)
def styles(self):
"""Use the resource record to get the available styles"""
return list(models.Style.objects.all())
def get_layer_descriptions(self):
"""
This should return a list of dictionaries. Each dictionary should follow this format::
{ ""name"" : layer_"name",
"title" : human_readable_title,
"srs" : spatial_reference_id,
"queryable" : whether or not GetFeatureInfo is supported for this layer,
"minx" : native_west_boundary,
"miny" : native_south_boundary,
"maxx" : native_east_boundary,
"maxy" : native_north_boundary,
"ll_minx" : west_boundary_epsg4326,
"ll_miny" : south_boundary_epsg4326,
"ll_maxx" : east_boundary_epsg4326,
"ll_maxy" : north_boundary_epsg4326,
"styles" : [list_of_style_descriptions]
Each style description in list_of_style_descriptions should follow this format::
{ ""name"" : style_"name",
"title" : style_title,
"legend_width" : style_legend_width,
"legend_height" : style_legend_height,
"legend_url" : style_legend_url
}
"""
layers = models.RenderedLayer.objects.all()
ret = []
for layer in layers:
desc = {}
ret.append(desc)
desc["name"] = layer.slug
desc['title'] = layer.title
desc['srs'] = layer.data_resource.native_srs
desc['queryable'] = True
desc['minx'], desc['miny'], desc['maxx'], desc[
'maxy'] = layer.data_resource.native_bounding_box.extent # FIXME this is not native
desc['ll_minx'], desc['ll_miny'], desc['ll_maxx'], desc[
'll_maxy'] = layer.data_resource.bounding_box.extent
desc['styles'] = []
desc['styles'].append({
"name": layer.default_style.slug,
'title': layer.default_style.title,
'legend_width': layer.default_style.legend_width,
'legend_height': layer.default_style.legend_height,
'legend_url': layer.default_style.legend.url if layer.default_style.legend else ""
})
for style in layer.styles.all():
desc['styles'].append({
"name": style.slug,
'title': style.title,
'legend_width': style.legend_width,
'legend_height': style.legend_height,
'legend_url': style.legend.url if style.legend else ""
})
return ret
def get_service_boundaries(self):
"""Just go ahead and return the world coordinates"""
return {
"minx": -180.0,
"miny": -90.0,
"maxx": 180.0,
"maxy": 90.0
}
class WMS(wms.WMS):
adapter = WMSAdapter([])
class WFSAdapter(wfs.WFSAdapter):
def get_feature_descriptions(self, request, *types):
namespace = request.build_absolute_uri().split('?')[
0] + "/schema" # todo: include https://bitbucket.org/eegg/django-model-schemas/wiki/Home
for type_name in types:
res = get_object_or_404(models.DataResource, slug=type_name)
yield wfs.FeatureDescription(
ns=namespace,
ns_name='ga_resources',
name=res.slug,
abstract=res.description,
title=res.title,
keywords=res.keywords,
srs=res.native_srs,
bbox=res.bounding_box,
schema=namespace + '/' + res.slug
)
def list_stored_queries(self, request):
"""list all the queries associated with drivers"""
sq = super(WFSAdapter, self).list_stored_queries(request)
return sq
def get_features(self, request, parms):
if parms.cleaned_data['stored_query_id']:
squid = "SQ_" + parms.cleaned_data['stored_query_id']
slug = parms.cleaned_data['type_names'] if isinstance(parms.cleaned_data['type_names'], basestring) else \
parms.cleaned_data['type_names'][0]
try:
return models.DataResource.driver_instance.query_operation(squid)(request, **parms.cleaned_data)
except:
raise wfs.OperationNotSupported.at('GetFeatures', 'stored_query_id={squid}'.format(squid=squid))
else:
return self.AdHocQuery(request, **parms.cleaned_data)
def AdHocQuery(self, req,
type_names=None,
filter=None,
filter_language=None,
bbox=None,
sort_by=None,
count=None,
start_index=None,
srs_name=None,
srs_format=None,
max_features=None,
**kwargs
):
model = get_object_or_404(models.DataResource, slug=type_names[0])
driver = model.driver_instance
extra = {}
if filter:
extra['filter'] = json.loads(filter)
if bbox:
extra['bbox'] = bbox
if srs_name:
srs = osr.SpatialReference()
if srs_name.lower().startswith('epsg'):
srs.ImportFromEPSG(int(srs_name[5:]))
else:
srs.ImportFromProj4(srs_name)
extra['srs'] = srs
else:
srs = model.srs
if start_index:
extra['start'] = start_index
count = count or max_features
if count:
extra['count'] = count
if "boundary" in kwargs:
extra['boundary'] = kwargs['boundary']
extra['boundary_type'] = kwargs['boundary_type']
        if sort_by:
            extra['sort_by'] = sort_by
        if filter_language and filter_language != 'json':
            raise wfs.OperationNotSupported('filter language must be JSON for now')
        df = driver.as_dataframe(**extra)
filename = md5()
filename.update("{name}.{bbox}.{srs_name}x{filter}".format(
name=type_names[0],
bbox=','.join(str(b) for b in bbox),
srs_name=srs_name,
filter=filter
))
filename = filename.hexdigest()
shapefile.ShapefileDriver.from_dataframe(df, filename, srs)
ds = ogr.Open(filename)
return ds
def supports_feature_versioning(self):
return False
class WFS(wfs.WFS):
adapter = WFSAdapter()
def tms(request, layer, z, x, y, **kwargs):
z = int(z)
x = int(x)
y = int(y)
table = None
if '#' in layer:
layer_slug, table = layer.split('#')
else:
layer_slug = layer
layer_instance = RenderedLayer.objects.get(slug=layer_slug)
user = authorize(request, page=layer, view=True)
dispatch.api_accessed.send(RenderedLayer, instance=layer_instance, user=user)
style = request.GET.get('styles', layer_instance.default_style.slug)
tms = CacheManager.get().get_tile_cache([layer], [style])
return HttpResponse(tms.fetch_tile(z, x, y), mimetype='image/png')
def seed_layer(request, layer):
mnz = int(request.GET['minz'])
    mxz = int(request.GET['maxz'])  # anything greater could enable a denial-of-service; deeper seeding should be done manually
mnx = int(request.GET['minx'])
mxx = int(request.GET['maxx'])
mny = int(request.GET['miny'])
mxy = int(request.GET['maxy'])
layer = RenderedLayer.objects.get(slug=layer)
style = request.GET.get('style', layer.default_style)
user = authorize(request, page=layer, edit=True)
dispatch.api_accessed.send(RenderedLayer, instance=layer, user=user)
CacheManager.get().get_tile_cache(layers=[layer], styles=[style]).seed_tiles(mnz, mxz, mnx, mny, mxx, mxy)
return HttpResponse()
| bsd-3-clause |
jjberry/Autotrace | matlab-version/LinguaView.py | 3 | 5404 | #!/usr/bin/env python
import sys
import os
import neutralContour as nc
import LabelWindow as lw
import AnalysisWindow as aw
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import gobject
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
class LinguaViewer:
"""This is the class for the main window of LinguaViewer"""
def __init__(self, datafiles=[]):
#self.static_dir = '/Users/jeff/autotracer/trunk/LinguaViewer/'
self.gladefile = "LinguaViewer.glade"
self.wTree = gtk.glade.XML(self.gladefile, "mainwindow")
self.win = self.wTree.get_widget("mainwindow")
self.win.set_title("LinguaView")
self.mainVBox = self.wTree.get_widget("vbox2")
dic = { "on_mainwindow_destroy": gtk.main_quit,
"on_quit_activate" : gtk.main_quit,
"on_open_activate" : self.onOpen,
"on_tbOpen_clicked" : self.onOpen,
"on_tbView_clicked": self.onView,
"on_tbLabel1_clicked": self.onLabel,
"on_tbRemove_clicked" : self.onRemove,
"on_tbAnalyze_clicked" : self.onAnalyze,
"on_showlinguagram_toggled": self.showlinguagram,
"on_showneutral_toggled": self.showneutral,
"on_showwave_toggled": self.showwave,
"on_showspec_toggled": self.showspec}
self.wTree.signal_autoconnect(dic)
self.SHOW_LING = False
self.SHOW_NEUT = False
self.SHOW_WAVE = False
self.SHOW_SPEC = False
self.linguaToggle = self.wTree.get_widget("showlinguagram")
self.neutralToggle = self.wTree.get_widget("showneutral")
self.neutralToggle.set_active(True)
self.waveToggle = self.wTree.get_widget("showwave")
self.waveToggle.set_active(True)
self.specToggle = self.wTree.get_widget("showspec")
self.specToggle.set_active(True)
self.TreeView = self.wTree.get_widget("treeview1")
column = gtk.TreeViewColumn("Data Files", gtk.CellRendererText(), text=0)
column.set_resizable(True)
column.set_sort_column_id(0)
self.TreeView.append_column(column)
self.DataList = gtk.ListStore(str)
self.TreeView.set_model(self.DataList)
self.datafiles = datafiles
if len(self.datafiles) > 0:
for i in self.datafiles:
self.DataList.append([i])
self.labelInd = 0
def onOpen(self, event):
fc = gtk.FileChooserDialog(title='Open Data Files', parent=None,
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
fc.set_select_multiple(True)
ffilter = gtk.FileFilter()
ffilter.set_name('.csv Files')
ffilter.add_pattern('*.csv')
fc.add_filter(ffilter)
response = fc.run()
if response == gtk.RESPONSE_OK:
self.datafiles = fc.get_filenames()
g_directory = fc.get_current_folder()
for i in self.datafiles:
self.DataList.append([i])
fc.destroy()
def onRemove(self, event):
selection = self.TreeView.get_selection()
model, select_iter = selection.get_selected()
if (select_iter):
self.DataList.remove(select_iter)
def onView(self, event):
selection = self.TreeView.get_selection()
model, select_iter = selection.get_selected()
if (select_iter):
fname = self.DataList.get_value(select_iter, 0)
n = fname.split('/')
neutralfname = '/'.join(n[:-1]) + '/neutral.csv'
nc.NeutralTongue(fname, neutralfname, self.SHOW_LING, self.SHOW_NEUT, self.SHOW_WAVE, self.SHOW_SPEC)
def onLabel(self, event):
selection = self.TreeView.get_selection()
model, select_iter = selection.get_selected()
if (select_iter):
fname = self.DataList.get_value(select_iter, 0)
n = fname.split('/')
neutralfname = '/'.join(n[:-1]) + '/neutral.csv'
lw.LabelWindow([fname], self.SHOW_LING, self.SHOW_NEUT, self.SHOW_WAVE, self.SHOW_SPEC)
def onAnalyze(self, event):
aw.AnalysisWindow(self.datafiles)
def showlinguagram(self, event):
if self.SHOW_LING == False:
self.SHOW_LING = True
else:
self.SHOW_LING = False
def showneutral(self, event):
if self.SHOW_NEUT == False:
self.SHOW_NEUT = True
else:
self.SHOW_NEUT = False
def showwave(self, event):
if self.SHOW_WAVE == False:
self.SHOW_WAVE = True
else:
self.SHOW_WAVE = False
def showspec(self, event):
if self.SHOW_SPEC == False:
self.SHOW_SPEC = True
else:
self.SHOW_SPEC = False
if __name__ == "__main__":
LinguaViewer()
gtk.main()
| mit |
Opendigitalradio/ODR-StaticPrecorrection | analyze_aligned_rx_tx.py | 1 | 3969 | """
Generate analytic plots from aligned TX/RX recordings.
"""
import argparse
import numpy as np
import matplotlib.pyplot as plt
import src.dab_util as du
import scipy.stats as st
from sklearn.neighbors import KernelDensity
import os
SCATTER_SIZE = 0.0001
def kde2D(x, y, bandwidth, xbins=256j, ybins=256j, **kwargs):
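    """Evaluate a 2D Gaussian-kernel density estimate of (x, y) on a regular grid.
    Builds an xbins-by-ybins meshgrid over the data range, fits scikit-learn's
    ``KernelDensity`` on the (y, x) samples, and returns the grid coordinates
    together with the exponentiated log-density reshaped to the grid shape.
    """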
xx, yy = np.mgrid[x.min():x.max():xbins,
y.min():y.max():ybins]
xy_sample = np.vstack([yy.ravel(), xx.ravel()]).T
xy_train = np.vstack([y, x]).T
kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)
kde_skl.fit(xy_train)
z = np.exp(kde_skl.score_samples(xy_sample))
return xx, yy, np.reshape(z, xx.shape)
def plot_density(x, y, scatter=False, path=None):
x = np.abs(x)
y = np.abs(y)
x = x - np.min(x)
x = x / np.max(x)
y = y - np.min(y)
y = y / np.max(y)
max_val = max(np.max(x), np.max(y))
min_val = max(np.min(x), np.min(y))
xx, yy, zz = kde2D(x, y, (max_val - min_val) * 0.01)
plt.pcolormesh(xx, yy, zz)
plt.xlabel("Normalized Absolute TX Amplitude")
plt.ylabel("Normalized Absolute RX Amplitude")
plt.savefig(path)
def scatter(x_pos, y_pos, x_neg, y_neg, path=None):
x_pos, y_pos, x_neg, y_neg = np.abs(x_pos), np.abs(y_pos), np.abs(x_neg), np.abs(y_neg)
plt.scatter(x_pos, y_pos, s=SCATTER_SIZE, facecolor='blue', label="Positive TX/RX")
plt.scatter(x_neg, y_neg, s=SCATTER_SIZE, facecolor='red', label="Negative TX/RX")
plt.xlabel("Absolute TX Amplitude")
plt.ylabel("Absolute RX Amplitude")
plt.legend()
plt.savefig(path)
plt.clf()
def scatter_phase(x_pos, y_pos, x_neg, y_neg, path=None):
x_pos_abs = np.abs(x_pos)
x_neg_abs = np.abs(x_neg)
phase_diff_pos = np.angle(x_pos, deg=True) - np.angle(y_pos, deg=True)
phase_diff_neg = np.angle(x_neg, deg=True) - np.angle(y_neg, deg=True)
phase_diff_pos = np.mod(phase_diff_pos, 180)
phase_diff_neg = np.mod(phase_diff_neg, 180)
plt.scatter(x_pos_abs, phase_diff_pos, s=SCATTER_SIZE, facecolor='blue', label="Positive TX/RX")
    plt.scatter(x_neg_abs, phase_diff_neg, s=SCATTER_SIZE, facecolor='red', label="Negative TX/RX")
plt.ylabel("Phase difference")
plt.xlabel("Absolute Amplitude")
plt.legend()
plt.savefig(path)
plt.clf()
def plot_time(rx_rec, tx_rec, path=None, samples=256):
    plt.plot(np.angle(rx_rec[:samples]), c='blue', label="RX")
    plt.plot(np.angle(tx_rec[:samples]), c='red', label="TX")
plt.ylabel("Phase")
plt.xlabel("Sample")
plt.savefig(path)
plt.clf()
def main():
if not os.path.isdir(FLAGS.out_dir):
os.makedirs(FLAGS.out_dir)
rx_rec = du.fromfile(filename=FLAGS.rx_path)
tx_rec = du.fromfile(filename=FLAGS.tx_path)
sel_pos = (rx_rec > 0) & (tx_rec > 0)
rx_rec_pos = rx_rec[sel_pos]
tx_rec_pos = tx_rec[sel_pos]
sel_pos = (rx_rec < 0) & (tx_rec < 0)
rx_rec_neg = rx_rec[sel_pos]
tx_rec_neg = tx_rec[sel_pos]
scatter(tx_rec_pos, rx_rec_pos, tx_rec_neg, rx_rec_neg, path=FLAGS.out_dir + '/am_am_scatter.pdf')
scatter_phase(tx_rec_pos, rx_rec_pos, tx_rec_neg, rx_rec_neg, path=FLAGS.out_dir + '/am_pm_pos_scatter.pdf')
plot_time(rx_rec, tx_rec, path=FLAGS.out_dir + '/phase_over_time.pdf')
plot_density(tx_rec_pos, rx_rec_pos, path=FLAGS.out_dir + '/am_am_pos.pdf')
plot_density(tx_rec_neg, rx_rec_neg, path=FLAGS.out_dir + '/am_am_neg.pdf')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--rx_path',
type=str,
default='/tmp/record/2_rx_record.iq',
help="Path to complex64 rx recording"
)
parser.add_argument(
'--tx_path',
type=str,
default='/tmp/record/2_tx_record.iq',
help="Path to complex64 tx recording"
)
parser.add_argument(
'--out_dir',
type=str,
default='/tmp/analyze_aligned_rx_tx',
help="Output path"
)
FLAGS, unparsed = parser.parse_known_args()
main()
| mit |
ch3ll0v3k/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
nuclear-wizard/moose | modules/combined/test/tests/inelastic_strain/elas_plas/plot_cycled_stress.py | 21 | 1045 | #!/opt/moose/miniconda/bin/python
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import pylab
data = np.genfromtxt('./elas_plas_nl1_cycle_out.csv', delimiter=',', names=True)
fig = plt.figure()
ax1 = fig.add_subplot(111)
mpl.rcParams.update({'font.size': 10})
ax1.set_xlabel("Time")
ax1.set_ylabel("Stress (MPa)")
ax1.plot(data['time'], data['stress_yy'], label='Stress YY', color='k')
ax1.plot(data['time'], data['vonmises'], label='Vonmises', color='b')
ax1.plot(data['time'], data['pressure'], label='Pressure', color='r')
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
leg = ax1.legend(loc='best')
plt.savefig('plot_cycled_stress.pdf')
plt.show(fig)
| lgpl-2.1 |
eteq/bokeh | bokeh/charts/builder/timeseries_builder.py | 17 | 6098 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the TimeSeries class, which lets you build your TimeSeries charts by
just passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def TimeSeries(values, index=None, xscale='datetime', **kws):
""" Create a timeseries chart using
:class:`TimeSeriesBuilder <bokeh.charts.builder.timeseries_builder.TimeSeriesBuilder>`
to render the lines from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
        index for all data series, either as a **1d iterable** of any sort that will be
        used as the common index of the series, or as a **string** that corresponds to
        the key of the mapping to be used as the index (and not as a data series) if
        the values argument is a mapping (like a dict, an OrderedDict
        or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
import datetime
from bokeh.charts import TimeSeries, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(5)]
xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
ts = TimeSeries(xyvalues, index='Date', title="TimeSeries", legend="top_left",
ylabel='Languages')
output_file('timeseries.html')
show(ts)
"""
return create_and_build(
TimeSeriesBuilder, values, index=index, xscale=xscale, **kws
)
class TimeSeriesBuilder(Builder):
"""This is the TimeSeries class and it is in charge of plotting
TimeSeries charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
        series) if the chart's values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Take the x/y data from the timeseries values.
It calculates the chart properties accordingly. Then build a dict
containing references to all the points to be used by
the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
        # necessary to make all formats and encoders happy with array, blaze, ...
xs = list([x for x in self._values_index])
for col, values in self._values.items():
if isinstance(self.index, string_types) \
and col == self.index:
continue
            # save all the groups available in the incoming input
self._groups.append(col)
self.set_and_get("x_", col, xs)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""Push the TimeSeries data into the ColumnDataSource and
calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1::2]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the time series.
Takes reference points from the data loaded at the ColumnDataSource.
"""
self._duplet = list(chunk(self._attr, 2))
colors = cycle_colors(self._duplet, self.palette)
for i, (x, y) in enumerate(self._duplet, start=1):
glyph = Line(x=x, y=y, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
| bsd-3-clause |
bthirion/scikit-learn | sklearn/neighbors/unsupervised.py | 29 | 4756 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
maheshakya/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
(2 0)
k(x, y) = x ( ) y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
numenta/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/geo.py | 69 | 19738 | import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return u"%0.0f\u00b0" % degrees
RESOLUTION = 75
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
'return a format string formatting the coordinate'
long = long * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
self._logitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
        Set the number of degrees between each latitude grid.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
        Return True if this axes supports the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array, or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * np.sin(half_long)) / sinc_alpha
y = (np.sin(latitude) / sinc_alpha)
x.set_fill_value(0.0)
y.set_fill_value(0.0)
return np.concatenate((x.filled(), y.filled()), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
aux = 2.0 * np.arcsin((2.0 * latitude) / np.pi)
x = (2.0 * np.sqrt(2.0) * longitude * np.cos(aux)) / np.pi
y = (np.sqrt(2.0) * np.sin(aux))
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
long = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((long, lat), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
| agpl-3.0 |
BhallaLab/moose-full | moose-examples/paper-2015/Fig4_ReacDiff/rxdSpineSize.py | 6 | 11226 | ##################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2015 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
##
## rxdSpineSize.py: Builds a cell with spines and a propagating reaction
## wave. Products diffuse into the spine and cause it to get bigger.
##################################################################
import math
import pylab
import numpy
import matplotlib.pyplot as plt
import moose
import sys
sys.path.append( '../util' )
import rdesigneur as rd
from PyQt4 import QtGui
import moogli
import moogli.extensions.moose
import matplotlib
PI = 3.141592653
ScalingForTesting = 10
RM = 1.0 / ScalingForTesting
RA = 1.0 * ScalingForTesting
CM = 0.01 * ScalingForTesting
runtime = 100.0
frameruntime = 1.0
diffConst = 5e-12
dendLen = 100e-6
diffLen = 1.0e-6
dendDia = 2e-6
somaDia = 5e-6
concInit = 0.001 # 1 millimolar
spineSpacing = 10e-6
spineSpacingDistrib = 1e-6
spineSize = 1.0
spineSizeDistrib = 0.5
spineAngle= numpy.pi / 2.0
spineAngleDistrib = 0.0
def makeCellProto( name ):
elec = moose.Neuron( '/library/' + name )
ecompt = []
soma = rd.buildCompt( elec, 'soma', somaDia, somaDia, -somaDia, RM, RA, CM )
dend = rd.buildCompt( elec, 'dend', dendLen, dendDia, 0, RM, RA, CM )
moose.connect( soma, 'axial', dend, 'raxial' )
elec.buildSegmentTree()
def makeChemProto( name ):
chem = moose.Neutral( '/library/' + name )
comptVol = diffLen * dendDia * dendDia * PI / 4.0
for i in ( ['dend', comptVol], ['spine', 1e-19], ['psd', 1e-20] ):
print 'making ', i
compt = moose.CubeMesh( chem.path + '/' + i[0] )
compt.volume = i[1]
#x = moose.Pool( compt.path + '/x' )
#y = moose.BufPool( compt.path + '/y' )
z = moose.Pool( compt.path + '/z' )
#x.concInit = 0.0
#x.diffConst = diffConst
#y.concInit = concInit
z.concInit = 0.0
z.diffConst = diffConst
nInit = comptVol * 6e23 * concInit
nstr = str( 1/nInit)
x = moose.Pool( chem.path + '/dend/x' )
x.diffConst = diffConst
func = moose.Function( x.path + '/func' )
func.expr = "-x0 * (0.3 - " + nstr + " * x0) * ( 1 - " + nstr + " * x0)"
print func.expr
func.x.num = 1
moose.connect( x, 'nOut', func.x[0], 'input' )
moose.connect( func, 'valueOut', x, 'increment' )
z = moose.element( '/library/' + name + '/dend/z' )
reac = moose.Reac( '/library/' + name + '/dend/reac' )
reac.Kf = 1
reac.Kb = 10
moose.connect( reac, 'sub', x, 'reac' )
moose.connect( reac, 'prd', z, 'reac' )
def makeSpineProto2( name ):
spine = moose.Neutral( '/library/' + name )
shaft = rd.buildCompt( spine, 'shaft', 0.5e-6, 0.4e-6, 0, RM, RA, CM )
head = rd.buildCompt( spine, 'head', 0.5e-6, 0.5e-6, 0.5e-6, RM, RA, CM )
moose.connect( shaft, 'axial', head, 'raxial' )
def makeModel():
moose.Neutral( '/library' )
makeCellProto( 'cellProto' )
makeChemProto( 'cProto' )
makeSpineProto2( 'spine' )
rdes = rd.rdesigneur( useGssa = False, \
combineSegments = False, \
stealCellFromLibrary = True, \
meshLambda = 1e-6, \
cellProto = [['cellProto', 'elec' ]] ,\
spineProto = [['spineProto', 'spine' ]] ,\
chemProto = [['cProto', 'chem' ]] ,\
spineDistrib = [ \
['spine', '#', \
'spacing', str( spineSpacing ), \
'spacingDistrib', str( spineSpacingDistrib ), \
'angle', str( spineAngle ), \
'angleDistrib', str( spineAngleDistrib ), \
'size', str( spineSize ), \
'sizeDistrib', str( spineSizeDistrib ) ] \
], \
chemDistrib = [ \
[ "chem", "dend", "install", "1" ] \
],
adaptorList = [ \
[ 'psd/z', 'n', 'spine', 'psdArea', 10.0e-15, 300e-15 ], \
] \
)
rdes.buildModel( '/model' )
x = moose.vec( '/model/chem/dend/x' )
x.concInit = 0.0
for i in range( 0,20 ):
x[i].concInit = concInit
def makePlot( name, srcVec, field ):
tab = moose.Table2('/graphs/' + name + 'Tab', len( srcVec ) ).vec
for i in zip(srcVec, tab):
moose.connect(i[1], 'requestOut', i[0], field)
return tab
def displayPlots():
for x in moose.wildcardFind( '/graphs/#[0]' ):
tab = moose.vec( x )
for i in range( len( tab ) ):
pylab.plot( tab[i].vector, label=x.name[:-3] + " " + str( i ) )
pylab.legend()
pylab.figure()
def main():
"""
This illustrates the use of rdesigneur to build a simple dendrite with
spines, and then to resize them using spine fields. These are the
fields that would be changed dynamically in a simulation with reactions
that affect spine geometry.
In this simulation there is a propagating reaction wave using a
highly abstracted equation, whose product diffuses into the spines and
makes them bigger.
"""
makeModel()
elec = moose.element( '/model/elec' )
elec.setSpineAndPsdMesh( moose.element('/model/chem/spine'), moose.element('/model/chem/psd') )
eHead = moose.wildcardFind( '/model/elec/#head#' )
oldDia = [ i.diameter for i in eHead ]
graphs = moose.Neutral( '/graphs' )
#makePlot( 'psd_x', moose.vec( '/model/chem/psd/x' ), 'getN' )
#makePlot( 'head_x', moose.vec( '/model/chem/spine/x' ), 'getN' )
makePlot( 'dend_x', moose.vec( '/model/chem/dend/x' ), 'getN' )
makePlot( 'dend_z', moose.vec( '/model/chem/dend/z' ), 'getN' )
makePlot( 'head_z', moose.vec( '/model/chem/spine/z' ), 'getN' )
makePlot( 'psd_z', moose.vec( '/model/chem/psd/z' ), 'getN' )
makePlot( 'headDia', eHead, 'getDiameter' )
'''
debug = moose.PyRun( '/pyrun' )
debug.tick = 10
debug.runString = """print "RUNNING: ", moose.element( '/model/chem/psd/z' ).n, moose.element( '/model/elec/head0' ).diameter"""
'''
moose.reinit()
moose.start( runtime )
displayPlots()
pylab.plot( oldDia, label = 'old Diameter' )
pylab.plot( [ i.diameter for i in eHead ], label = 'new Diameter' )
pylab.legend()
pylab.show()
app = QtGui.QApplication(sys.argv)
#widget = mv.MoogliViewer( '/model' )
morphology = moogli.read_morphology_from_moose( name="", path = '/model/elec' )
widget = moogli.MorphologyViewerWidget( morphology )
widget.show()
return app.exec_()
quit()
# Run the 'main' if this script is executed standalone.
def showVisualization():
makeModel()
elec = moose.element( '/model/elec' )
elec.setSpineAndPsdMesh( moose.element('/model/chem/spine'), moose.element('/model/chem/psd') )
eHead = moose.wildcardFind( '/model/elec/#head#' )
oldDia = [ i.diameter for i in eHead ]
graphs = moose.Neutral( '/graphs' )
#makePlot( 'psd_x', moose.vec( '/model/chem/psd/x' ), 'getN' )
#makePlot( 'head_x', moose.vec( '/model/chem/spine/x' ), 'getN' )
makePlot( 'dend_x', moose.vec( '/model/chem/dend/x' ), 'getN' )
dendZ = makePlot( 'dend_z', moose.vec( '/model/chem/dend/z' ), 'getN' )
makePlot( 'head_z', moose.vec( '/model/chem/spine/z' ), 'getN' )
psdZ = makePlot( 'psd_z', moose.vec( '/model/chem/psd/z' ), 'getN' )
diaTab = makePlot( 'headDia', eHead, 'getDiameter' )
# print diaTab[0].vector[-1]
# return
dendrite = moose.element("/model/elec/dend")
dendrites = [dendrite.path + "/" + str(i) for i in range(len(dendZ))]
# print dendrites
moose.reinit()
spineHeads = moose.wildcardFind( '/model/elec/#head#')
# print moose.wildcardFind( '/model/elec/##')
# print "dendZ", readValues(dendZ)
# print dendrite
app = QtGui.QApplication(sys.argv)
viewer = create_viewer("/model/elec", dendrite, dendZ, diaTab, psdZ)
viewer.showMaximized()
viewer.start()
return app.exec_()
def create_viewer(path, moose_dendrite, dendZ, diaTab, psdZ):
network = moogli.extensions.moose.read(path=path,
vertices=15)
dendrite = network.groups["dendrite"].shapes[moose_dendrite.path]
chem_compt_group = dendrite.subdivide(len(dendZ))
normalizer = moogli.utilities.normalizer(0.0,
300.0,
clipleft=True,
clipright=True)
colormap = moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow)
mapper = moogli.utilities.mapper(colormap, normalizer)
def readValues(tables):
values = []
for i in range(len(tables)):
values.append(tables[i].vector[-1])
return values
def prelude(view):
view.home()
view.pitch(math.pi / 3.0)
view.zoom(0.3)
network.groups["soma"].set("color", moogli.colors.RED)
network.groups["spine"].groups["shaft"].set("color",
moogli.colors.RED)
def interlude(view):
moose.start(frameruntime)
network.groups["spine"].groups["head"].set("radius",
readValues(diaTab),
lambda x: x * 0.5e6)
network.groups["spine"].groups["head"].set("color",
readValues(psdZ),
mapper)
chem_compt_group.set("color",
readValues(dendZ),
mapper)
if moose.element("/clock").currentTime >= runtime:
view.stop()
viewer = moogli.Viewer("Viewer")
viewer.attach_shapes(network.shapes.values())
viewer.detach_shape(dendrite)
viewer.attach_shapes(chem_compt_group.shapes.values())
view = moogli.View("main-view",
prelude=prelude,
interlude=interlude)
cb = moogli.widgets.ColorBar(id="cb",
title="Molecule #",
text_color=moogli.colors.BLACK,
position=moogli.geometry.Vec3f(0.975, 0.5, 0.0),
size=moogli.geometry.Vec3f(0.30, 0.05, 0.0),
text_font="/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-R.ttf",
orientation=math.pi / 2.0,
text_character_size=20,
label_formatting_precision=0,
colormap=moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow),
color_resolution=100,
scalar_range=moogli.geometry.Vec2f(0.0,
300.0))
view.attach_color_bar(cb)
viewer.attach_view(view)
return viewer
if __name__ == '__main__':
showVisualization()
| gpl-2.0 |
ebachelet/pyLIMA | setup.py | 1 | 1036 | from setuptools import setup, find_packages, Extension
setup(
name="pyLIMA",
version="0.8.3",
description="Microlsening analysis package.",
keywords='Microlsening analysis package.',
author="Etienne Bachelet",
author_email="[email protected]",
license='GPL-3.0',
url="http://github.com/ebachelet/pyLIMA",
download_url = 'https://github.com/ebachelet/pyLIMA/archive/0.1.tar.gz',
packages=find_packages('.'),
include_package_data=True,
install_requires=['scipy','numpy','matplotlib','astropy','emcee','numba','bokeh','PyAstronomy','VBBinaryLensing','Cython'],
python_requires='>=3.6,<4',
test_suite="nose.collector",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 3',
],
package_data={
'': ['Claret2011.fits','Yoo_B0B1.dat'],
},
)
| gpl-3.0 |
sgenoud/scikit-learn | sklearn/linear_model/__init__.py | 2 | 1329 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import Lars, LassoLars, lars_path, LARS, LassoLARS, \
LarsCV, LassoLarsCV, LassoLarsIC
from .coordinate_descent import Lasso, ElasticNet, LassoCV, ElasticNetCV, \
lasso_path, enet_path
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV, \
ridge_regression
from .logistic import LogisticRegression
from .omp import orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit
from .perceptron import Perceptron
from .randomized_l1 import RandomizedLasso, RandomizedLogisticRegression, \
lasso_stability_path
from . import sparse
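# A minimal usage sketch, assuming a scikit-learn build that matches the
# imports above; every estimator follows the common fit/predict interface:
#
#     from sklearn.linear_model import Ridge
#     model = Ridge(alpha=1.0)
#     model.fit([[0.0], [1.0], [2.0]], [0.0, 1.0, 2.0])
#     model.predict([[3.0]])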
| bsd-3-clause |
dmsurti/mayavi | mayavi/tools/figure.py | 1 | 11152 | """
Functions related to creating the engine or the figures.
"""
# Author: Gael Varoquaux <[email protected]>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import gc
import warnings
import copy
import numpy as np
# Enthought imports
from pyface.timer.api import do_later
# imports
from tvtk.api import tvtk
from mayavi.core.scene import Scene
from mayavi.core.registry import registry
from .camera import view
from .engine_manager import get_engine, options, set_engine
######################################################################
# A list to store the allocated scene numbers
__scene_number_list = set((0,))
def figure(figure=None, bgcolor=None, fgcolor=None, engine=None,
size=(400, 350)):
""" Creates a new scene or retrieves an existing scene. If the mayavi
engine is not running this also starts it.
**Keyword arguments**
:figure: The name of the figure, or handle to it.
:bgcolor: The color of the background (None is default).
:fgcolor: The color of the foreground, that is the color of all text
annotation labels (axes, orientation axes, scalar bar
labels). It should be sufficiently far from `bgcolor`
to see the annotation texts. (None is default).
:engine: The mayavi engine that controls the figure.
:size: The size of the scene created, in pixels. May not apply
           for certain scene viewers.
"""
if isinstance(figure, Scene):
if figure.scene is None:
engine = registry.find_scene_engine(figure)
else:
engine = registry.find_scene_engine(figure.scene)
set_engine(engine)
engine.current_scene = figure
else:
if engine is None:
engine = get_engine()
if figure is None:
name = max(__scene_number_list) + 1
__scene_number_list.update((name,))
name = 'Mayavi Scene %d' % name
engine.new_scene(name=name, size=size)
engine.current_scene.name = name
else:
if type(figure) in (int, np.int, np.int0, np.int8,
np.int16, np.int32, np.int64):
name = int(figure)
__scene_number_list.update((name,))
name = 'Mayavi Scene %d' % name
else:
name = str(figure)
            # Go looking in the engine to see if the scene is not already
# running
for scene in engine.scenes:
if scene.name == name:
engine.current_scene = scene
return scene
else:
engine.new_scene(name=name, size=size)
engine.current_scene.name = name
figure = engine.current_scene
scene = figure.scene
if scene is not None:
if hasattr(scene, 'isometric_view'):
scene.isometric_view()
else:
# Not every viewer might implement this method
view(40, 50)
scene = figure.scene
if scene is not None:
if bgcolor is None:
bgcolor = options.background_color
scene.background = bgcolor
if fgcolor is None:
fgcolor = options.foreground_color
scene.foreground = fgcolor
return figure
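# A minimal usage sketch for figure(), kept as a comment so importing this
# module stays side-effect free (the scene name and colors are illustrative):
#
#     fig = figure('probe scan', bgcolor=(1, 1, 1), size=(600, 400))
#     figure('probe scan')   # returns the same scene instead of creating one
#     figure(2)              # integer names map to 'Mayavi Scene 2'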
def gcf(engine=None):
"""Return a handle to the current figure.
You can supply the engine from which you want to retrieve the
current figure, if you have several mayavi engines.
"""
if engine is None:
engine = get_engine()
scene = engine.current_scene
if scene is None:
return figure(engine=engine)
return scene
def clf(figure=None):
"""Clear the current figure.
You can also supply the figure that you want to clear.
"""
try:
if figure is None:
scene = gcf()
else:
scene = figure
disable_render = scene.scene.disable_render
scene.scene.disable_render = True
scene.children[:] = []
scene._mouse_pick_dispatcher.clear_callbacks()
scene.scene.disable_render = disable_render
except AttributeError:
pass
gc.collect()
def close(scene=None, all=False):
""" Close a figure window
close() by itself closes the current figure.
close(num) closes figure number num.
close(name) closes figure named name.
close(figure), where figure is a scene instance, closes that
figure.
close(all=True) closes all figures controlled by mlab
"""
if all is True:
engine = get_engine()
# We need the copy, as the list gets pruned as we close scenes
for scene in copy.copy(engine.scenes):
engine.close_scene(scene)
return
if not isinstance(scene, Scene):
engine = get_engine()
if scene is None:
scene = engine.current_scene
else:
if type(scene) in (int, np.int, np.int0, np.int8,
np.int16, np.int32, np.int64):
scene = int(scene)
name = 'Mayavi Scene %d' % scene
else:
name = str(scene)
            # Go looking in the engine to see if the scene is not already
# running
for scene in engine.scenes:
if scene.name == name:
break
else:
warnings.warn('Scene %s not managed by mlab' % name)
return
else:
if scene.scene is None:
engine = registry.find_scene_engine(scene)
else:
engine = registry.find_scene_engine(scene.scene)
engine.close_scene(scene)
def draw(figure=None):
""" Forces a redraw of the current figure.
"""
if figure is None:
figure = gcf()
figure.render()
def savefig(filename, size=None, figure=None, magnification='auto',
**kwargs):
""" Save the current scene.
    The output format is deduced from the extension of the filename.
Possibilities are png, jpg, bmp, tiff, ps, eps, pdf, rib (renderman),
oogl (geomview), iv (OpenInventor), vrml, obj (wavefront)
**Parameters**
:size: the size of the image created (unless magnification is
set, in which case it is the size of the window used
for rendering).
:figure: the figure instance to save to a file.
:magnification: the magnification is the scaling between the
pixels on the screen, and the pixels in the
file saved. If you do not specify it, it will be
calculated so that the file is saved with the
specified size. If you specify a magnification,
Mayavi will use the given size as a screen size,
and the file size will be 'magnification * size'.
**Notes**
If the size specified is larger than the window size, and no
magnification parameter is passed, the magnification of the scene
is changed so that the image created has the requested size.
Please note that if you are trying to save images with sizes
larger than the window size, there will be additional computation
cost.
Any extra keyword arguments are passed along to the respective
image format's save method.
"""
if figure is None:
figure = gcf()
current_mag = figure.scene.magnification
try:
if size is not None:
current_x, current_y = tuple(figure.scene.get_size())
target_x, target_y = size
            if magnification == 'auto':
magnification = max(target_x // current_x,
target_y // current_y) + 1
target_x = int(target_x / magnification)
target_y = int(target_y / magnification)
size = target_x, target_y
        elif magnification == 'auto':
magnification = 1
figure.scene.magnification = int(magnification)
figure.scene.save(filename,
size=size,
**kwargs)
finally:
figure.scene.magnification = int(current_mag)
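# A minimal usage sketch for savefig(); the magnification mechanics follow the
# docstring above and the file names are placeholders:
#
#     savefig('scene.png', size=(1600, 1200))   # format deduced from extension
#     savefig('scene.pdf', magnification=2)     # explicit screen-to-file scaling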
def sync_camera(reference_figure, target_figure):
""" Synchronise the camera of the target_figure on the camera of the
reference_figure.
"""
reference_figure.scene._renderer.sync_trait('active_camera',
target_figure.scene._renderer)
target_figure.scene._renderer.active_camera.on_trait_change(
lambda: do_later(target_figure.scene.render))
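# e.g. sync_camera(fig_a, fig_b) keeps fig_b's camera following fig_a's,
# where fig_a and fig_b are two figures returned by figure() (a sketch).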
def screenshot(figure=None, mode='rgb', antialiased=False):
""" Return the current figure pixmap as an array.
**Parameters**
:figure: a figure instance or None, optional
If specified, the figure instance to capture the view of.
:mode: {'rgb', 'rgba'}
The color mode of the array captured.
:antialiased: {True, False}
Use anti-aliasing for rendering the screenshot.
Uses the number of aa frames set by
figure.scene.anti_aliasing_frames
**Notes**
On most systems, this works similarly to taking a screenshot of
the rendering window. Thus if it is hidden by another window, you
will capture the other window. This limitation is due to the
heavy use of the hardware graphics system.
**Examples**
This function can be useful for integrating 3D plotting with
Mayavi in a 2D plot created by matplotlib.
>>> from mayavi import mlab
>>> mlab.test_plot3d()
>>> arr = mlab.screenshot()
>>> import pylab as pl
>>> pl.imshow(arr)
>>> pl.axis('off')
>>> pl.show()
"""
if figure is None:
figure = gcf()
x, y = tuple(figure.scene.get_size())
# Try to lift the window
figure.scene._lift()
if mode == 'rgb':
out = tvtk.UnsignedCharArray()
shape = (y, x, 3)
pixel_getter = figure.scene.render_window.get_pixel_data
pg_args = (0, 0, x - 1, y - 1, 1, out)
elif mode == 'rgba':
out = tvtk.FloatArray()
shape = (y, x, 4)
pixel_getter = figure.scene.render_window.get_rgba_pixel_data
pg_args = (0, 0, x - 1, y - 1, 1, out)
else:
raise ValueError('mode type not understood')
if antialiased:
# save the current aa value to restore it later
old_aa = figure.scene.render_window.aa_frames
figure.scene.render_window.aa_frames = figure.scene.anti_aliasing_frames
figure.scene.render()
pixel_getter(*pg_args)
figure.scene.render_window.aa_frames = old_aa
figure.scene.render()
else:
pixel_getter(*pg_args)
# Return the array in a way that pylab.imshow plots it right:
out = out.to_array()
out.shape = shape
out = np.flipud(out)
return out
| bsd-3-clause |
snnn/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py | 116 | 5164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
DiamondLightSource/auto_tomo_calibration-experimental | measure_resolution/lmfit-py/examples/fit_with_inequality.py | 4 | 1227 | from numpy import linspace, random
import matplotlib.pyplot as plt
from lmfit import Parameters, Parameter, Minimizer, report_fit
from lmfit.lineshapes import gaussian, lorentzian
def residual(pars, x, data):
model = gaussian(x,
pars['amp_g'].value,
pars['cen_g'].value,
pars['wid_g'].value)
model += lorentzian(x,
pars['amp_l'].value,
pars['cen_l'].value,
pars['wid_l'].value)
return (model - data)
n = 601
random.seed(0)
x = linspace(0, 20.0, n)
data = (gaussian(x, 21, 6.1, 1.2) +
lorentzian(x, 10, 9.6, 1.3) +
random.normal(scale=0.1, size=n))
pfit = Parameters()
pfit.add(name='amp_g', value=10)
pfit.add(name='amp_l', value=10)
pfit.add(name='cen_g', value=5)
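# The inequality cen_l >= cen_g is imposed indirectly: peak_split is bounded
# to [0, 5] below and cen_l is defined through a constraint expression, so the
# Lorentzian centre can never drop below the Gaussian centre.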
pfit.add(name='peak_split', value=2.5, min=0, max=5, vary=True)
pfit.add(name='cen_l', expr='peak_split+cen_g')
pfit.add(name='wid_g', value=1)
pfit.add(name='wid_l', expr='wid_g')
mini = Minimizer(residual, pfit, fcn_args=(x, data))
out = mini.leastsq()
report_fit(out.params)
best_fit = data + out.residual
plt.plot(x, data, 'bo')
plt.plot(x, best_fit, 'r--')
plt.show()
| apache-2.0 |
pyinduct/pyinduct | pyinduct/tests/test_eigenfunctions.py | 3 | 22740 | import unittest
import numpy as np
import pyinduct as pi
import pyinduct.parabolic as parabolic
from pyinduct.tests import show_plots
import matplotlib.pyplot as plt
class TestAddMulFunction(unittest.TestCase):
def test_it(self):
a_mat = np.diag(np.ones(3))
b = np.array(
[pi.AddMulFunction(lambda z: z), pi.AddMulFunction(lambda z: 2 * z), pi.AddMulFunction(lambda z: 3 * z)])
x = np.dot(b, a_mat)
self.assertAlmostEqual([4, 40, 300], [x[0](4), x[1](20), x[2](100)])
class TestSecondOrderEigenfunction(unittest.TestCase):
def test_error_raiser(self):
param = [1, 1, 1, 1, 1]
l = 1
z = pi.Domain((0, l), 2)
n = 10
eig_val, eig_funcs = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, scale=np.ones(n))
eig_freq = pi.SecondOrderDirichletEigenfunction.eigval_tf_eigfreq(
param, eig_val=eig_val)
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n)
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n, scale=np.ones(n))
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, eig_val=eig_val, scale=np.ones(n))
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, eig_freq=eig_freq, scale=np.ones(n))
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n, scale=np.ones(n + 1))
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, eig_val=eig_val, scale=np.ones(n + 1))
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n, eig_freq=eig_freq)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, eig_val=eig_val, eig_freq=eig_freq)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
z, param=param, n=n, eig_val=eig_val, eig_freq=eig_freq)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
pi.Domain((1, 2), 2), param=param, n=n)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
pi.Domain((0, -2), 2), param=param, n=n)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
pi.Domain((0, 0), 2), param=param, n=n)
with self.assertRaises(ValueError):
_, _ = pi.SecondOrderDirichletEigenfunction.cure_interval(
(0, 1), param=param, n=n)
class FiniteTransformTest(unittest.TestCase):
def test_trivial(self):
l = 5
k = 5
k1, k2, b = parabolic.control.split_domain(k, 0, l, mode='coprime')[0:3]
a_mat = parabolic.general.get_in_domain_transformation_matrix(
k1, k2, mode="2n")
self.assertAlmostEqual(b, 0)
self.assertTrue(all(np.isclose(a_mat, np.linalg.inv(a_mat)).all(1)))
k1, k2, b = parabolic.control.split_domain(k, l, l, mode='coprime')[0:3]
b_mat = parabolic.general.get_in_domain_transformation_matrix(
k1, k2, mode="2n")
self.assertAlmostEqual(b, l)
self.assertTrue(
all(np.isclose(b_mat, np.diag(np.ones(b_mat.shape[0]))).all(1)))
def test_paper_example(self):
l = 5
k = 5
b_desired = 2
k1, k2, b = parabolic.control.split_domain(k,
b_desired,
l,
mode='coprime')[0:3]
m_mat = np.linalg.inv(
parabolic.general.get_in_domain_transformation_matrix(k1,
k2,
mode="2n"))
shifted_func = pi.FiniteTransformFunction(
np.cos,
m_mat,
l,
nested_lambda=False)
shifted_func_nl = pi.FiniteTransformFunction(
np.cos,
m_mat,
l,
nested_lambda=True)
z = np.linspace(0, l, 1000)
np.testing.assert_array_almost_equal(
shifted_func(z), shifted_func_nl(z))
if show_plots:
plt.figure()
plt.plot(z, shifted_func(z))
plt.plot(z, np.cos(z))
plt.show()
def test_const(self):
n = 5
k = 5
b_desired = 2
l = 5
z = pi.Domain((0, l), 2)
params = [2, 1.5, -3, 1, .5]
k1, k2, b = parabolic.control.split_domain(k,
b_desired,
l,
mode='coprime')[0:3]
M = np.linalg.inv(
parabolic.general.get_in_domain_transformation_matrix(k1,
k2,
mode="2n"))
eig_val, eig_base = pi.SecondOrderRobinEigenfunction.cure_interval(
z, param=params, n=n)
shifted_eig_base = pi.Base(np.array(
[pi.FiniteTransformFunction(
func, M, l, nested_lambda=False)
for func in eig_base]))
shifted_eig_base_nl = pi.Base(np.array(
[pi.FiniteTransformFunction(
func, M, l, nested_lambda=True)
for func in eig_base]))
zz = np.linspace(0, l, 1000)
for f1, f2 in zip(shifted_eig_base, shifted_eig_base_nl):
np.testing.assert_array_almost_equal(f1(zz), f2(zz))
if show_plots:
pi.visualize_functions(eig_base.fractions, 1000)
pi.visualize_functions(shifted_eig_base.fractions, 1000)
def calc_dirichlet_eigenvalues(params):
"""
    Estimate the eigenvalues of a 2nd order Dirichlet problem
    by approximating it using polynomial shape functions.
"""
spat_dom, lag_base = pi.cure_interval(pi.LagrangeNthOrder,
interval=params.domain,
order=3,
node_count=31)
pi.register_base("fem_base", lag_base)
old_params = [params.a2, params.a1, params.a0, -params.alpha0, params.beta0]
weak_form = pi.parabolic.get_parabolic_dirichlet_weak_form("fem_base",
"fem_base",
None,
old_params,
params.domain)
can_form = pi.parse_weak_formulation(weak_form, finalize=True)
ss_form = pi.create_state_space(can_form)
sys_mat = ss_form.A[1]
eig_vals, eig_vecs = np.linalg.eig(sys_mat)
real_idx = np.where(np.imag(eig_vals) == 0)
abs_idx = np.argsort(np.abs(eig_vals[real_idx]))
filtered_vals = eig_vals[real_idx][abs_idx]
print(filtered_vals)
return filtered_vals
class TestSecondOrderEigenVector(unittest.TestCase):
def setUp(self):
self.domain = pi.Domain(bounds=(0, 1), num=100)
self.cnt = 10
self.params_dirichlet = pi.SecondOrderOperator(a2=1,
a1=0,
a0=1,
alpha1=0,
alpha0=1,
beta1=0,
beta0=1,
domain=(0, 1))
if 1:
self.eig_dirichlet = None
self.p_dirichlet = [(1j*n * np.pi, -1j * n * np.pi)
for n in range(1, self.cnt + 1)]
else:
# TODO make computation by approximation work to check to other two
self.eig_dirichlet = \
calc_dirichlet_eigenvalues(self.params_dirichlet)[:self.cnt]
self.p_dirichlet = \
pi.SecondOrderEigenVector.convert_to_characteristic_root(
self.params_dirichlet,
self.eig_dirichlet
)
self.params_neumann = pi.SecondOrderOperator(a2=1,
a1=0,
a0=1,
alpha1=1,
alpha0=0,
beta1=1,
beta0=0)
self.eig_neumann = None
self.p_neumann = None
# self.p_neumann = np.array([0, np.pi, 2 * np.pi, 3 * np.pi],
# dtype=complex)
self.params_robin = pi.Parameters(a2=1,
a1=0,
a0=1,
alpha1=1,
alpha0=2,
beta1=1,
beta0=-2)
self.eig_robin = None
self.p_robin = None
# self.p_robin = np.array([(2.39935728j, -2.39935728j,),
# (5.59677209j, -5.59677209j),
# (8.98681892j, -8.98681892j)])
def test_dirichlet(self):
print("dirichlet case")
self._test_helper(self.params_dirichlet,
self.eig_dirichlet,
self.p_dirichlet)
def test_neumann(self):
print("neumann case")
self._test_helper(self.params_neumann,
self.eig_neumann,
self.p_neumann)
def test_robin(self):
print("robin case")
self._test_helper(self.params_robin,
self.eig_robin,
self.p_robin)
def _test_helper(self, params, l_ref, p_ref):
eig_base = pi.SecondOrderEigenVector.cure_interval(self.domain,
params=params,
count=self.cnt,
derivative_order=2,
debug=False)
char_roots = eig_base.get_attribute("char_pair")
eig_values = pi.SecondOrderEigenVector.convert_to_eigenvalue(params,
char_roots)
# if show_plots:
# pi.visualize_functions(eig_base.fractions)
# test eigenvalues
self.assertEqual(len(eig_values), self.cnt)
if l_ref is not None:
np.testing.assert_array_equal(eig_values, l_ref, verbose=True)
if p_ref is not None:
print(char_roots)
print(p_ref)
np.testing.assert_array_almost_equal(char_roots, p_ref,
decimal=5, verbose=True)
# test eigenvectors
for fraction, lam in zip(eig_base.fractions, eig_values):
# test whether the operator is satisfied
left = (params.a2 * fraction.derive(2)(self.domain.points)
+ params.a1 * fraction.derive(1)(self.domain.points)
+ params.a0 * fraction(self.domain.points))
right = lam * fraction(self.domain.points)
np.testing.assert_array_almost_equal(left, right, verbose=True)
# test whether the bcs are fulfilled
bc1 = (params.alpha0 * fraction(self.domain.bounds[0])
+ params.alpha1 * fraction.derive(1)(self.domain.bounds[0]))
bc2 = (params.beta0 * fraction(self.domain.bounds[1])
+ params.beta1 * fraction.derive(1)(self.domain.bounds[1]))
np.testing.assert_array_almost_equal(bc1, 0, decimal=5)
np.testing.assert_array_almost_equal(bc2, 0, decimal=5)
# check if they are orthonormal
product_mat = pi.calculate_scalar_product_matrix(eig_base, eig_base)
np.testing.assert_array_almost_equal(product_mat,
np.eye(self.cnt))
return eig_base
class TestEigenvalues(unittest.TestCase):
def test_dirichlet(self):
desired_eig_freq = [(i + 1) * np.pi for i in range(4)]
eig_freq, _ = pi.SecondOrderDirichletEigenfunction.eigfreq_eigval_hint(
[1, 2, 3, None, None],
1,
4)
self.assertTrue(all(np.isclose(eig_freq, desired_eig_freq)))
def test_robin(self):
param_desired_ef_pairs = [
([.5, 0, 6, -1, -1], [1.543405j, 2.331122, 5.950173, 9.208434]),
([1, 0, 1, -2, -2], [2.39935728j, 0, 5.59677209, 8.98681892]),
([1, 0, 1, 0, 0], [0j, 3.14159265, 6.28318531, 9.42477796]),
([1, 2, 1, 3, 4], [2.06301691, 4.46395118, 7.18653501, 10.09113552]),
([1, -6, 0, -5, -5], [8.000003j, 1.84683426j, 4.86945051, 8.43284888])]
for param, desired_eig_freq in param_desired_ef_pairs:
eig_freq, _ = pi.SecondOrderRobinEigenfunction.eigfreq_eigval_hint(
param, 1, 4, show_plot=False)
np.testing.assert_array_almost_equal(eig_freq, desired_eig_freq)
class TestSecondOrderEigenvalueProblemFunctions(unittest.TestCase):
def setUp(self):
self.param = [2, 1.5, -3, -5, -.5]
self.z = pi.Domain((0, 1), num=100)
self.n = 10
def evp_eq(self, a2, a1, a0, boundary_check):
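        # Checks that each eigenpair satisfies the interior ODE
        # a2*f'' + a1*f' + a0*f = eig_v * f (both sides divided by eig_v
        # before comparison), then delegates the boundary conditions to
        # the supplied boundary_check callback.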
for eig_v, eig_f in zip(self.eig_val, self.eig_funcs):
np.testing.assert_array_almost_equal(
(a2 * eig_f.derive(2)(self.z)
+ a1 * eig_f.derive(1)(self.z)
+ a0 * eig_f(self.z)) / eig_v,
eig_v.real * eig_f(self.z) / eig_v,
decimal=4)
boundary_check(eig_v, eig_f, self.z[-1])
@unittest.skip("not implemented")
def test_dirichlet_robin_constant_coefficient(self):
def boundary_check(eig_v, eig_f, l):
np.testing.assert_array_almost_equal(eig_f(0) / eig_v, 0)
np.testing.assert_array_almost_equal(eig_f.derive(1)(l) / eig_v,
-beta * eig_f(l) / eig_v)
a2, a1, a0, _, beta = self.param
param = [a2, a1, a0, None, beta]
eig_freq, self.eig_val \
= pi.SecondOrderDiriRobEigenfunction.eigfreq_eigval_hint(
self.z, param=param, n=self.n, show_plot=True)
_, self.eig_funcs = pi.SecondOrderDiriRobEigenfunction.cure_interval(
self.z, param=param, eig_freq=eig_freq)
[plt.plot(self.z, func(self.z)) for func in self.eig_funcs]
plt.show()
self.evp_eq(a2, a1, a0, boundary_check)
self.spatially_varying_coefficient(boundary_check)
@unittest.skip("not implemented")
def test_robin_dirichlet_constant_coefficient(self):
def boundary_check(eig_v, eig_f, l):
np.testing.assert_array_almost_equal(eig_f.derive(1)(0) / eig_v,
alpha * eig_f(0) / eig_v)
np.testing.assert_array_almost_equal(eig_f(l) / eig_v, 0)
a2, a1, a0, alpha, _ = self.param
param = [a2, a1, a0, alpha, None]
eig_freq, self.eig_val \
= pi.SecondOrderRobDiriEigenfunction.eigfreq_eigval_hint(
self.z, param=param, n=self.n, show_plot=True)
_, self.eig_funcs = pi.SecondOrderRobDiriEigenfunction.cure_interval(
self.z, param=param, eig_freq=eig_freq)
[plt.plot(self.z, func(self.z)) for func in self.eig_funcs]
plt.show()
self.evp_eq(a2, a1, a0, boundary_check)
self.spatially_varying_coefficient(boundary_check)
def test_dirichlet_constant_coefficient(self):
def boundary_check(eig_v, eig_f, l):
np.testing.assert_array_almost_equal(eig_f(0) / eig_v, 0)
np.testing.assert_array_almost_equal(eig_f(l) / eig_v, 0)
a2, a1, a0, _, _ = self.param
param = [a2, a1, a0, None, None]
eig_freq, self.eig_val \
= pi.SecondOrderDirichletEigenfunction.eigfreq_eigval_hint(
param, self.z[-1], self.n)
_, self.eig_funcs = pi.SecondOrderDirichletEigenfunction.cure_interval(
self.z, param=param, eig_freq=eig_freq)
self.evp_eq(a2, a1, a0, boundary_check)
self.spatially_varying_coefficient(boundary_check)
def test_robin_constant_coefficient(self):
def boundary_check(eig_v, eig_f, l):
np.testing.assert_array_almost_equal(eig_f.derive(1)(0) / eig_v,
alpha * eig_f(0) / eig_v)
np.testing.assert_array_almost_equal(eig_f.derive(1)(l) / eig_v,
- beta * eig_f(l) / eig_v)
a2, a1, a0, alpha, beta = self.param
eig_freq, self.eig_val \
= pi.SecondOrderRobinEigenfunction.eigfreq_eigval_hint(
self.param,
self.z[-1],
self.n,
show_plot=show_plots)
_, self.eig_funcs = pi.SecondOrderRobinEigenfunction.cure_interval(
self.z, param=self.param, eig_freq=eig_freq)
self.evp_eq(a2, a1, a0, boundary_check)
self.spatially_varying_coefficient(boundary_check)
if show_plots:
plt.show()
def spatially_varying_coefficient(self, boundary_check):
a2, a1, a0, _, _ = self.param
a2_z = pi.ConstantFunction(a2)
a1_z = pi.ConstantFunction(a1)
a0_z = pi.ConstantFunction(a0)
transformed_eig_funcs = [pi.TransformedSecondOrderEigenfunction(
self.eig_val[i],
[self.eig_funcs[i](0), self.eig_funcs[i].derive(1)(0), 0, 0],
[a2_z, a1_z, a0_z],
self.z)
for i in range(len(self.eig_funcs))]
# TODO: provide second derivative of transformed eigenfunctions
for i in range(len(self.eig_funcs)):
eig_f = transformed_eig_funcs[i]
eig_v = self.eig_val[i]
# interval
np.testing.assert_array_almost_equal(
a2_z(self.z) * self.eig_funcs[i].derive(2)(self.z)
+ a1_z(self.z) * eig_f.derive(1)(self.z)
+ a0_z(self.z) * eig_f(self.z),
eig_v.real * eig_f(self.z),
decimal=2)
boundary_check(eig_v, eig_f, self.z[-1])
class IntermediateTransformationTest(unittest.TestCase):
def test_it(self):
# system/simulation parameters
self.l = 1
self.spatial_domain = (0, self.l)
self.spatial_disc = 30
self.n = 10
# original system parameters
a2 = 1.5
a1 = 2.5
a0 = 28
alpha = -2
beta = -3
self.param = [a2, a1, a0, alpha, beta]
adjoint_param = pi.SecondOrderEigenfunction.get_adjoint_problem(self.param)
# target system parameters (controller parameters)
a1_t = -5
a0_t = -25
alpha_t = 3
beta_t = 2
# a1_t = a1; a0_t = a0; alpha_t = alpha; beta_t = beta
self.param_t = [a2, a1_t, a0_t, alpha_t, beta_t]
# original intermediate ("_i") and target intermediate ("_ti") system parameters
_, _, a0_i, self.alpha_i, self.beta_i = \
parabolic.general.eliminate_advection_term(self.param, self.l)
self.param_i = a2, 0, a0_i, self.alpha_i, self.beta_i
_, _, a0_ti, self.alpha_ti, self.beta_ti = \
parabolic.general.eliminate_advection_term(self.param_t, self.l)
self.param_ti = a2, 0, a0_ti, self.alpha_ti, self.beta_ti
# create (not normalized) eigenfunctions
self.eig_freq, self.eig_val = \
pi.SecondOrderRobinEigenfunction.eigfreq_eigval_hint(self.param,
self.l,
self.n)
init_eig_base = pi.Base([pi.SecondOrderRobinEigenfunction(om,
self.param,
self.spatial_domain[-1])
for om in self.eig_freq])
init_adjoint_eig_funcs = pi.Base([pi.SecondOrderRobinEigenfunction(om,
adjoint_param,
self.spatial_domain[-1])
for om in self.eig_freq])
# normalize eigenfunctions and adjoint eigenfunctions
self.eig_base, self.adjoint_eig_funcs = pi.normalize_base(init_eig_base, init_adjoint_eig_funcs)
# eigenvalues and -frequencies test
eig_freq_i, eig_val_i = pi.SecondOrderRobinEigenfunction.eigfreq_eigval_hint(self.param_i, self.l, self.n)
self.assertTrue(all(np.isclose(self.eig_val, eig_val_i)))
calc_eig_freq = np.sqrt((a0_i - eig_val_i) / a2)
self.assertTrue(all(np.isclose(calc_eig_freq, eig_freq_i)))
# intermediate (_i) eigenfunction test
eig_funcs_i = np.array([pi.SecondOrderRobinEigenfunction(eig_freq_i[i], self.param_i, self.spatial_domain[-1],
self.eig_base.fractions[i](0))
for i in range(self.n)])
self.assertTrue(all(np.isclose([func(0) for func in eig_funcs_i],
[func(0) for func in self.eig_base.fractions])))
test_vec = np.linspace(0, self.l, 100)
for i in range(self.n):
self.assertTrue(all(np.isclose(self.eig_base.fractions[i](test_vec),
eig_funcs_i[i](test_vec) * np.exp(-a1 / 2 / a2 * test_vec))))
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/tests/io/msgpack/test_format.py | 25 | 2882 | # coding: utf-8
from pandas.io.msgpack import unpackb
def check(src, should, use_list=0):
assert unpackb(src, use_list=use_list) == should
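# The byte strings below follow the msgpack wire format; as a rough reminder:
# 0x00-0x7f positive fixint, 0xe0-0xff negative fixint, 0x80-0x8f fixmap,
# 0x90-0x9f fixarray, 0xa0-0xbf fixraw, 0xc0 nil, 0xc2/0xc3 false/true,
# 0xcc-0xcf unsigned and 0xd0-0xd3 signed ints of increasing width,
# 0xda/0xdb raw 16/32, 0xdc/0xdd array 16/32, 0xde/0xdf map 16/32.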
def testSimpleValue():
check(b"\x93\xc0\xc2\xc3", (None, False, True, ))
def testFixnum():
check(b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff", ((0,
64,
127, ),
(-32,
-16,
-1, ), ))
def testFixArray():
check(b"\x92\x90\x91\x91\xc0", ((), ((None, ), ), ), )
def testFixRaw():
check(b"\x94\xa0\xa1a\xa2bc\xa3def", (b"", b"a", b"bc", b"def", ), )
def testFixMap():
check(b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80",
{False: {None: None},
True: {None: {}}}, )
def testUnsignedInt():
check(b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00"
b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00"
b"\xce\xff\xff\xff\xff",
(0,
128,
255,
0,
32768,
65535,
0,
2147483648,
4294967295, ), )
def testSignedInt():
check(b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00"
b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00"
b"\xd2\xff\xff\xff\xff", (0,
-128,
-1,
0,
-32768,
-1,
0,
-2147483648,
-1, ))
def testRaw():
check(b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
(b"", b"a", b"ab", b"", b"a", b"ab"))
def testArray():
check(b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00"
b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02"
b"\xc2\xc3", ((), (None, ), (False, True), (), (None, ),
(False, True)))
def testMap():
check(b"\x96"
b"\xde\x00\x00"
b"\xde\x00\x01\xc0\xc2"
b"\xde\x00\x02\xc0\xc2\xc3\xc2"
b"\xdf\x00\x00\x00\x00"
b"\xdf\x00\x00\x00\x01\xc0\xc2"
b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2", ({}, {None: False},
{True: False,
None: False}, {},
{None: False},
{True: False,
None: False}))
| gpl-2.0 |
Intel-tensorflow/tensorflow | tensorflow/python/keras/distribute/dataset_creator_model_fit_ps_only_test.py | 6 | 5290 | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute.coordinator import cluster_coordinator as coordinator_lib
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import callbacks as callbacks_lib
from tensorflow.python.keras.distribute import dataset_creator_model_fit_test_base as test_base
from tensorflow.python.keras.distribute import strategy_combinations
from tensorflow.python.platform import gfile
@ds_combinations.generate(
combinations.combine(
strategy=strategy_combinations.parameter_server_strategies_multi_worker,
mode="eager"))
class DatasetCreatorModelFitParameterServerStrategyOnlyTest(
test_base.DatasetCreatorModelFitTestBase):
def testModelFitWithRunEagerly(self, strategy):
with self.assertRaisesRegex(
ValueError, "When using `Model` with `ParameterServerStrategy`, "
"`run_eagerly` is not supported."):
self._model_fit(strategy, run_eagerly=True)
def testModelFitWithDatasetInstance(self, strategy):
with self.assertRaisesRegex(
NotImplementedError,
"Only `tf.keras.utils.experimental.DatasetCreator`, `tf.Tensor`, "
"numpy arrays and pandas dataframes are supported types at this "
"time."):
self._model_fit(
strategy, x=dataset_ops.DatasetV2.from_tensor_slices([1, 1]))
def testModelPredict(self, strategy):
model, _ = self._model_compile(strategy)
test_data = dataset_ops.DatasetV2.from_tensor_slices(
[1., 2., 3., 1., 5., 1.]).repeat().batch(2)
model.predict(x=test_data, steps=3)
def testClusterCoordinatorSingleInstance(self, strategy):
model = self._model_fit(strategy)
strategy = model.distribute_strategy
self.assertIs(strategy._cluster_coordinator,
coordinator_lib.ClusterCoordinator(strategy))
def testModelFitErrorOnBatchLevelCallbacks(self, strategy):
class BatchLevelCallback(callbacks_lib.Callback):
def on_train_batch_end(self, batch, logs=None):
pass
with self.assertRaisesRegex(ValueError,
"Batch-level `Callback`s are not supported"):
callbacks = [BatchLevelCallback()]
self._model_fit(strategy, callbacks=callbacks)
def testModelFitCallbackSupportsTFLogs(self, strategy):
class MyCallback(callbacks_lib.Callback):
def __init__(self):
super(MyCallback, self).__init__()
# Fetches the RemoteValues if necessary.
self._supports_tf_logs = True
def on_train_batch_end(self, batch, logs=None):
assert isinstance(logs, coordinator_lib.RemoteValue)
my_callback = MyCallback()
callbacks = [my_callback]
self._model_fit(strategy, callbacks=callbacks)
def testModelFitVerbosity(self, strategy):
class MyCallback(callbacks_lib.Callback):
pass
my_callback = MyCallback()
callbacks = [my_callback]
self._model_fit(strategy, callbacks=callbacks)
# PSStrategy should default to epoch-level logging.
self.assertEqual(my_callback.params["verbose"], 2)
def testModelFitTensorBoardEpochLevel(self, strategy):
log_dir = self.get_temp_dir()
callbacks = [callbacks_lib.TensorBoard(log_dir)]
self._model_fit(strategy, callbacks=callbacks)
self.assertTrue(gfile.Exists(log_dir))
files = gfile.ListDirectory(log_dir)
self.assertGreaterEqual(len(files), 1)
def testModelEvaluateWithDatasetInstance(self, strategy):
with self.assertRaisesRegex(
NotImplementedError,
"Only `tf.keras.utils.experimental.DatasetCreator`, `tf.Tensor`, "
"numpy arrays and pandas dataframes are supported types at this "
"time."):
self._model_evaluate(
strategy, x=dataset_ops.DatasetV2.from_tensor_slices([1, 1]))
def testModelEvaluateErrorOnBatchLevelCallbacks(self, strategy):
class BatchLevelCallback(callbacks_lib.Callback):
def on_train_batch_end(self, batch, logs=None):
pass
with self.assertRaisesRegex(ValueError,
"Batch-level `Callback`s are not supported"):
callbacks = [BatchLevelCallback()]
self._model_evaluate(strategy, callbacks=callbacks)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
multi_process_runner.test_main()
| apache-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/delaunay/triangulate.py | 2 | 9909 | from __future__ import print_function
import warnings
# 2.3 compatibility
try:
set
except NameError:
import sets
set = sets.Set
from itertools import izip
import numpy as np
from matplotlib._delaunay import delaunay
from interpolate import LinearInterpolator, NNInterpolator
__all__ = ['Triangulation', 'DuplicatePointWarning']
class DuplicatePointWarning(RuntimeWarning):
"""Duplicate points were passed in to the triangulation routine.
"""
class Triangulation(object):
"""A Delaunay triangulation of points in a plane.
Triangulation(x, y)
x, y -- the coordinates of the points as 1-D arrays of floats
Let us make the following definitions:
npoints = number of points input
nedges = number of edges in the triangulation
ntriangles = number of triangles in the triangulation
point_id = an integer identifying a particular point (specifically, an
index into x and y), range(0, npoints)
edge_id = an integer identifying a particular edge, range(0, nedges)
triangle_id = an integer identifying a particular triangle
range(0, ntriangles)
Attributes: (all should be treated as read-only to maintain consistency)
x, y -- the coordinates of the points as 1-D arrays of floats.
circumcenters -- (ntriangles, 2) array of floats giving the (x,y)
coordinates of the circumcenters of each triangle (indexed by a
triangle_id).
edge_db -- (nedges, 2) array of point_id's giving the points forming
each edge in no particular order; indexed by an edge_id.
triangle_nodes -- (ntriangles, 3) array of point_id's giving the points
forming each triangle in counter-clockwise order; indexed by a
triangle_id.
triangle_neighbors -- (ntriangles, 3) array of triangle_id's giving the
neighboring triangle; indexed by a triangle_id.
The value can also be -1 meaning that that edge is on the convex hull of
the points and there is no neighbor on that edge. The values are ordered
such that triangle_neighbors[tri, i] corresponds with the edge
*opposite* triangle_nodes[tri, i]. As such, these neighbors are also in
counter-clockwise order.
hull -- list of point_id's giving the nodes which form the convex hull
of the point set. This list is sorted in counter-clockwise order.
Duplicate points.
If there are no duplicate points, Triangulation stores the specified
x and y arrays and there is no difference between the client's and
Triangulation's understanding of point indices used in edge_db,
triangle_nodes and hull.
If there are duplicate points, they are removed from the stored
self.x and self.y as the underlying delaunay code cannot deal with
duplicates. len(self.x) is therefore equal to len(x) minus the
number of duplicate points. Triangulation's edge_db, triangle_nodes
and hull refer to point indices in self.x and self.y, for internal
consistency within Triangulation and the corresponding Interpolator
classes. Client code must take care to deal with this in one of
two ways:
1. Ignore the x,y it specified in Triangulation's constructor and
use triangulation.x and triangulation.y instead, as these are
consistent with edge_db, triangle_nodes and hull.
2. If using the x,y the client specified then edge_db,
triangle_nodes and hull should be passed through the function
to_client_point_indices() first.
"""
def __init__(self, x, y):
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
if self.x.shape != self.y.shape or len(self.x.shape) != 1:
raise ValueError("x,y must be equal-length 1-D arrays")
self.old_shape = self.x.shape
duplicates = self._get_duplicate_point_indices()
if len(duplicates) > 0:
warnings.warn(
"Input data contains duplicate x,y points; some values are ignored.",
DuplicatePointWarning,
)
# self.j_unique is the array of non-duplicate indices, in
# increasing order.
self.j_unique = np.delete(np.arange(len(self.x)), duplicates)
self.x = self.x[self.j_unique]
self.y = self.y[self.j_unique]
else:
self.j_unique = None
# If there are duplicate points, need a map of point indices used
# by delaunay to those used by client. If there are no duplicate
# points then the map is not needed. Either way, the map is
# conveniently the same as j_unique, so share it.
self._client_point_index_map = self.j_unique
self.circumcenters, self.edge_db, self.triangle_nodes, \
self.triangle_neighbors = delaunay(self.x, self.y)
self.hull = self._compute_convex_hull()
def _get_duplicate_point_indices(self):
"""Return array of indices of x,y points that are duplicates of
previous points. Indices are in no particular order.
"""
# Indices of sorted x,y points.
j_sorted = np.lexsort(keys=(self.x, self.y))
mask_duplicates = np.hstack([
False,
(np.diff(self.x[j_sorted]) == 0) & (np.diff(self.y[j_sorted]) == 0),
])
# Array of duplicate point indices, in no particular order.
return j_sorted[mask_duplicates]
def _compute_convex_hull(self):
"""Extract the convex hull from the triangulation information.
The output will be a list of point_id's in counter-clockwise order
forming the convex hull of the data set.
"""
border = (self.triangle_neighbors == -1)
edges = {}
edges.update(dict(izip(self.triangle_nodes[border[:,0]][:,1],
self.triangle_nodes[border[:,0]][:,2])))
edges.update(dict(izip(self.triangle_nodes[border[:,1]][:,2],
self.triangle_nodes[border[:,1]][:,0])))
edges.update(dict(izip(self.triangle_nodes[border[:,2]][:,0],
self.triangle_nodes[border[:,2]][:,1])))
# Take an arbitrary starting point and its subsequent node
hull = list(edges.popitem())
while edges:
hull.append(edges.pop(hull[-1]))
# hull[-1] == hull[0], so remove hull[-1]
hull.pop()
return hull
def to_client_point_indices(self, array):
"""Converts any array of point indices used within this class to
refer to point indices within the (x,y) arrays specified in the
constructor before duplicates were removed.
"""
if self._client_point_index_map is not None:
return self._client_point_index_map[array]
else:
return array
def linear_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
assigning a plane to each triangle.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return LinearInterpolator(self, z, default_value)
def nn_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
the natural neighbors method.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return NNInterpolator(self, z, default_value)
def prep_extrapolator(self, z, bbox=None):
if bbox is None:
bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
minx, maxx, miny, maxy = np.asarray(bbox, np.float64)
minx = min(minx, np.minimum.reduce(self.x))
miny = min(miny, np.minimum.reduce(self.y))
maxx = max(maxx, np.maximum.reduce(self.x))
maxy = max(maxy, np.maximum.reduce(self.y))
M = max((maxx-minx)/2, (maxy-miny)/2)
midx = (minx + maxx)/2.0
midy = (miny + maxy)/2.0
xp, yp= np.array([[midx+3*M, midx, midx-3*M],
[midy, midy+3*M, midy-3*M]])
x1 = np.hstack((self.x, xp))
y1 = np.hstack((self.y, yp))
newtri = self.__class__(x1, y1)
# do a least-squares fit to a plane to make pseudo-data
xy1 = np.ones((len(self.x), 3), np.float64)
xy1[:,0] = self.x
xy1[:,1] = self.y
from numpy.dual import lstsq
c, res, rank, s = lstsq(xy1, z)
zp = np.hstack((z, xp*c[0] + yp*c[1] + c[2]))
return newtri, zp
def nn_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.nn_interpolator(zp, default_value)
def linear_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.linear_interpolator(zp, default_value)
def node_graph(self):
"""Return a graph of node_id's pointing to node_id's.
The arcs of the graph correspond to the edges in the triangulation.
{node_id: set([node_id, ...]), ...}
"""
g = {}
for i, j in self.edge_db:
s = g.setdefault(i, set())
s.add(j)
s = g.setdefault(j, set())
s.add(i)
return g
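# A minimal usage sketch (hedged; assumes this deprecated delaunay package is
# importable):
#
#     x = np.random.rand(50)
#     y = np.random.rand(50)
#     z = np.sin(x) * np.cos(y)
#     tri = Triangulation(x, y)
#     hull_ids = tri.hull                   # convex hull ids, counter-clockwise
#     interp = tri.linear_interpolator(z)   # plane per triangle
#     nn = tri.nn_interpolator(z)           # natural-neighbour variant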
| mit |
vybstat/scikit-learn | doc/sphinxext/gen_rst.py | 106 | 40198 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobi['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
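# A minimal usage sketch (hedged; the URL and object names are placeholders):
#
#     resolver = SphinxDocLinkResolver('http://scikit-learn.org/stable')
#     cobj = {'name': 'LinearRegression',
#             'module': 'sklearn.linear_model',
#             'module_short': 'sklearn.linear_model'}
#     link = resolver.resolve(cobj, this_url='auto_examples/plot_example.html')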
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
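# Illustrative return value: for an example whose module docstring is
# '"""Plot a demo\n\nLonger description of the demo."""', extract_docstring()
# returns the full docstring, the first paragraph ('Plot a demo'; with
# ignore_heading=True the second paragraph is used instead, truncated to 95
# characters), and the row just past the docstring, which is used as end_row
# in the rst templates above for the literalinclude directive.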
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. only:: html\n\n')
out.append(' .. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
if is_backref:
out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
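# For illustration: with module_name='sklearn.ensemble.forest' and
# obj_name='RandomForestClassifier', the loop keeps shortening the module path
# while the import still succeeds and would typically return 'sklearn.ensemble',
# the shortest prefix from which the object can still be imported.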
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
MicrosoftGenomics/LEAP | leap/probit.py | 1 | 12641 | import numpy as np
import scipy.stats as stats
import scipy.linalg as la
import time
import sklearn.linear_model
import sys
import argparse
import scipy.optimize as opt
import scipy.linalg.blas as blas
import leapUtils
import leapMain
np.set_printoptions(precision=6, linewidth=200)
def evalProbitReg(beta, X, cases, controls, thresholds, invRegParam, normPDF, h2):
XBeta = np.ravel(X.dot(beta)) - thresholds
phiXBeta = normPDF.pdf(XBeta)
PhiXBeta = normPDF.cdf(XBeta)
logLik = np.sum(np.log(PhiXBeta[cases])) + np.sum(np.log(1-PhiXBeta[controls]))
w = np.zeros(X.shape[0])
w[cases] = -phiXBeta[cases] / PhiXBeta[cases]
w[controls] = phiXBeta[controls] / (1-PhiXBeta[controls])
grad = X.T.dot(w)
#regularize
logLik -= 0.5*invRegParam * beta.dot(beta) #regularization
grad += invRegParam * beta
return (-logLik, grad)
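# Note: evalProbitReg returns the *negative* penalized probit log-likelihood and
# its gradient, i.e. the quantity being minimized is
#   -[ sum_cases log Phi(x_i.beta - t_i) + sum_controls log(1 - Phi(x_i.beta - t_i)) ]
#   + 0.5 * invRegParam * ||beta||^2
# where Phi is the CDF of N(0, 1-h2); this sign convention is what
# scipy.optimize.minimize expects below.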
def probitRegHessian(beta, X, cases, controls, thresholds, invRegParam, normPDF, h2):
XBeta = np.ravel(X.dot(beta)) - thresholds
phiXBeta = normPDF.pdf(XBeta)
PhiXBeta = normPDF.cdf(XBeta)
XbetaScaled = XBeta #/(1-h2)
R = np.zeros(X.shape[0])
R[cases] = (XbetaScaled[cases]*PhiXBeta[cases] + phiXBeta[cases]) / PhiXBeta[cases]**2
R[controls] = (-XbetaScaled[controls]*(1-PhiXBeta[controls]) + phiXBeta[controls]) / (1 - PhiXBeta[controls])**2
R *= phiXBeta
H = (X.T * R).dot(X)
H += invRegParam
return H
def probitRegression(X, y, thresholds, numSNPs, numFixedFeatures, h2, useHess, maxFixedIters, epsilon, nofail):
regParam = h2 / float(numSNPs)
Linreg = sklearn.linear_model.Ridge(alpha=1.0/(2*regParam), fit_intercept=False, normalize=False, solver='lsqr')
Linreg.fit(X, y)
initBeta = Linreg.coef_
np.random.seed(1234)
normPDF = stats.norm(0, np.sqrt(1-h2))
invRegParam = 1.0/regParam
controls = (y==0)
cases = (y==1)
funcToSolve = evalProbitReg
hess =(probitRegHessian if useHess else None)
jac= True
method = 'Newton-CG'
args = (X, cases, controls, thresholds, invRegParam, normPDF, h2)
print 'Beginning Probit regression...'
t0 = time.time()
optObj = opt.minimize(funcToSolve, x0=initBeta, args=args, jac=jac, method=method, hess=hess)
print 'Done in', '%0.2f'%(time.time()-t0), 'seconds'
if (not optObj.success):
print 'Optimization status:', optObj.status
print optObj.message
if (nofail == 0): raise Exception('Probit regression failed with message: ' + optObj.message)
beta = optObj.x
#Fit fixed effects
if (numFixedFeatures > 0):
thresholdsEM = np.zeros(X.shape[0]) + thresholds
for i in xrange(maxFixedIters):
print 'Beginning fixed effects iteration', i+1
t0 = time.time()
prevBeta = beta.copy()
#Learn fixed effects
thresholdsTemp = thresholdsEM - X[:, numFixedFeatures:].dot(beta[numFixedFeatures:])
args = (X[:, :numFixedFeatures], cases, controls, thresholdsTemp, 0, normPDF, h2)
optObj = opt.minimize(funcToSolve, x0=beta[:numFixedFeatures], args=args, jac=True, method=method, hess=hess)
if (not optObj.success): print optObj.message; #raise Exception('Learning failed with message: ' + optObj.message)
beta[:numFixedFeatures] = optObj.x
#Learn random effects
thresholdsTemp = thresholdsEM - X[:, :numFixedFeatures].dot(beta[:numFixedFeatures])
args = (X[:, numFixedFeatures:], cases, controls, thresholdsTemp, invRegParam, normPDF, h2)
optObj = opt.minimize(funcToSolve, x0=beta[numFixedFeatures:], args=args, jac=True, method=method, hess=hess)
if (not optObj.success): print optObj.message; #raise Exception('Learning failed with message: ' + optObj.message)
beta[numFixedFeatures:] = optObj.x
diff = np.sqrt(np.mean(beta[:numFixedFeatures]**2 - prevBeta[:numFixedFeatures]**2))
print 'Done in', '%0.2f'%(time.time()-t0), 'seconds'
print 'Diff:', '%0.4e'%diff
if (diff < epsilon): break
return beta
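# The fixed-effects loop above is a simple block-coordinate scheme: it alternates
# between refitting the first numFixedFeatures coefficients without a penalty and
# the remaining (SNP) coefficients with the ridge penalty, each time absorbing the
# other block into the per-individual thresholds, and stops once the change
# measure computed from consecutive fixed-effect estimates drops below epsilon or
# maxFixedIters is reached.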
def probit(bed, pheno, h2, prev, eigen, outFile, keepArr, covar, thresholds, nofail,
numSkipTopPCs, mineig, hess, recenter, maxFixedIters, epsilon, treatFixedAsRandom=False):
bed, pheno = leapUtils._fixupBedAndPheno(bed, pheno)
#Extract phenotype
if isinstance(pheno, dict): phe = pheno['vals']
else: phe = pheno
if (len(phe.shape)==2):
if (phe.shape[1]==1): phe=phe[:,0]
else: raise Exception('More than one phenotype found')
if (keepArr is None): keepArr = np.ones(phe.shape[0], dtype=np.bool)
S = eigen['arr_1'] * bed.sid.shape[0]
U = eigen['arr_0']
S = np.sqrt(S)
goodS = (S>mineig)
if (numSkipTopPCs > 0): goodS[-numSkipTopPCs:] = False
if (np.sum(~goodS) > 0): print 'Removing', np.sum(~goodS), 'PCs with low variance'
G = U[:, goodS]*S[goodS]
#Set binary vector
pheUnique = np.unique(phe)
if (pheUnique.shape[0] != 2): raise Exception('phenotype file has more than two values')
pheMean = phe.mean()
cases = (phe>pheMean)
phe[~cases] = 0
phe[cases] = 1
#run probit regression
t = stats.norm(0,1).isf(prev)
if (thresholds is not None): t = thresholds
#Recenter G to only consider the unrelated individuals
if recenter: G -= np.mean(G[keepArr, :], axis=0)
else: G -= np.mean(G, axis=0)
numFixedFeatures = 0
if (covar is not None):
covar -= covar.mean()
covar /= covar.std()
covar *= np.mean(np.std(G, axis=0))
G = np.concatenate((covar, G), axis=1)
if (not treatFixedAsRandom): numFixedFeatures += covar.shape[1]
#Run Probit regression
probitThresh = (t if thresholds is None else t[keepArr])
beta = probitRegression(G[keepArr, :], phe[keepArr], probitThresh, bed.sid.shape[0], numFixedFeatures, h2, hess, maxFixedIters, epsilon, nofail)
#Predict liabilities for all individuals
meanLiab = G.dot(beta)
liab = meanLiab.copy()
indsToFlip = ((liab <= t) & (phe>0.5)) | ((liab > t) & (phe<0.5))
liab[indsToFlip] = stats.norm(0,1).isf(prev)
if (outFile is not None):
#save liabilities
f = open(outFile+'.liabs', 'w')
for ind_i,[fid,iid] in enumerate(bed.iid): f.write(' '.join([fid, iid, '%0.3f'%liab[ind_i]]) + '\n')
f.close()
#save liabilities after regressing out the fixed effects
if (numFixedFeatures > 0):
liab_nofixed = liab - G[:, :numFixedFeatures].dot(beta[:numFixedFeatures])
f = open(outFile+'.liab_nofixed', 'w')
for ind_i,[fid,iid] in enumerate(bed.iid): f.write(' '.join([fid, iid, '%0.3f'%liab_nofixed[ind_i]]) + '\n')
f.close()
liab_nofixed2 = meanLiab - G[:, :numFixedFeatures].dot(beta[:numFixedFeatures])
indsToFlip = ((liab_nofixed2 <= t) & (phe>0.5)) | ((liab_nofixed2 > t) & (phe<0.5))
liab_nofixed2[indsToFlip] = stats.norm(0,1).isf(prev)
f = open(outFile+'.liab_nofixed2', 'w')
for ind_i,[fid,iid] in enumerate(bed.iid): f.write(' '.join([fid, iid, '%0.3f'%liab_nofixed2[ind_i]]) + '\n')
f.close()
#Return phenotype struct with liabilities
liabsStruct = {
'header':[None],
'vals':liab,
'iid':bed.iid
}
return liabsStruct
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bfilesim', metavar='bfilesim', default=None, help='Binary plink file')
parser.add_argument('--pheno', metavar='pheno', default=None, help='Phenotype file in Plink format')
parser.add_argument('--h2', metavar='h2', type=float, default=None, help='Liability heritability')
parser.add_argument('--eigen', metavar='eigen', default=None, help='eigen file')
parser.add_argument('--prev', metavar='prev', type=float, default=None, help='Trait prevalence')
parser.add_argument('--extractSim', metavar='extractSim', default=None, help='SNPs subset to use')
parser.add_argument('--out', metavar='out', default=None, help='output file')
parser.add_argument('--covar', metavar='covar', default=None, help='covariates file in FastLMM format')
parser.add_argument('--thresholds', metavar='thresholds', default=None, help="liability thresholds file")
parser.add_argument('--nofail', metavar='nofail', type=int, default=0, help="Do not raise exception if Probit fitting failed")
parser.add_argument('--treatFixedAsRandom', metavar='treatFixedAsRandom', type=int, default=0, help="Whether to treat fixed effects as random effects")
parser.add_argument('--relCutoff', metavar='relCutoff', type=float, default=0.05, help='Relatedness cutoff')
parser.add_argument('--numSkipTopPCs', metavar='numSkipTopPCs', type=int, default=0, help='Number of PCs to skip')
parser.add_argument('--numFixedPCs', metavar='numFixedPCs', type=int, default=0, help='Number of PCs to use as fixed effects')
parser.add_argument('--hess', metavar='hess', type=int, default=1, help='Whether to compute Hessian analytically (1) or not (0)')
parser.add_argument('--bfile', metavar='bfile', default=None, help='Binary plink file with SNPs that can be used as fixed effects')
parser.add_argument('--resfile', metavar='resfile', default=None, help='A linear regression results file in FastLMM format, used to choose SNPs that will be used as fixed effects')
parser.add_argument('--pthresh', metavar='pthresh', type=float, default=5e-8, help='p-value cutoff below which SNPs will be used as fixed effects')
parser.add_argument('--mineig', metavar='mineig', type=float, default=1e-3, help='eigenvectors with singular value below this value will not be used')
parser.add_argument('--extract', metavar='extract', default=None, help='subset of SNPs to be considered as fixed effects')
parser.add_argument('--related', metavar='related', default=None, help='File with info about related individuals to remove')
parser.add_argument('--mindist', metavar='mindist', type=int, default=0, help='Minimum distance between fixed effects SNPs')
parser.add_argument('--recenter', metavar='recenter', type=int, default=1, help='Whether to recenter features matrix so that only individuals participating in the model fitting stage will have zero mean for every feature (1 or 0)')
parser.add_argument('--maxFixedIters', metavar='maxFixedIters', type=int, default=100, help='Max number of iterations for fitting of fixed effects')
parser.add_argument('--epsilon', metavar='epsilon', type=float, default=1e-3, help='Convergence cutoff for fitting of fixed effects')
parser.add_argument('--missingPhenotype', metavar='missingPhenotype', default='-9', help='identifier for missing values (default: -9)')
args = parser.parse_args()
if (args.extract is not None and args.bfile is None): raise Exception('--extract cannot be used without --bfile')
if (args.bfile is not None and args.resfile is None): raise Exception('--bfile cannot be used without --resfile')
if (args.bfilesim is None): raise Exception('bfilesim must be supplied')
if (args.pheno is None): raise Exception('phenotype file must be supplied')
if (args.out is None): raise Exception('output file name must be supplied')
if (args.prev is None): raise Exception('prevalence must be supplied')
if (args.h2 is None): raise Exception('heritability must be supplied')
#Read bfilesim and pheno file for heritability computation
bed, phe = leapUtils.loadData(args.bfilesim, args.extractSim, args.pheno, args.missingPhenotype, loadSNPs=(args.eigen is None), standardize=True)
#Read/create eigendecomposition
if (args.eigen is not None): eigen = np.load(args.eigen)
else:
import eigenDecompose
eigen = eigenDecompose.eigenDecompose(bed)
#Compute relatedness
if (args.relCutoff <= 0): keepArr = np.ones(bed.iid.shape[0], dtype=bool)
else:
if (args.related is None):
bed2 = bed
if (args.extractSim is not None or args.eigen is not None): bed2, _ = leapUtils.loadData(args.bfilesim, None, args.pheno, args.missingPhenotype, loadSNPs=True)
keepArr = leapUtils.findRelated(bed2, args.relCutoff)
else:
keepArr = leapUtils.loadRelatedFile(bed, args.related)
#Add significant SNPs as fixed effects
covar = None
if (args.resfile is not None):
bed_fixed, _ = leapUtils.loadData(args.bfile, args.extract, args.pheno, args.missingPhenotype, loadSNPs=True)
covar = leapUtils.getSNPCovarsMatrix(bed_fixed, args.resfile, args.pthresh, args.mindist)
print 'using', covar.shape[1], 'SNPs as covariates'
#Read covar file
if (args.covar is not None):
covarsMat = leapUtils.loadCovars(bed, args.covar)
print 'Read', covarsMat.shape[1], 'covariates from file'
if (covar is None): covar = covarsMat
else: covar = np.concatenate((covar, covarsMat), axis=1)
if (args.thresholds is not None): thresholds = np.loadtxt(args.thresholds, usecols=[0])
else: thresholds = None
leapMain.probit(bed, phe, args.h2, args.prev, eigen, args.out, keepArr, covar, thresholds, args.nofail==1,
args.numSkipTopPCs, args.mineig, args.hess==1, args.recenter==1, args.maxFixedIters, args.epsilon, treatFixedAsRandom=args.treatFixedAsRandom>=1)
| apache-2.0 |
vzg100/Post-Translational-Modification-Prediction | w2vImp.py | 1 | 6828 | from random import randint
import numpy as np
import pandas as pd
import random
from gensim.models import word2vec
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import roc_auc_score
from sklearn.svm import SVC
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import Counter, defaultdict
from sklearn.pipeline import Pipeline
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
def windower(sequence, position, wing_size):
# window size = wing_size*2 +1
position = int(position)
wing_size = int(wing_size)
if (position - wing_size) < 0:
return sequence[:wing_size + position]
if (position + wing_size) > len(sequence):
return sequence[position - wing_size:]
else:
return sequence[position - wing_size:position + wing_size]
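# Illustrative behaviour (toy values, not from the data files used below):
#   windower("ABCDEFGHIJ", position=5, wing_size=2) -> "DEFG" (sequence[3:7])
#   windower("ABCDEFGHIJ", position=1, wing_size=3) -> "ABCD" (left edge, sequence[:4])
# As written, the central slice yields 2*wing_size characters; the
# "wing_size*2 +1" in the comment above presumably counts the modified residue
# itself as part of the intended window.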
class DataCleaner:
def __init__(self, output, data="phosphosites.csv", delimit=",", amino_acid="K", sites="code",
modification="phosphorylation", window_size=7, pos="position", training_ratio=.7,
header_line=0, seq="sequence", neg_per_seq=2, lines_to_read=10000):
data = pd.read_csv(data, header=header_line, delimiter=delimit, quoting=3, dtype=object)
self.data = data.reindex(np.random.permutation(data.index))
self.amino_acid = amino_acid
self.training_ratio = training_ratio # Float value representing % of data used for training
self.proteins = {}
self.neg_count = 0
self.neg_per_seq = neg_per_seq
self.window = int(window_size)
self.features= []
self.labels = []
self.output = open(output, "a")
sequences = self.data["sequence"]
positive_sites = self.data["position"]
size = len(self.data["sequence"])
for i in range(0,size):
#print(sequences[i][int(positive_sites[i])-1])
try:
self.features.append(windower(sequences[i], positive_sites[i], self.window))
self.labels.append(1)
except:
print(i)
counter = len(self.features)
for i in range(int(counter*neg_per_seq)):
if len(self.features) >= counter*neg_per_seq:
break
selector = randint(0, size)
options = []
try:
for j in range(len(sequences[selector])):
if sequences[selector][j] == self.amino_acid:
options.append(j)
except:
pass
if len(options) > 0:
try:
random.shuffle(options)
for j in options:
t = windower(sequences[selector],j,self.window)
if t not in self.features:
self.features.append(t)
self.labels.append(0)
except:
pass
temp = list(zip(self.features, self.labels))
random.shuffle(temp)
self.features, self.labels = zip(*temp)
print(len(self.features), len(self.labels))
for i in range(len(self.features)):
t = str(self.features[i])+","+str(self.labels[i])+"\n"
self.output.write(t)
class Classy:
def __init__(self, data="clean_serine.csv", delimit=",", amino_acid="Y", training_ratio=.7, header_line=0):
self.data = open(data, "r")
self.amino_acid = amino_acid
self.training_ratio = training_ratio # Float value representing % of data used for training
self.features= []
self.labels = []
i = 0
for line in self.data:
try:
x, y = line.split(",")
y = int(y.strip("\n"))
t = []
for j in x:
t.append(j)
self.features.append(t)
self.labels.append(y)
except:
print("Bad data at line"+str(i))
i = i + 1
temp = list(zip(self.features, self.labels))
random.shuffle(temp)
self.features, self.labels = zip(*temp)
self.num_features = 300 # Word vector dimensionality
self.min_word_count = 1 # Minimum word count
self.num_workers = 4 # Number of threads to run in parallel
self.context = 5 # Context window size
self.downsampling = 5e-1 # Downsample setting for frequent words
self.model = word2vec.Word2Vec(self.features ,workers=self.num_workers, size=self.num_features, min_count=self.min_word_count,window=self.context, sample=self.downsampling)
def kluster(self):
word_vectors = self.model.wv.syn0
num_clusters = 15 # originally 4
print(num_clusters)
kmeans_clustering = KMeans(n_clusters=num_clusters)
idx = kmeans_clustering.fit_predict(word_vectors)
word_centroid_map = dict(zip(self.model.wv.index2word, idx))
for cluster in range(0, 10):
print("Cluster" +str(cluster))
words = []
val = list(word_centroid_map.values())
key = list(word_centroid_map.keys())
for i in range(len(val)):
if val[i] == cluster:
words.append(key[i])
print(words)
train_centroids = np.zeros((len(self.features), num_clusters),dtype="float32")
counter = 0
for sequence in self.features:
train_centroids[counter] = bag_of_centroids(sequence, word_centroid_map)
counter += 1
X_train, X_test, y_train, y_test = train_test_split(train_centroids, self.labels, test_size = 0.33, random_state = 42)
forest = RandomForestClassifier(n_estimators=100)
forest.fit(X_train, y_train)
result = forest.predict(X_test)
print(precision_score(y_test, result))
print(recall_score(y_test, result))
print(accuracy_score(y_test, result))
print(roc_auc_score(y_test, result))
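# In short, kluster() (1) clusters the word2vec embeddings of the individual
# residues with KMeans, (2) prints the members of the first ten clusters,
# (3) re-encodes every windowed sequence as a bag-of-centroids vector, and
# (4) trains and scores a RandomForestClassifier on a 67/33 train/test split
# (precision, recall, accuracy and ROC AUC are printed).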
def bag_of_centroids(wordlist, word_centroid_map):
num_centroids = max(word_centroid_map.values()) + 1
bag_of_centroids = np.zeros(num_centroids, dtype="float32")
for word in wordlist:
if word in word_centroid_map:
index = word_centroid_map[word]
bag_of_centroids[index] += 1
return bag_of_centroids
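# Illustrative call (toy values): with word_centroid_map = {'A': 0, 'C': 1, 'G': 1},
# bag_of_centroids(['A', 'C', 'G', 'C'], word_centroid_map) returns
# array([1., 3.], dtype=float32) -- one residue in cluster 0, three in cluster 1.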
y= DataCleaner(amino_acid="H", data="Data/Training/raw/Phosphorylation_H.txt", output="Data/Training/Phosphorylation_H.txt")
| mit |
PytLab/catplot | tests/ep_canvas_test.py | 1 | 5077 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for Energy Profle Canvas.
"""
import unittest
import matplotlib.pyplot as plt
from catplot.ep_components.ep_canvas import EPCanvas
from catplot.ep_components.ep_lines import ElementaryLine
from catplot.ep_components.ep_chain import EPChain
class EPCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test we can construct ElementaryLine object correctly.
"""
canvas = EPCanvas(margin_ratio=0.2)
self.assertEqual(canvas.margin_ratio, 0.2)
self.assertIsNone(canvas.figsize)
self.assertIsNone(canvas.dpi)
self.assertIsNone(canvas.facecolor)
self.assertIsNone(canvas.edgecolor)
self.assertListEqual(canvas.lines, [])
self.assertListEqual(canvas.shadow_lines, [])
self.assertTrue(canvas.figure)
self.assertTrue(canvas.axes)
# Check invalid margin ratio.
self.assertRaises(ValueError, EPCanvas, margin_ratio=-0.1)
plt.close(canvas.figure)
def test_draw(self):
""" Make sure the lines can be added without exceptions.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8])
canvas.add_lines([line])
canvas.draw()
plt.close(canvas.figure)
def test_add_species_annotations(self):
""" Make sure the species annotations can be added without exceptions.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8],
rxn_equation="CO_b + O_b <-> CO-O_2b -> CO2_g + 2*_b")
canvas.add_lines([line])
canvas.add_species_annotations(line)
plt.close(canvas.figure)
def test_add_horizontal_auxiliary_line(self):
""" Make sure the horizontal line can be added without exceptions.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8])
canvas.add_lines([line])
canvas.add_horizontal_auxiliary_line(line)
plt.close(canvas.figure)
def test_add_vertical_auxiliary_line(self):
""" Make sure the vertical line can be added without exceptions.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8])
canvas.add_lines([line])
canvas.add_vertical_auxiliary_lines(line)
plt.close(canvas.figure)
def test_add_energy_annotations(self):
""" Make sure the energy annotations can be added correctly.
"""
canvas = EPCanvas()
line = ElementaryLine([0.0, 1.3, 0.8])
canvas.add_lines([line])
canvas.add_energy_annotations(line)
plt.close(canvas.figure)
def test_add_chain(self):
""" Test energy profile chain can be added correctly to canvas.
"""
canvas = EPCanvas()
self.assertFalse(canvas.lines)
self.assertFalse(canvas.chains)
l1 = ElementaryLine([0.0, 1.2, 0.6])
l2 = ElementaryLine([0.0, 1.0, 0.8])
chain = EPChain([l1, l2])
canvas.add_chain(chain)
self.assertEqual(len(canvas.lines), 2)
for l in canvas.lines:
self.assertTrue(isinstance(l, ElementaryLine))
self.assertEqual(len(canvas.chains), 1)
self.assertTrue(isinstance(canvas.chains[0], EPChain))
# Exception is expected if add the chain again.
self.assertRaises(ValueError, canvas.add_chain, chain)
plt.close(canvas.figure)
def test_contains(self):
canvas = EPCanvas()
l1 = ElementaryLine([0.0, 1.2, 0.6])
l2 = ElementaryLine([0.0, 1.0, 0.8])
chain = EPChain([l1])
canvas.add_chain(chain)
self.assertTrue(l1 in canvas)
self.assertTrue(chain in canvas)
self.assertFalse(l2 in canvas)
plt.close(canvas.figure)
def test_add_line(self):
""" Test the line can be add to canvas correctly.
"""
canvas = EPCanvas()
l1 = ElementaryLine([0.0, 1.2, 0.6])
canvas.add_line(l1)
# Add repeat line, exception raises.
self.assertRaises(ValueError, canvas.add_line, l1)
plt.close(canvas.figure)
def test_add_lines(self):
canvas = EPCanvas()
l1 = ElementaryLine([0.0, 1.2, 0.6])
l2 = ElementaryLine([0.0, 1.0, 0.8])
canvas.add_lines([l1, l2])
canvas.lines = []
self.assertRaises(ValueError, canvas.add_lines, [l1, l1])
plt.close(canvas.figure)
def test_add_all_horizontal_auxiliary_lines(self):
""" Make sure we can add all horizontal auxiliary lines to canvas.
"""
canvas = EPCanvas()
l1 = ElementaryLine([0.0, 1.2, 0.6])
l2 = ElementaryLine([0.0, 1.0, 0.8])
canvas.add_lines([l1, l2])
canvas.add_all_horizontal_auxiliary_lines()
plt.close(canvas.figure)
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(EPCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| mit |
ssaeger/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
maartenbreddels/vaex | tests/graphql_test.py | 1 | 4881 | from common import *
if vaex.utils.devmode:
pytest.skip('runs too slow when developing', allow_module_level=True)
@pytest.fixture(scope='module')
def schema(ds_trimmed_cache):
ds_trimmed_cache = ds_trimmed_cache.drop('123456')
return ds_trimmed_cache.graphql.schema()
@pytest.fixture()
def df(df_trimmed):
return df_trimmed.drop('123456')
def test_aggregates(df, schema):
result = schema.execute("""
{
df {
count
min {
x
y
}
mean {
x
y
}
max {
x
y
}
}
}
""")
assert not result.errors
assert result.data['df']['count'] == len(df)
assert result.data['df']['min']['x'] == df.x.min()
assert result.data['df']['min']['y'] == df.y.min()
assert result.data['df']['max']['x'] == df.x.max()
assert result.data['df']['max']['y'] == df.y.max()
assert result.data['df']['mean']['x'] == df.x.mean()
assert result.data['df']['mean']['y'] == df.y.mean()
def test_groupby(df, schema):
result = schema.execute("""
{
df {
groupby {
x {
min {
x
}
}
}
}
}
""")
assert not result.errors
dfg = df.groupby('x', agg={'xmin': vaex.agg.min('x')})
assert result.data['df']['groupby']['x']['min']['x'] == dfg['xmin'].tolist()
def test_row_pagination(df, schema):
def values(row, name):
return [k[name] for k in row]
result = schema.execute("""
{
df {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df.x.tolist()
result = schema.execute("""
{
df {
row(offset: 2) { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[2:].x.tolist()
result = schema.execute("""
{
df {
row(limit: 2) { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[:2].x.tolist()
result = schema.execute("""
{
df {
row(offset: 3, limit: 2) { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[3:5].x.tolist()
def test_where(df, schema):
def values(row, name):
return [k[name] for k in row]
result = schema.execute("""
{
df(where: {x: {_eq: 4}}) {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[df.x==4].x.tolist()
result = schema.execute("""
{
df(where: {x: {_neq: 4}}) {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[df.x!=4].x.tolist()
result = schema.execute("""
{
df(where: {x: {_gt: 4}}) {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[df.x>4].x.tolist()
result = schema.execute("""
{
df(where: {x: {_gte: 4}}) {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[df.x>=4].x.tolist()
result = schema.execute("""
{
df(where: {x: {_lt: 4}}) {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[df.x<4].x.tolist()
result = schema.execute("""
{
df(where: {x: {_lte: 4}}) {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[df.x<=4].x.tolist()
result = schema.execute("""
{
df(where: {_not: {x: {_lte: 4}}}) {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == df[~(df.x<=4)].x.tolist()
result = schema.execute("""
{
df(where: {_or: [{x: {_eq: 4}}, {x: {_eq: 6}} ]}) {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == [4, 6]
result = schema.execute("""
{
df(where: {_and: [{x: {_gte: 4}}, {x: {_lte: 6}} ]}) {
row { x }
}
}
""")
assert not result.errors
assert values(result.data['df']['row'], 'x') == [4, 5, 6]
def test_pandas(df, schema):
df_pandas = df.to_pandas_df()
def values(row, name):
return [k[name] for k in row]
result = df_pandas.graphql.execute("""
{
df(where: {x: {_eq: 4}}) {
row { x }
}
}
""") | mit |
bnaul/scikit-learn | sklearn/utils/_estimator_html_repr.py | 4 | 9120 | from contextlib import closing
from contextlib import suppress
from io import StringIO
import uuid
import html
from sklearn import config_context
class _VisualBlock:
"""HTML Representation of Estimator
Parameters
----------
kind : {'serial', 'parallel', 'single'}
kind of HTML block
estimators : list of estimators or `_VisualBlock`s or a single estimator
If kind != 'single', then `estimators` is a list of
estimators.
If kind == 'single', then `estimators` is a single estimator.
names : list of str
If kind != 'single', then `names` corresponds to estimators.
If kind == 'single', then `names` is a single string corresponding to
the single estimator.
name_details : list of str, str, or None, default=None
If kind != 'single', then `name_details` corresponds to `names`.
If kind == 'single', then `name_details` is a single string
corresponding to the single estimator.
dash_wrapped : bool, default=True
If true, wrapped HTML element will be wrapped with a dashed border.
Only active when kind != 'single'.
"""
def __init__(self, kind, estimators, *, names=None, name_details=None,
dash_wrapped=True):
self.kind = kind
self.estimators = estimators
self.dash_wrapped = dash_wrapped
if self.kind in ('parallel', 'serial'):
if names is None:
names = (None, ) * len(estimators)
if name_details is None:
name_details = (None, ) * len(estimators)
self.names = names
self.name_details = name_details
def _sk_visual_block_(self):
return self
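# A small sketch of how these blocks compose (names are illustrative only):
#
#     inner = _VisualBlock('single', est,
#                          names='LogisticRegression',
#                          name_details='LogisticRegression()')
#     outer = _VisualBlock('serial', [inner], names=('clf',), name_details=(None,))
#
# 'serial' children are rendered top-to-bottom, 'parallel' children side by side,
# and 'single' wraps one estimator in a toggleable label showing name_details.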
def _write_label_html(out, name, name_details,
outer_class="sk-label-container",
inner_class="sk-label",
checked=False):
"""Write labeled html with or without a dropdown with named details"""
out.write(f'<div class="{outer_class}">'
f'<div class="{inner_class} sk-toggleable">')
name = html.escape(name)
if name_details is not None:
checked_str = 'checked' if checked else ''
est_id = uuid.uuid4()
out.write(f'<input class="sk-toggleable__control sk-hidden--visually" '
f'id="{est_id}" type="checkbox" {checked_str}>'
f'<label class="sk-toggleable__label" for="{est_id}">'
f'{name}</label>'
f'<div class="sk-toggleable__content"><pre>{name_details}'
f'</pre></div>')
else:
out.write(f'<label>{name}</label>')
out.write('</div></div>') # outer_class inner_class
def _get_visual_block(estimator):
"""Generate information about how to display an estimator.
"""
with suppress(AttributeError):
return estimator._sk_visual_block_()
if isinstance(estimator, str):
return _VisualBlock('single', estimator,
names=estimator, name_details=estimator)
elif estimator is None:
return _VisualBlock('single', estimator,
names='None', name_details='None')
# check if estimator looks like a meta estimator that wraps other estimators
if hasattr(estimator, 'get_params'):
estimators = []
for key, value in estimator.get_params().items():
# Only look at the estimators in the first layer
if '__' not in key and hasattr(value, 'get_params'):
estimators.append(value)
if len(estimators):
return _VisualBlock('parallel', estimators, names=None)
return _VisualBlock('single', estimator,
names=estimator.__class__.__name__,
name_details=str(estimator))
def _write_estimator_html(out, estimator, estimator_label,
estimator_label_details, first_call=False):
"""Write estimator to html in serial, parallel, or by itself (single).
"""
if first_call:
est_block = _get_visual_block(estimator)
else:
with config_context(print_changed_only=True):
est_block = _get_visual_block(estimator)
if est_block.kind in ('serial', 'parallel'):
dashed_wrapped = first_call or est_block.dash_wrapped
dash_cls = " sk-dashed-wrapped" if dashed_wrapped else ""
out.write(f'<div class="sk-item{dash_cls}">')
if estimator_label:
_write_label_html(out, estimator_label, estimator_label_details)
kind = est_block.kind
out.write(f'<div class="sk-{kind}">')
est_infos = zip(est_block.estimators, est_block.names,
est_block.name_details)
for est, name, name_details in est_infos:
if kind == 'serial':
_write_estimator_html(out, est, name, name_details)
else: # parallel
out.write('<div class="sk-parallel-item">')
# wrap element in a serial visualblock
serial_block = _VisualBlock('serial', [est],
dash_wrapped=False)
_write_estimator_html(out, serial_block, name, name_details)
out.write('</div>') # sk-parallel-item
out.write('</div></div>')
elif est_block.kind == 'single':
_write_label_html(out, est_block.names, est_block.name_details,
outer_class="sk-item", inner_class="sk-estimator",
checked=first_call)
_STYLE = """
div.sk-top-container {
color: black;
background-color: white;
}
div.sk-toggleable {
background-color: white;
}
label.sk-toggleable__label {
cursor: pointer;
display: block;
width: 100%;
margin-bottom: 0;
padding: 0.2em 0.3em;
box-sizing: border-box;
text-align: center;
}
div.sk-toggleable__content {
max-height: 0;
max-width: 0;
overflow: hidden;
text-align: left;
background-color: #f0f8ff;
}
div.sk-toggleable__content pre {
margin: 0.2em;
color: black;
border-radius: 0.25em;
background-color: #f0f8ff;
}
input.sk-toggleable__control:checked~div.sk-toggleable__content {
max-height: 200px;
max-width: 100%;
overflow: auto;
}
div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {
background-color: #d4ebff;
}
div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {
background-color: #d4ebff;
}
input.sk-hidden--visually {
border: 0;
clip: rect(1px 1px 1px 1px);
clip: rect(1px, 1px, 1px, 1px);
height: 1px;
margin: -1px;
overflow: hidden;
padding: 0;
position: absolute;
width: 1px;
}
div.sk-estimator {
font-family: monospace;
background-color: #f0f8ff;
margin: 0.25em 0.25em;
border: 1px dotted black;
border-radius: 0.25em;
box-sizing: border-box;
}
div.sk-estimator:hover {
background-color: #d4ebff;
}
div.sk-parallel-item::after {
content: "";
width: 100%;
border-bottom: 1px solid gray;
flex-grow: 1;
}
div.sk-label:hover label.sk-toggleable__label {
background-color: #d4ebff;
}
div.sk-serial::before {
content: "";
position: absolute;
border-left: 1px solid gray;
box-sizing: border-box;
top: 2em;
bottom: 0;
left: 50%;
}
div.sk-serial {
display: flex;
flex-direction: column;
align-items: center;
background-color: white;
}
div.sk-item {
z-index: 1;
}
div.sk-parallel {
display: flex;
align-items: stretch;
justify-content: center;
background-color: white;
}
div.sk-parallel-item {
display: flex;
flex-direction: column;
position: relative;
background-color: white;
}
div.sk-parallel-item:first-child::after {
align-self: flex-end;
width: 50%;
}
div.sk-parallel-item:last-child::after {
align-self: flex-start;
width: 50%;
}
div.sk-parallel-item:only-child::after {
width: 0;
}
div.sk-dashed-wrapped {
border: 1px dashed gray;
margin: 0.2em;
box-sizing: border-box;
padding-bottom: 0.1em;
background-color: white;
position: relative;
}
div.sk-label label {
font-family: monospace;
font-weight: bold;
background-color: white;
display: inline-block;
line-height: 1.2em;
}
div.sk-label-container {
position: relative;
z-index: 2;
text-align: center;
}
div.sk-container {
display: inline-block;
position: relative;
}
""".replace(' ', '').replace('\n', '') # noqa
def estimator_html_repr(estimator):
"""Build a HTML representation of an estimator.
Read more in the :ref:`User Guide <visualizing_composite_estimators>`.
Parameters
----------
estimator : estimator object
The estimator to visualize.
Returns
-------
html: str
HTML representation of estimator.
"""
with closing(StringIO()) as out:
out.write(f'<style>{_STYLE}</style>'
f'<div class="sk-top-container"><div class="sk-container">')
_write_estimator_html(out, estimator, estimator.__class__.__name__,
str(estimator), first_call=True)
out.write('</div></div>')
html_output = out.getvalue()
return html_output
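# A minimal usage sketch, assuming scikit-learn's Pipeline, StandardScaler and
# LogisticRegression are importable; it renders a small pipeline to an HTML file
# that can be opened in a browser.
if __name__ == "__main__":
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    pipe = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression())])
    with open("pipeline_repr.html", "w") as fh:
        fh.write(estimator_html_repr(pipe))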
| bsd-3-clause |
michalsenkyr/spark | python/pyspark/sql/session.py | 2 | 36039 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
:param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
_sc = None
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
def _sparkContext(self, sc):
with self._lock:
self._sc = sc
return self
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
so, returns that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
if self._sc is not None:
sc = self._sc
else:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
# This SparkContext may be an existing one.
sc = SparkContext.getOrCreate(sparkConf)
# Do not update `SparkConf` for existing `SparkContext`, as it's shared
# by all sessions.
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as a new session, which has separate SQLConf,
registered temporary views and UDFs, but a shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
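    # A minimal usage sketch (illustrative only): temporary views registered in
    # one session are not visible from the other, while the SparkContext object
    # is shared by both sessions.
    #
    #     s1 = SparkSession.builder.getOrCreate()
    #     s2 = s1.newSession()
    #     s1.range(3).createOrReplaceTempView("t")   # "t" is visible only in s1
    #     assert s1.sparkContext is s2.sparkContext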
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.serializers import ArrowStreamSerializer, _create_batch
from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create Arrow record batches
batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
timezone)
for pdf_slice in pdf_slices]
# Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
if isinstance(schema, (list, tuple)):
struct = from_arrow_schema(batches[0].schema)
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
jsqlContext = self._wrapped._jsqlContext
def reader_func(temp_filename):
return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
def create_RDD_server():
return self._jvm.ArrowRDDServer(jsqlContext)
# Create Spark DataFrame from Arrow stream file, using one batch per partition
jrdd = self._sc._serialize_to_jvm(batches, ArrowStreamSerializer(), reader_func,
create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@staticmethod
def _create_shell_session():
"""
Initialize a SparkSession for a pyspark shell session. This is called from shell.py
to make error handling simpler without needing to declare local variables in that
script, which would expose those to users.
"""
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
# Try to access HiveConf, it will raise exception if Hive is not added
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
return SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
return SparkSession.builder.getOrCreate()
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
.. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self._wrapped._conf.pandasRespectSessionTimeZone():
timezone = self._wrapped._conf.sessionLocalTimeZone()
else:
timezone = None
# If no schema was supplied by the user, get only the column names
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self._wrapped._conf.arrowEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
from pyspark.util import _exception_message
if self._wrapped._conf.arrowFallbackEnabled():
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.fallback.enabled' has been set to "
"false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
# We should clean the default session up. See SPARK-23228.
self._jvm.SparkSession.clearDefaultSession()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
demianw/dipy | scratch/very_scratch/simulation_comparisons_modified.py | 20 | 13117 | import nibabel
import os
import numpy as np
import dipy as dp
import dipy.core.generalized_q_sampling as dgqs
import dipy.io.pickles as pkl
import scipy as sp
from matplotlib.mlab import find
import dipy.core.sphere_plots as splots
import dipy.core.sphere_stats as sphats
import dipy.core.geometry as geometry
import get_vertices as gv
#old SimData files
'''
results_SNR030_1fibre
results_SNR030_1fibre+iso
results_SNR030_2fibres_15deg
results_SNR030_2fibres_30deg
results_SNR030_2fibres_60deg
results_SNR030_2fibres_90deg
results_SNR030_2fibres+iso_15deg
results_SNR030_2fibres+iso_30deg
results_SNR030_2fibres+iso_60deg
results_SNR030_2fibres+iso_90deg
results_SNR030_isotropic
'''
#fname='/home/ian/Data/SimData/results_SNR030_1fibre'
''' The file has one row for every voxel; every voxel is repeated 1000
times with the same noise level, and there are 100 different
directions, so 1000 * 100 is the total number of rows.
The 100 conditions are given by 10 polar angles (in degrees) 0, 20, 40, 60, 80,
80, 60, 40, 20 and 0, and each of these with longitude angle 0, 40, 80,
120, 160, 200, 240, 280, 320, 360.
'''
#new complete SimVoxels files
simdata = ['fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
simdir = '/home/ian/Data/SimVoxels/'
def gq_tn_calc_save():
for simfile in simdata:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
gq = dp.GeneralizedQSampling(sim_data,bvals,gradients)
gqfile = simdir+'gq/'+dataname+'.pkl'
pkl.save_pickle(gqfile,gq)
'''
gq.IN gq.__doc__ gq.glob_norm_param
gq.QA gq.__init__ gq.odf
gq.__class__ gq.__module__ gq.q2odf_params
'''
tn = dp.Tensor(sim_data,bvals,gradients)
tnfile = simdir+'tn/'+dataname+'.pkl'
pkl.save_pickle(tnfile,tn)
'''
tn.ADC tn.__init__ tn._getevals
tn.B tn.__module__ tn._getevecs
tn.D tn.__new__ tn._getndim
tn.FA tn.__reduce__ tn._getshape
tn.IN tn.__reduce_ex__ tn._setevals
tn.MD tn.__repr__ tn._setevecs
tn.__class__ tn.__setattr__ tn.adc
tn.__delattr__ tn.__sizeof__ tn.evals
tn.__dict__ tn.__str__ tn.evecs
tn.__doc__ tn.__subclasshook__ tn.fa
tn.__format__ tn.__weakref__ tn.md
tn.__getattribute__ tn._evals tn.ndim
tn.__getitem__ tn._evecs tn.shape
tn.__hash__ tn._getD
'''
''' The file has one row for every voxel; every voxel is repeated 1000
times with the same noise level, and there are 100 different
directions, so 100 * 1000 is the total number of rows.
At the moment this module is hardwired to the use of the EDS362
spherical mesh. I am assuming (needs testing) that directions 181 to 361
are the antipodal partners of directions 0 to 180. So when counting the
number of different vertices that occur as maximal directions we will map
the indices modulo 181.
'''
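# A minimal sketch of the modulo-181 antipodal mapping assumed above
# (illustrative indices only):
#
#     idx = np.array([0, 180, 181, 361])
#     np.remainder(idx, 181)        # -> array([  0, 180,   0, 180])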
def analyze_maxima(indices, max_dirs, subsets):
'''This calculates the eigenstats for each of the replicated batches
of the simulation data
'''
results = []
for direction in subsets:
batch = max_dirs[direction,:,:]
index_variety = np.array([len(set(np.remainder(indices[direction,:],181)))])
#normed_centroid, polar_centroid, centre, b1 = sphats.eigenstats(batch)
centre, b1 = sphats.eigenstats(batch)
# make azimuth be in range (0,360) rather than (-180,180)
centre[1] += 360*(centre[1] < 0)
#results.append(np.concatenate((normed_centroid, polar_centroid, centre, b1, index_variety)))
results.append(np.concatenate((centre, b1, index_variety)))
return results
#dt_first_directions = tn.evecs[:,:,0].reshape((100,1000,3))
# these are the principal directions for the full set of simulations
#gq_tn_calc_save()
eds=np.load(os.path.join(os.path.dirname(dp.__file__),'core','matrices','evenly_distributed_sphere_362.npz'))
odf_vertices=eds['vertices']
def run_comparisons(sample_data=35):
for simfile in [simdata[sample_data]]:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
gqfile = simdir+'gq/'+dataname+'.pkl'
gq = pkl.load_pickle(gqfile)
tnfile = simdir+'tn/'+dataname+'.pkl'
tn = pkl.load_pickle(tnfile)
dt_first_directions_in=odf_vertices[tn.IN]
dt_indices = tn.IN.reshape((100,1000))
dt_results = analyze_maxima(dt_indices, dt_first_directions_in.reshape((100,1000,3)),range(10,90))
gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(10,90))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
np.set_printoptions(precision=3, suppress=True, linewidth=200, threshold=5000)
out = open('/home/ian/Data/SimVoxels/Out/'+'***_'+dataname,'w')
#print np.vstack(dt_results).shape, np.vstack(gq_results).shape
results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#print results.shape
#results = np.vstack(dt_results)
print >> out, results[:,:]
out.close()
#up = dt_batch[:,2]>= 0
#splots.plot_sphere(dt_batch[up], 'batch '+str(direction))
#splots.plot_lambert(dt_batch[up],'batch '+str(direction), centre)
#spread = gq.q2odf_params
#e,v = np.linalg.eigh(np.dot(spread,spread.transpose()))
#effective_dimension = len(find(np.cumsum(e) > 0.05*np.sum(e))) #95%
#rotated = np.dot(dt_batch,evecs)
#rot_evals, rot_evecs = np.linalg.eig(np.dot(rotated.T,rotated)/rotated.shape[0])
#eval_order = np.argsort(rot_evals)
#rotated = rotated[:,eval_order]
#up = rotated[:,2]>= 0
#splot.plot_sphere(rotated[up],'first1000')
#splot.plot_lambert(rotated[up],'batch '+str(direction))
def run_gq_sims(sample_data=[35,23,46,39,40,10,37,27,21,20]):
results = []
out = open('/home/ian/Data/SimVoxels/Out/'+'npa+fa','w')
for j in range(len(sample_data)):
sample = sample_data[j]
simfile = simdata[sample]
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
for j in np.vstack((np.arange(100)*1000,np.arange(100)*1000+1)).T.ravel():
# 0,1,1000,1001,2000,2001,...
s = sim_data[j,:]
gqs = dp.GeneralizedQSampling(s.reshape((1,102)),bvals,gradients,Lambda=3.5)
tn = dp.Tensor(s.reshape((1,102)),bvals,gradients,fit_method='LS')
t0, t1, t2, npa = gqs.npa(s, width = 5)
print >> out, dataname, j, npa, tn.fa()[0]
'''
for (i,o) in enumerate(gqs.odf(s)):
print i,o
for (i,o) in enumerate(gqs.odf_vertices):
print i,o
'''
#o = gqs.odf(s)
#v = gqs.odf_vertices
#pole = v[t0[0]]
#eqv = dgqs.equatorial_zone_vertices(v, pole, 5)
#print 'Number of equatorial vertices: ', len(eqv)
#print np.max(o[eqv]),np.min(o[eqv])
#cos_e_pole = [np.dot(pole.T, v[i]) for i in eqv]
#print np.min(cos1), np.max(cos1)
#print 'equatorial max in equatorial vertices:', t1[0] in eqv
#x = np.cross(v[t0[0]],v[t1[0]])
#x = x/np.sqrt(np.sum(x**2))
#print x
#ptchv = dgqs.patch_vertices(v, x, 5)
#print len(ptchv)
#eqp = eqv[np.argmin([np.abs(np.dot(v[t1[0]].T,v[p])) for p in eqv])]
#print (eqp, o[eqp])
#print t2[0] in ptchv, t2[0] in eqv
#print np.dot(pole.T, v[t1[0]]), np.dot(pole.T, v[t2[0]])
#print ptchv[np.argmin([o[v] for v in ptchv])]
#gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
#gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
#gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
#np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000)
#out = open('/home/ian/Data/SimVoxels/Out/'+'+++_'+dataname,'w')
#results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#results = np.vstack(dt_results)
#print >> out, results[:,:]
out.close()
run_comparisons()
#run_gq_sims()
| bsd-3-clause |
karthikvadla16/spark-tk | regression-tests/sparktkregtests/testcases/dicom/dicom_export_dcm_test.py | 11 | 4051 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests export_dcm functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import numpy
from lxml import etree
class ExportDicomTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(ExportDicomTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = self.get_local_dataset("dicom_xml/")
self.image_directory = self.get_local_dataset("dicom_uncompressed/")
@unittest.skip("Sparktk: dicom export to dcm throws error")
def test_export_to_dcm(self):
# first we will set aside the original data so that we can use it to
# compare with later
original_metadata = self.dicom.metadata.to_pandas()["metadata"]
original_formatted = []
# we will remove the bulkdata tag from our original metadata
# since the bulkdata tag records the source of the dicom and should
# be ignored during comparison
for metadata in original_metadata:
ascii_metadata = metadata.encode("ascii", "ignore")
xml_root = etree.fromstring(ascii_metadata)
bulk_data_tag = xml_root.xpath("//BulkData")[0]
bulk_data_tag.getparent().remove(bulk_data_tag)
original_formatted.append(etree.tostring(xml_root))
original_imagedata = self.dicom.pixeldata.to_pandas()["imagematrix"]
# now we export the dicom object
# we use our QA libraries to generate a path
# we save the dicom to that path
dcm_export_path = self.get_export_file(self.get_name("DICOM_EXPORT"))
self.dicom.export_to_dcm(dcm_export_path)
# Now we will load back the data we just saved into a new dicom object
# so that we can ensure the data is the same
loaded_dicom = self.context.dicom.import_dcm(dcm_export_path)
# get the loaded metadata and imagedata
loaded_metadata = loaded_dicom.metadata.to_pandas()["metadata"]
loaded_imagedata = loaded_dicom.pixeldata.to_pandas()["imagematrix"]
# ensure that the loaded metadata and imagedata is of the same len
# as the original data,
# then iterate through the records and ensure they are the same
self.assertEqual(len(loaded_metadata), len(original_metadata))
self.assertEqual(len(loaded_imagedata), len(original_imagedata))
for actual in loaded_metadata:
# for each metadata we will remove the bulkdata tag before we
# compare it with the original data since the bulk data tag
# records the source for the dcm and may differ since
# we are loading from a different location than the original dicom
actual_root = etree.fromstring(actual.encode("ascii", "ignore"))
actual_bulk_data_tag = actual_root.xpath("//BulkData")[0]
actual_bulk_data_tag.getparent().remove(actual_bulk_data_tag)
actual = etree.tostring(actual_root)
self.assertTrue(actual in original_formatted)
for actual in loaded_imagedata:
result = any(numpy.array_equal(actual, original) for original in original_imagedata)
self.assertTrue(result)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
CallaJun/hackprince | indico/skimage/viewer/canvastools/linetool.py | 5 | 6887 | import numpy as np
from matplotlib import lines
from ...viewer.canvastools.base import CanvasToolBase, ToolHandles
__all__ = ['LineTool', 'ThickLineTool']
class LineTool(CanvasToolBase):
"""Widget for line selection in a plot.
Parameters
----------
viewer : :class:`skimage.viewer.Viewer`
Skimage viewer object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, viewer, on_move=None, on_release=None, on_enter=None,
maxdist=10, line_props=None, handle_props=None,
**kwargs):
super(LineTool, self).__init__(viewer, on_move=on_move,
on_enter=on_enter,
on_release=on_release, **kwargs)
props = dict(color='r', linewidth=1, alpha=0.4, solid_capstyle='butt')
props.update(line_props if line_props is not None else {})
self.linewidth = props['linewidth']
self.maxdist = maxdist
self._active_pt = None
x = (0, 0)
y = (0, 0)
self._end_pts = np.transpose([x, y])
self._line = lines.Line2D(x, y, visible=False, animated=True, **props)
self.ax.add_line(self._line)
self._handles = ToolHandles(self.ax, x, y,
marker_props=handle_props)
self._handles.set_visible(False)
self.artists = [self._line, self._handles.artist]
if on_enter is None:
def on_enter(pts):
x, y = np.transpose(pts)
print("length = %0.2f" %
np.sqrt(np.diff(x)**2 + np.diff(y)**2))
self.callback_on_enter = on_enter
viewer.add_tool(self)
@property
def end_points(self):
return self._end_pts.astype(int)
@end_points.setter
def end_points(self, pts):
self._end_pts = np.asarray(pts)
self._line.set_data(np.transpose(pts))
self._handles.set_data(np.transpose(pts))
self._line.set_linewidth(self.linewidth)
self.set_visible(True)
self.redraw()
def hit_test(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return False
idx, px_dist = self._handles.closest(event.x, event.y)
if px_dist < self.maxdist:
self._active_pt = idx
return True
else:
self._active_pt = None
return False
def on_mouse_press(self, event):
self.set_visible(True)
if self._active_pt is None:
self._active_pt = 0
x, y = event.xdata, event.ydata
self._end_pts = np.array([[x, y], [x, y]])
def on_mouse_release(self, event):
if event.button != 1:
return
self._active_pt = None
self.callback_on_release(self.geometry)
self.redraw()
def on_move(self, event):
if event.button != 1 or self._active_pt is None:
return
if not self.ax.in_axes(event):
return
self.update(event.xdata, event.ydata)
self.callback_on_move(self.geometry)
def update(self, x=None, y=None):
if x is not None:
self._end_pts[self._active_pt, :] = x, y
self.end_points = self._end_pts
@property
def geometry(self):
return self.end_points
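# A minimal usage sketch for LineTool (mirrors the ThickLineTool demo at the
# bottom of this module; `viewer` is assumed to be an ImageViewer instance):
#
#     line_tool = LineTool(viewer)
#     line_tool.end_points = ([10, 20], [100, 20])
#     viewer.show()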
class ThickLineTool(LineTool):
"""Widget for line selection in a plot.
The thickness of the line can be varied using the mouse scroll wheel, or
with the '+' and '-' keys.
Parameters
----------
viewer : :class:`skimage.viewer.Viewer`
Skimage viewer object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
on_change : function
Function called whenever the line thickness is changed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, viewer, on_move=None, on_enter=None, on_release=None,
on_change=None, maxdist=10, line_props=None, handle_props=None):
super(ThickLineTool, self).__init__(viewer,
on_move=on_move,
on_enter=on_enter,
on_release=on_release,
maxdist=maxdist,
line_props=line_props,
handle_props=handle_props)
if on_change is None:
def on_change(*args):
pass
self.callback_on_change = on_change
def on_scroll(self, event):
if not event.inaxes:
return
if event.button == 'up':
self._thicken_scan_line()
elif event.button == 'down':
self._shrink_scan_line()
def on_key_press(self, event):
if event.key == '+':
self._thicken_scan_line()
elif event.key == '-':
self._shrink_scan_line()
def _thicken_scan_line(self):
self.linewidth += 1
self.update()
self.callback_on_change(self.geometry)
def _shrink_scan_line(self):
if self.linewidth > 1:
self.linewidth -= 1
self.update()
self.callback_on_change(self.geometry)
if __name__ == '__main__': # pragma: no cover
from ... import data
from ...viewer import ImageViewer
image = data.camera()
viewer = ImageViewer(image)
h, w = image.shape
line_tool = ThickLineTool(viewer)
line_tool.end_points = ([w/3, h/2], [2*w/3, h/2])
viewer.show()
| lgpl-3.0 |
gef756/statsmodels | statsmodels/genmod/cov_struct.py | 19 | 46892 | from statsmodels.compat.python import iterkeys, itervalues, zip, range
from statsmodels.stats.correlation_tools import cov_nearest
import numpy as np
import pandas as pd
from scipy import linalg as spl
from collections import defaultdict
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
import warnings
"""
Some details for the covariance calculations can be found in the Stata
docs:
http://www.stata.com/manuals13/xtxtgee.pdf
"""
class CovStruct(object):
"""
A base class for correlation and covariance structures of grouped
data.
Each implementation of this class takes the residuals from a
regression model that has been fitted to grouped data, and uses
them to estimate the within-group dependence structure of the
random errors in the model.
The state of the covariance structure is represented through the
value of the class variable `dep_params`. The default state of a
newly-created instance should correspond to the identity
correlation matrix.
"""
def __init__(self, cov_nearest_method="clipped"):
# Parameters describing the dependency structure
self.dep_params = None
# Keep track of the number of times that the covariance was
# adjusted.
self.cov_adjust = []
# Method for projecting the covariance matrix if it not SPD.
self.cov_nearest_method = cov_nearest_method
def initialize(self, model):
"""
Called by GEE, used by implementations that need additional
setup prior to running `fit`.
Parameters
----------
model : GEE class
A reference to the parent GEE class instance.
"""
self.model = model
def update(self, params):
"""
Updates the association parameter values based on the current
regression coefficients.
Parameters
----------
params : array-like
Working values for the regression parameters.
"""
raise NotImplementedError
def covariance_matrix(self, endog_expval, index):
"""
Returns the working covariance or correlation matrix for a
given cluster of data.
Parameters
----------
endog_expval: array-like
The expected values of endog for the cluster for which the
covariance or correlation matrix will be returned
index: integer
The index of the cluster for which the covariance or
correlation matrix will be returned
Returns
-------
M: matrix
The covariance or correlation matrix of endog
is_cor: bool
True if M is a correlation matrix, False if M is a
covariance matrix
"""
raise NotImplementedError
def covariance_matrix_solve(self, expval, index, stdev, rhs):
"""
Solves matrix equations of the form `covmat * soln = rhs` and
returns the values of `soln`, where `covmat` is the covariance
matrix represented by this class.
Parameters
----------
expval: array-like
The expected value of endog for each observed value in the
group.
index: integer
The group index.
stdev : array-like
The standard deviation of endog for each observation in
the group.
rhs : list/tuple of array-like
A set of right-hand sides; each defines a matrix equation
to be solved.
Returns
-------
soln : list/tuple of array-like
The solutions to the matrix equations.
Notes
-----
Returns None if the solver fails.
Some dependence structures do not use `expval` and/or `index`
to determine the correlation matrix. Some families
(e.g. binomial) do not use the `stdev` parameter when forming
the covariance matrix.
If the covariance matrix is singular or not SPD, it is
projected to the nearest such matrix. These projection events
are recorded in the fit_history member of the GEE model.
Systems of linear equations with the covariance matrix as the
left hand side (LHS) are solved for different right hand sides
(RHS); the LHS is only factorized once to save time.
This is a default implementation, it can be reimplemented in
subclasses to optimize the linear algebra according to the
structure of the covariance matrix.
"""
vmat, is_cor = self.covariance_matrix(expval, index)
if is_cor:
vmat *= np.outer(stdev, stdev)
# Factor the covariance matrix. If the factorization fails,
# attempt to condition it into a factorizable matrix.
threshold = 1e-2
success = False
cov_adjust = 0
for itr in range(20):
try:
vco = spl.cho_factor(vmat)
success = True
break
except np.linalg.LinAlgError:
vmat = cov_nearest(vmat, method=self.cov_nearest_method,
threshold=threshold)
threshold *= 2
cov_adjust += 1
self.cov_adjust.append(cov_adjust)
# Last resort if we still can't factor the covariance matrix.
if not success:
warnings.warn("Unable to condition covariance matrix to an SPD matrix using cov_nearest",
ConvergenceWarning)
vmat = np.diag(np.diag(vmat))
vco = spl.cho_factor(vmat)
soln = [spl.cho_solve(vco, x) for x in rhs]
return soln
def summary(self):
"""
Returns a text summary of the current estimate of the
dependence structure.
"""
raise NotImplementedError
class Independence(CovStruct):
"""
An independence working dependence structure.
"""
# Nothing to update
def update(self, params):
return
def covariance_matrix(self, expval, index):
dim = len(expval)
return np.eye(dim, dtype=np.float64), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
v = stdev**2
rslt = []
for x in rhs:
if x.ndim == 1:
rslt.append(x / v)
else:
rslt.append(x / v[:, None])
return rslt
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return "Observations within a cluster are modeled as being independent."
class Exchangeable(CovStruct):
"""
An exchangeable working dependence structure.
"""
def __init__(self):
super(Exchangeable, self).__init__()
# The correlation between any two values in the same cluster
self.dep_params = 0.
def update(self, params):
endog = self.model.endog_li
nobs = self.model.nobs
varfunc = self.model.family.variance
cached_means = self.model.cached_means
has_weights = self.model.weights is not None
weights_li = self.model.weights
residsq_sum, scale = 0, 0
fsum1, fsum2, n_pairs = 0., 0., 0.
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
f = weights_li[i] if has_weights else 1.
ngrp = len(resid)
residsq = np.outer(resid, resid)
scale += f * np.trace(residsq)
fsum1 += f * len(endog[i])
residsq = np.tril(residsq, -1)
residsq_sum += f * residsq.sum()
npr = 0.5 * ngrp * (ngrp - 1)
fsum2 += f * npr
n_pairs += npr
ddof = self.model.ddof_scale
scale /= (fsum1 * (nobs - ddof) / float(nobs))
residsq_sum /= scale
self.dep_params = residsq_sum / (fsum2 * (n_pairs - ddof) / float(n_pairs))
def covariance_matrix(self, expval, index):
dim = len(expval)
dp = self.dep_params * np.ones((dim, dim), dtype=np.float64)
np.fill_diagonal(dp, 1)
return dp, True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
k = len(expval)
c = self.dep_params / (1. - self.dep_params)
c /= 1. + self.dep_params * (k - 1)
rslt = []
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
y = x1 / (1. - self.dep_params)
y -= c * sum(x1)
y /= stdev
else:
x1 = x / stdev[:, None]
y = x1 / (1. - self.dep_params)
y -= c * x1.sum(0)
y /= stdev[:, None]
rslt.append(y)
return rslt
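    # Sketch of the identity used by covariance_matrix_solve above: for the
    # exchangeable correlation matrix R = (1 - r) * I + r * J, with J a matrix
    # of ones, r = self.dep_params and k the group size, the inverse has the
    # closed form
    #
    #     R^{-1} = (1 / (1 - r)) * (I - (r / (1 + (k - 1) * r)) * J)
    #
    # so each solve reduces to x1 / (1 - r) - c * sum(x1) without ever forming
    # R explicitly.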
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("The correlation between two observations in the " +
"same cluster is %.3f" % self.dep_params)
class Nested(CovStruct):
"""
A nested working dependence structure.
A working dependence structure that captures a nested hierarchy of
groups, each level of which contributes to the random error term
of the model.
When using this working covariance structure, `dep_data` of the
GEE instance should contain a n_obs x k matrix of 0/1 indicators,
corresponding to the k subgroups nested under the top-level
`groups` of the GEE instance. These subgroups should be nested
from left to right, so that two observations with the same value
for column j of `dep_data` should also have the same value for all
columns j' < j (this only applies to observations in the same
top-level cluster given by the `groups` argument to GEE).
Examples
--------
Suppose our data are student test scores, and the students are in
classrooms, nested in schools, nested in school districts. The
school district is the highest level of grouping, so the school
district id would be provided to GEE as `groups`, and the school
and classroom id's would be provided to the Nested class as the
`dep_data` argument, e.g.
0 0 # School 0, classroom 0, student 0
0 0 # School 0, classroom 0, student 1
0 1 # School 0, classroom 1, student 0
0 1 # School 0, classroom 1, student 1
1 0 # School 1, classroom 0, student 0
1 0 # School 1, classroom 0, student 1
1 1 # School 1, classroom 1, student 0
1 1 # School 1, classroom 1, student 1
Labels lower in the hierarchy are recycled, so that student 0 in
classroom 0 is different fro student 0 in classroom 1, etc.
Notes
-----
The calculations for this dependence structure involve all pairs
of observations within a group (that is, within the top level
`group` structure passed to GEE). Large group sizes will result
in slow iterations.
The variance components are estimated using least squares
regression of the products r*r', for standardized residuals r and
r' in the same group, on a vector of indicators defining which
variance components are shared by r and r'.
"""
def initialize(self, model):
"""
Called on the first call to update
`ilabels` is a list of n_i x n_i matrices containing integer
labels that correspond to specific correlation parameters.
Two elements of ilabels[i] with the same label share identical
variance components.
`designx` is a matrix, with each row containing dummy
variables indicating which variance components are associated
with the corresponding element of QY.
"""
super(Nested, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for nested cov_struct, using unweighted covariance estimate")
# A bit of processing of the nest data
id_matrix = np.asarray(self.model.dep_data)
if id_matrix.ndim == 1:
id_matrix = id_matrix[:,None]
self.id_matrix = id_matrix
endog = self.model.endog_li
designx, ilabels = [], []
# The number of layers of nesting
n_nest = self.id_matrix.shape[1]
for i in range(self.model.num_group):
ngrp = len(endog[i])
glab = self.model.group_labels[i]
rix = self.model.group_indices[glab]
# Determine the number of common variance components
# shared by each pair of observations.
ix1, ix2 = np.tril_indices(ngrp, -1)
ncm = (self.id_matrix[rix[ix1], :] ==
self.id_matrix[rix[ix2], :]).sum(1)
# This is used to construct the working correlation
# matrix.
ilabel = np.zeros((ngrp, ngrp), dtype=np.int32)
ilabel[ix1, ix2] = ncm + 1
ilabel[ix2, ix1] = ncm + 1
ilabels.append(ilabel)
# This is used to estimate the variance components.
dsx = np.zeros((len(ix1), n_nest+1), dtype=np.float64)
dsx[:,0] = 1
for k in np.unique(ncm):
ii = np.flatnonzero(ncm == k)
dsx[ii, 1:k+1] = 1
designx.append(dsx)
self.designx = np.concatenate(designx, axis=0)
self.ilabels = ilabels
svd = np.linalg.svd(self.designx, 0)
self.designx_u = svd[0]
self.designx_s = svd[1]
self.designx_v = svd[2].T
def update(self, params):
endog = self.model.endog_li
nobs = self.model.nobs
dim = len(params)
if self.designx is None:
self._compute_design(self.model)
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dvmat = []
scale = 0.
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
ix1, ix2 = np.tril_indices(len(resid), -1)
dvmat.append(resid[ix1] * resid[ix2])
scale += np.sum(resid**2)
dvmat = np.concatenate(dvmat)
scale /= (nobs - dim)
# Use least squares regression to estimate the variance
# components
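# using the precomputed thin SVD of the design matrix (an SVD-based
# pseudo-inverse solve).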
vcomp_coeff = np.dot(self.designx_v, np.dot(self.designx_u.T,
dvmat) / self.designx_s)
self.vcomp_coeff = np.clip(vcomp_coeff, 0, np.inf)
self.scale = scale
self.dep_params = self.vcomp_coeff.copy()
def covariance_matrix(self, expval, index):
dim = len(expval)
# First iteration
if self.dep_params is None:
return np.eye(dim, dtype=np.float64), True
ilabel = self.ilabels[index]
c = np.r_[self.scale, np.cumsum(self.vcomp_coeff)]
vmat = c[ilabel]
vmat /= self.scale
return vmat, True
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
"""
Returns a summary string describing the state of the
dependence structure.
"""
msg = "Variance estimates\n------------------\n"
for k in range(len(self.vcomp_coeff)):
msg += "Component %d: %.3f\n" % (k+1, self.vcomp_coeff[k])
msg += "Residual: %.3f\n" % (self.scale -
np.sum(self.vcomp_coeff))
return msg
class Stationary(CovStruct):
"""
A stationary covariance structure.
The correlation between two observations is an arbitrary function
of the distance between them. Distances up to a given maximum
value are included in the covariance model.
Parameters
----------
max_lag : int
The largest distance that is included in the covariance model.
grid : bool
If True, the index positions in the data (after dropping missing
values) are used to define distances, and the `time` variable is
ignored.
"""
def __init__(self, max_lag=1, grid=False):
super(Stationary, self).__init__()
self.max_lag = max_lag
self.grid = grid
self.dep_params = np.zeros(max_lag)
def initialize(self, model):
super(Stationary, self).initialize(model)
# Time used as an index needs to be integer type.
if not self.grid:
time = self.model.time[:, 0].astype(np.int32)
self.time = self.model.cluster_list(time)
def update(self, params):
if self.grid:
self.update_grid(params)
else:
self.update_nogrid(params)
def update_grid(self, params):
endog = self.model.endog_li
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dep_params = np.zeros(self.max_lag + 1)
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
dep_params[0] += np.sum(resid * resid) / len(resid)
for j in range(1, self.max_lag + 1):
dep_params[j] += np.sum(resid[0:-j] * resid[j:]) / len(resid[j:])
self.dep_params = dep_params[1:] / dep_params[0]
def update_nogrid(self, params):
endog = self.model.endog_li
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dep_params = np.zeros(self.max_lag + 1)
dn = np.zeros(self.max_lag + 1)
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
j1, j2 = np.tril_indices(len(expval))
dx = np.abs(self.time[i][j1] - self.time[i][j2])
ii = np.flatnonzero(dx <= self.max_lag)
j1 = j1[ii]
j2 = j2[ii]
dx = dx[ii]
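# For each lag, accumulate the sum of residual products (vs) and the
# number of contributing pairs (vd) with bincount, then average.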
vs = np.bincount(dx, weights=resid[j1] * resid[j2], minlength=self.max_lag+1)
vd = np.bincount(dx, minlength=self.max_lag+1)
ii = np.flatnonzero(vd > 0)
dn[ii] += 1
if len(ii) > 0:
dep_params[ii] += vs[ii] / vd[ii]
dep_params /= dn
self.dep_params = dep_params[1:] / dep_params[0]
def covariance_matrix(self, endog_expval, index):
if self.grid:
return self.covariance_matrix_grid(endog_expval, index)
j1, j2 = np.tril_indices(len(endog_expval))
dx = np.abs(self.time[index][j1] - self.time[index][j2])
ii = np.flatnonzero((0 < dx) & (dx <= self.max_lag))
j1 = j1[ii]
j2 = j2[ii]
dx = dx[ii]
cmat = np.eye(len(endog_expval))
cmat[j1, j2] = self.dep_params[dx - 1]
cmat[j2, j1] = self.dep_params[dx - 1]
return cmat, True
def covariance_matrix_grid(self, endog_expval, index):
from scipy.linalg import toeplitz
r = np.zeros(len(endog_expval))
r[0] = 1
r[1:self.max_lag + 1] = self.dep_params
return toeplitz(r), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
if self.grid == False:
return super(Stationary, self).covariance_matrix_solve(expval, index, stdev, rhs)
from statsmodels.tools.linalg import stationary_solve
r = np.zeros(len(expval))
r[0:self.max_lag] = self.dep_params
return [stationary_solve(r, x) for x in rhs]
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("Stationary dependence parameters\n",
self.dep_params)
class Autoregressive(CovStruct):
"""
A first-order autoregressive working dependence structure.
The dependence is defined in terms of the `time` component of the
parent GEE class, which defaults to the index position of each
value within its cluster, based on the order of values in the
input data set. Time represents a potentially multidimensional
index from which distances between pairs of observations can be
determined.
The correlation between two observations in the same cluster is
dep_params^distance, where `dep_params` contains the (scalar)
autocorrelation parameter to be estimated, and `distance` is the
distance between the two observations, calculated from their
corresponding time values. `time` is stored as an n_obs x k
matrix, where `k` represents the number of dimensions in the time
index.
The autocorrelation parameter is estimated using weighted
nonlinear least squares, regressing each value within a cluster on
each preceding value in the same cluster.
Parameters
----------
dist_func: function from R^k x R^k to R^+, optional
A function that computes the distance between the two
observations based on their `time` values.
References
----------
B Rosner, A Munoz. Autoregressive modeling for the analysis of
longitudinal data with unequally spaced examinations. Statistics
in medicine. Vol 7, 59-71, 1988.
"""
def __init__(self, dist_func=None):
super(Autoregressive, self).__init__()
# The function for determining distances based on time
if dist_func is None:
self.dist_func = lambda x, y: np.abs(x - y).sum()
else:
self.dist_func = dist_func
self.designx = None
# The autocorrelation parameter
self.dep_params = 0.
def update(self, params):
if self.model.weights is not None:
warnings.warn("weights not implemented for autoregressive cov_struct, using unweighted covariance estimate")
endog = self.model.endog_li
time = self.model.time_li
# Only need to compute this once
if self.designx is not None:
designx = self.designx
else:
designx = []
for i in range(self.model.num_group):
ngrp = len(endog[i])
if ngrp == 0:
continue
# Loop over pairs of observations within a cluster
for j1 in range(ngrp):
for j2 in range(j1):
designx.append(self.dist_func(time[i][j1, :],
time[i][j2, :]))
designx = np.array(designx)
self.designx = designx
scale = self.model.estimate_scale()
varfunc = self.model.family.variance
cached_means = self.model.cached_means
# Weights
var = 1. - self.dep_params**(2*designx)
var /= 1. - self.dep_params**2
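# Each pair is weighted by the inverse variance of
# (resid_j1 - rho^d * resid_j2) under an AR(1) model, which is
# proportional to 1 - rho^(2*d); the common 1/(1 - rho^2) factor
# cancels when the weights are normalized below.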
wts = 1. / var
wts /= wts.sum()
residmat = []
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(scale * varfunc(expval))
resid = (endog[i] - expval) / stdev
ngrp = len(resid)
for j1 in range(ngrp):
for j2 in range(j1):
residmat.append([resid[j1], resid[j2]])
residmat = np.array(residmat)
# Need to minimize this
def fitfunc(a):
dif = residmat[:, 0] - (a**designx)*residmat[:, 1]
return np.dot(dif**2, wts)
# Left bracket point
b_lft, f_lft = 0., fitfunc(0.)
# Center bracket point
b_ctr, f_ctr = 0.5, fitfunc(0.5)
while f_ctr > f_lft:
b_ctr /= 2
f_ctr = fitfunc(b_ctr)
if b_ctr < 1e-8:
self.dep_params = 0
return
# Right bracket point
b_rgt, f_rgt = 0.75, fitfunc(0.75)
while f_rgt < f_ctr:
b_rgt = b_rgt + (1. - b_rgt) / 2
f_rgt = fitfunc(b_rgt)
if b_rgt > 1. - 1e-6:
raise ValueError(
"Autoregressive: unable to find right bracket")
from scipy.optimize import brent
self.dep_params = brent(fitfunc, brack=[b_lft, b_ctr, b_rgt])
def covariance_matrix(self, endog_expval, index):
ngrp = len(endog_expval)
if self.dep_params == 0:
return np.eye(ngrp, dtype=np.float64), True
idx = np.arange(ngrp)
cmat = self.dep_params**np.abs(idx[:, None] - idx[None, :])
return cmat, True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
# The inverse of an AR(1) covariance matrix is tri-diagonal.
k = len(expval)
soln = []
# LHS has 1 column
if k == 1:
return [x / stdev**2 for x in rhs]
# LHS has 2 columns
if k == 2:
mat = np.array([[1, -self.dep_params], [-self.dep_params, 1]])
mat /= (1. - self.dep_params**2)
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
else:
x1 = x / stdev[:, None]
x1 = np.dot(mat, x1)
if x.ndim == 1:
x1 /= stdev
else:
x1 /= stdev[:, None]
soln.append(x1)
return soln
# LHS has >= 3 columns: values c0, c1, c2 defined below give
# the inverse. c0 is on the diagonal, except for the first
# and last position. c1 is on the first and last position of
# the diagonal. c2 is on the sub/super diagonal.
c0 = (1. + self.dep_params**2) / (1. - self.dep_params**2)
c1 = 1. / (1. - self.dep_params**2)
c2 = -self.dep_params / (1. - self.dep_params**2)
soln = []
for x in rhs:
flatten = False
if x.ndim == 1:
x = x[:, None]
flatten = True
x1 = x / stdev[:, None]
z0 = np.zeros((1, x.shape[1]))
rhs1 = np.concatenate((x1[1:,:], z0), axis=0)
rhs2 = np.concatenate((z0, x1[0:-1,:]), axis=0)
y = c0*x1 + c2*rhs1 + c2*rhs2
y[0, :] = c1*x1[0, :] + c2*x1[1, :]
y[-1, :] = c1*x1[-1, :] + c2*x1[-2, :]
y /= stdev[:, None]
if flatten:
y = np.squeeze(y)
soln.append(y)
return soln
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("Autoregressive(1) dependence parameter: %.3f\n" %
self.dep_params)
class CategoricalCovStruct(CovStruct):
"""
Parent class for covariance structure for categorical data models.
Attributes
----------
nlevel : int
The number of distinct levels for the outcome variable.
ibd : list
A list whose i^th element ibd[i] is an array whose rows
contain integer pairs (a,b), where endog_li[i][a:b] is the
subvector of binary indicators derived from the same ordinal
value.
"""
def initialize(self, model):
super(CategoricalCovStruct, self).initialize(model)
self.nlevel = len(model.endog_values)
self._ncut = self.nlevel - 1
from numpy.lib.stride_tricks import as_strided
b = np.dtype(np.int64).itemsize
ibd = []
for v in model.endog_li:
jj = np.arange(0, len(v) + 1, self._ncut, dtype=np.int64)
jj = as_strided(jj, shape=(len(jj) - 1, 2), strides=(b, b))
ibd.append(jj)
self.ibd = ibd
class GlobalOddsRatio(CategoricalCovStruct):
"""
Estimate the global odds ratio for a GEE with ordinal or nominal
data.
References
----------
PJ Heagerty and S Zeger. "Marginal Regression Models for Clustered
Ordinal Measurements". Journal of the American Statistical
Association Vol. 91, Issue 435 (1996).
Thomas Lumley. Generalized Estimating Equations for Ordinal Data:
A Note on Working Correlation Structures. Biometrics Vol. 52,
No. 1 (Mar., 1996), pp. 354-361
http://www.jstor.org/stable/2533173
Notes
-----
The following data structures are calculated in the class:
'ibd' is a list whose i^th element ibd[i] is a sequence of integer
pairs (a,b), where endog_li[i][a:b] is the subvector of binary
indicators derived from the same ordinal value.
`cpp` is a dictionary where cpp[group] is a map from cut-point
pairs (c,c') to the indices of all between-subject pairs derived
from the given cut points.
"""
def __init__(self, endog_type):
super(GlobalOddsRatio, self).__init__()
self.endog_type = endog_type
self.dep_params = 0.
def initialize(self, model):
super(GlobalOddsRatio, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for GlobalOddsRatio cov_struct, using unweighted covariance estimate")
# Need to restrict to between-subject pairs
cpp = []
for v in model.endog_li:
# Number of subjects in this group
m = int(len(v) / self._ncut)
i1, i2 = np.tril_indices(m, -1)
cpp1 = {}
for k1 in range(self._ncut):
for k2 in range(k1+1):
jj = np.zeros((len(i1), 2), dtype=np.int64)
jj[:, 0] = i1*self._ncut + k1
jj[:, 1] = i2*self._ncut + k2
cpp1[(k2, k1)] = jj
cpp.append(cpp1)
self.cpp = cpp
# Initialize the dependence parameters
self.crude_or = self.observed_crude_oddsratio()
if self.model.update_dep:
self.dep_params = self.crude_or
def pooled_odds_ratio(self, tables):
"""
Returns the pooled odds ratio for a list of 2x2 tables.
The pooled odds ratio is the inverse variance weighted average
of the sample odds ratios of the tables.
"""
if len(tables) == 0:
return 1.
# Get the sampled odds ratios and variances
log_oddsratio, var = [], []
for table in tables:
lor = np.log(table[1, 1]) + np.log(table[0, 0]) -\
np.log(table[0, 1]) - np.log(table[1, 0])
log_oddsratio.append(lor)
var.append((1 / table.astype(np.float64)).sum())
# Calculate the inverse variance weighted average
wts = [1 / v for v in var]
wtsum = sum(wts)
wts = [w / wtsum for w in wts]
log_pooled_or = sum([w*e for w, e in zip(wts, log_oddsratio)])
return np.exp(log_pooled_or)
def covariance_matrix(self, expected_value, index):
vmat = self.get_eyy(expected_value, index)
vmat -= np.outer(expected_value, expected_value)
return vmat, False
def observed_crude_oddsratio(self):
"""
To obtain the crude (global) odds ratio, first pool all binary
indicators corresponding to a given pair of cut points (c,c'),
then calculate the odds ratio for this 2x2 table. The crude
odds ratio is the inverse variance weighted average of these
odds ratios. Since the covariate effects are ignored, this OR
will generally be greater than the stratified OR.
"""
cpp = self.cpp
endog = self.model.endog_li
# Storage for the contingency tables for each (c,c')
tables = {}
for ii in iterkeys(cpp[0]):
tables[ii] = np.zeros((2, 2), dtype=np.float64)
# Get the observed crude OR
for i in range(len(endog)):
# The observed joint values for the current cluster
yvec = endog[i]
endog_11 = np.outer(yvec, yvec)
endog_10 = np.outer(yvec, 1. - yvec)
endog_01 = np.outer(1. - yvec, yvec)
endog_00 = np.outer(1. - yvec, 1. - yvec)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += endog_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += endog_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += endog_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += endog_00[ix[:, 0], ix[:, 1]].sum()
return self.pooled_odds_ratio(list(itervalues(tables)))
def get_eyy(self, endog_expval, index):
"""
Returns a matrix V such that V[i,j] is the joint probability
that endog[i] = 1 and endog[j] = 1, based on the marginal
probabilities of endog and the global odds ratio `current_or`.
"""
current_or = self.dep_params
ibd = self.ibd[index]
# The between-observation joint probabilities
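# If the odds ratio is 1 the indicators are independent; otherwise the
# joint probability is the root of the quadratic implied by the marginal
# probabilities and the global odds ratio (the Plackett construction).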
if current_or == 1.0:
vmat = np.outer(endog_expval, endog_expval)
else:
psum = endog_expval[:, None] + endog_expval[None, :]
pprod = endog_expval[:, None] * endog_expval[None, :]
pfac = np.sqrt((1. + psum * (current_or - 1.))**2 +
4 * current_or * (1. - current_or) * pprod)
vmat = 1. + psum * (current_or - 1.) - pfac
vmat /= 2. * (current_or - 1)
# Fix E[YY'] for elements that belong to same observation
for bdl in ibd:
evy = endog_expval[bdl[0]:bdl[1]]
if self.endog_type == "ordinal":
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.minimum.outer(evy, evy)
else:
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] = np.diag(evy)
return vmat
def update(self, params):
"""
Update the global odds ratio based on the current value of
params.
"""
endog = self.model.endog_li
cpp = self.cpp
cached_means = self.model.cached_means
# This will happen if all the clusters have only
# one observation
if len(cpp[0]) == 0:
return
tables = {}
for ii in cpp[0]:
tables[ii] = np.zeros((2, 2), dtype=np.float64)
for i in range(self.model.num_group):
endog_expval, _ = cached_means[i]
emat_11 = self.get_eyy(endog_expval, i)
emat_10 = endog_expval[:, None] - emat_11
emat_01 = -emat_11 + endog_expval
emat_00 = 1. - (emat_11 + emat_10 + emat_01)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += emat_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += emat_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += emat_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += emat_00[ix[:, 0], ix[:, 1]].sum()
cor_expval = self.pooled_odds_ratio(list(itervalues(tables)))
self.dep_params *= self.crude_or / cor_expval
if not np.isfinite(self.dep_params):
self.dep_params = 1.
warnings.warn("dep_params became inf, resetting to 1",
ConvergenceWarning)
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
return "Global odds ratio: %.3f\n" % self.dep_params
class OrdinalIndependence(CategoricalCovStruct):
"""
An independence covariance structure for ordinal models.
The working covariance between indicators derived from different
observations is zero. The working covariance between indicators
derived from a common observation is determined from their current
mean values.
There are no parameters to estimate in this covariance structure.
"""
def covariance_matrix(self, expected_value, index):
ibd = self.ibd[index]
n = len(expected_value)
vmat = np.zeros((n, n))
for bdl in ibd:
ev = expected_value[bdl[0]:bdl[1]]
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.minimum.outer(ev, ev) - np.outer(ev, ev)
return vmat, False
# Nothing to update
def update(self, params):
pass
class NominalIndependence(CategoricalCovStruct):
"""
An independence covariance structure for nominal models.
The working covariance between indicators derived from different
observations is zero. The working covariance between indicators
derived from a common observation is determined from their current
mean values.
There are no parameters to estimate in this covariance structure.
"""
def covariance_matrix(self, expected_value, index):
ibd = self.ibd[index]
n = len(expected_value)
vmat = np.zeros((n, n))
for bdl in ibd:
ev = expected_value[bdl[0]:bdl[1]]
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.diag(ev) - np.outer(ev, ev)
return vmat, False
# Nothing to update
def update(self, params):
pass
class Equivalence(CovStruct):
"""
A covariance structure defined in terms of equivalence classes.
An 'equivalence class' is a set of pairs of observations such that
the covariance of every pair within the equivalence class has a
common value.
Parameters
----------
pairs : dict-like
A dictionary of dictionaries, where `pairs[group][label]`
provides the indices of all pairs of observations in the group
that have the same covariance value. Specifically,
`pairs[group][label]` is a tuple `(j1, j2)`, where `j1` and `j2`
are integer arrays of the same length. `j1[i], j2[i]` is one
index pair that belongs to the `label` equivalence class. Only
one triangle of each covariance matrix should be included.
Positions where j1 and j2 have the same value are variance
parameters.
labels : array-like
An array of labels such that every distinct pair of labels
defines an equivalence class. Either `labels` or `pairs` must
be provided. When the two labels in a pair are equal two
equivalence classes are defined: one for the diagonal elements
(corresponding to variances) and one for the off-diagonal
elements (corresponding to covariances).
return_cov : boolean
If True, `covariance_matrix` returns an estimate of the
covariance matrix, otherwise returns an estimate of the
correlation matrix.
Notes
-----
Using `labels` to define the class is much easier than using
`pairs`, but is less general.
Any pair of values not contained in `pairs` will be assigned zero
covariance.
The index values in `pairs` are row indices into the `exog`
matrix. They are not updated if missing data are present. When
using this covariance structure, missing data should be removed
before constructing the model.
If using `labels`, after a model is defined using the covariance
structure it is possible to remove a label pair from the second
level of the `pairs` dictionary to force the corresponding
covariance to be zero.
Examples
--------
The following sets up the `pairs` dictionary for a model with two
groups, equal variance for all observations, and constant
covariance for all pairs of observations within each group.
>> pairs = {0: {}, 1: {}}
>> pairs[0][0] = (np.r_[0, 1, 2], np.r_[0, 1, 2])
>> pairs[0][1] = np.tril_indices(3, -1)
>> pairs[1][0] = (np.r_[3, 4, 5], np.r_[3, 4, 5])
>> pairs[1][2] = 3 + np.tril_indices(3, -1)
"""
def __init__(self, pairs=None, labels=None, return_cov=False):
super(Equivalence, self).__init__()
if (pairs is None) and (labels is None):
raise ValueError("Equivalence cov_struct requires either `pairs` or `labels`")
if (pairs is not None) and (labels is not None):
raise ValueError("Equivalence cov_struct accepts only one of `pairs` and `labels`")
if pairs is not None:
import copy
self.pairs = copy.deepcopy(pairs)
if labels is not None:
self.labels = np.asarray(labels)
self.return_cov = return_cov
def _make_pairs(self, i, j):
"""
Create arrays `i_`, `j_` containing all unique ordered pairs of elements in `i` and `j`.
The arrays `i` and `j` must be one-dimensional containing non-negative integers.
"""
mat = np.zeros((len(i)*len(j), 2), dtype=np.int32)
# Create the pairs and order them
f = np.ones(len(j))
mat[:, 0] = np.kron(f, i).astype(np.int32)
f = np.ones(len(i))
mat[:, 1] = np.kron(j, f).astype(np.int32)
mat.sort(1)
# Remove repeated rows
try:
dtype = np.dtype((np.void, mat.dtype.itemsize * mat.shape[1]))
bmat = np.ascontiguousarray(mat).view(dtype)
_, idx = np.unique(bmat, return_index=True)
except TypeError:
# workaround for old numpy that can't call unique with complex
# dtypes
np.random.seed(4234)
bmat = np.dot(mat, np.random.uniform(size=mat.shape[1]))
_, idx = np.unique(bmat, return_index=True)
mat = mat[idx, :]
return mat[:, 0], mat[:, 1]
def _pairs_from_labels(self):
from collections import defaultdict
pairs = defaultdict(lambda : defaultdict(lambda : None))
model = self.model
df = pd.DataFrame({"labels": self.labels, "groups": model.groups})
gb = df.groupby(["groups", "labels"])
ulabels = np.unique(self.labels)
for g_ix, g_lb in enumerate(model.group_labels):
# Loop over label pairs
for lx1 in range(len(ulabels)):
for lx2 in range(lx1+1):
lb1 = ulabels[lx1]
lb2 = ulabels[lx2]
try:
i1 = gb.groups[(g_lb, lb1)]
i2 = gb.groups[(g_lb, lb2)]
except KeyError:
continue
i1, i2 = self._make_pairs(i1, i2)
clabel = str(lb1) + "/" + str(lb2)
# Variance parameters belong in their own equiv class.
jj = np.flatnonzero(i1 == i2)
if len(jj) > 0:
clabelv = clabel + "/v"
pairs[g_lb][clabelv] = (i1[jj], i2[jj])
# Covariance parameters
jj = np.flatnonzero(i1 != i2)
if len(jj) > 0:
i1 = i1[jj]
i2 = i2[jj]
pairs[g_lb][clabel] = (i1, i2)
self.pairs = pairs
def initialize(self, model):
super(Equivalence, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for equalence cov_struct, using unweighted covariance estimate")
if not hasattr(self, 'pairs'):
self._pairs_from_labels()
# Initialize so that any equivalence class containing a
# variance parameter has value 1.
self.dep_params = defaultdict(lambda : 0.)
self._var_classes = set([])
for gp in self.model.group_labels:
for lb in self.pairs[gp]:
j1, j2 = self.pairs[gp][lb]
if np.any(j1 == j2):
if not np.all(j1 == j2):
warnings.warn("equivalence class contains both variance and covariance parameters")
self._var_classes.add(lb)
self.dep_params[lb] = 1
# Need to start indexing at 0 within each group.
# rx maps old indices to new indices
rx = -1 * np.ones(len(self.model.endog), dtype=np.int32)
for g_ix, g_lb in enumerate(self.model.group_labels):
ii = self.model.group_indices[g_lb]
rx[ii] = np.arange(len(ii), dtype=np.int32)
# Reindex
for gp in self.model.group_labels:
for lb in self.pairs[gp].keys():
a, b = self.pairs[gp][lb]
self.pairs[gp][lb] = (rx[a], rx[b])
def update(self, params):
endog = self.model.endog_li
varfunc = self.model.family.variance
cached_means = self.model.cached_means
dep_params = defaultdict(lambda : [0., 0., 0.])
n_pairs = defaultdict(lambda : 0)
dim = len(params)
for k, gp in enumerate(self.model.group_labels):
expval, _ = cached_means[k]
stdev = np.sqrt(varfunc(expval))
resid = (endog[k] - expval) / stdev
for lb in self.pairs[gp].keys():
if (not self.return_cov) and lb in self._var_classes:
continue
jj = self.pairs[gp][lb]
dep_params[lb][0] += np.sum(resid[jj[0]] * resid[jj[1]])
if not self.return_cov:
dep_params[lb][1] += np.sum(resid[jj[0]]**2)
dep_params[lb][2] += np.sum(resid[jj[1]]**2)
n_pairs[lb] += len(jj[0])
if self.return_cov:
for lb in dep_params.keys():
dep_params[lb] = dep_params[lb][0] / (n_pairs[lb] - dim)
else:
for lb in dep_params.keys():
den = np.sqrt(dep_params[lb][1] * dep_params[lb][2])
dep_params[lb] = dep_params[lb][0] / den
for lb in self._var_classes:
dep_params[lb] = 1.
self.dep_params = dep_params
self.n_pairs = n_pairs
def covariance_matrix(self, expval, index):
dim = len(expval)
cmat = np.zeros((dim, dim))
g_lb = self.model.group_labels[index]
for lb in self.pairs[g_lb].keys():
j1, j2 = self.pairs[g_lb][lb]
cmat[j1, j2] = self.dep_params[lb]
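# Only one triangle of each matrix is stored in `pairs`; symmetrize,
# then halve the diagonal, which the addition of the transpose counted
# twice.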
cmat = cmat + cmat.T
np.fill_diagonal(cmat, cmat.diagonal() / 2)
return cmat, not self.return_cov
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
| bsd-3-clause |
kashif/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice, it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
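# Hilbert matrices are severely ill-conditioned, which makes the effect
# of the regularization easy to see along the path.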
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
siconos/siconos-deb | examples/Control/train_brakes/train_tslcp.py | 1 | 4764 | """Train brake example
from B. Caillaud, <[email protected]>, Inria, Rennes, September 2013
Time-Stepping with LCP.
Equations are ::
for all i=1...n
j_{i-1} - k_i - l_i - j_i = 0
w_i = u_i + v_i
w_{i-1} - R j_{i-1} - w_i = 0
h_i + S k_i = v_i
u'_i = 1/C (k_i + l_i)
v'_i = 1/D l_i
0 <= k_i complementarity -h_i >= 0
w_0 is a constant
j_n is a constant
"""
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import siconos.kernel as sk
from math import ceil
# == User-defined parameters ==
number_of_cars = 3
t0 = 0.0
T = 10 # Total simulation times
tau = 1e-3 # Time step
tscale = 1.
# Resistors (Ohm)
R = 100.
S = 10.
# Capacitors (Farad)
C = 1000.e-6
D = 100.e-6
# Initial state
u0 = 7.5 # C capacitors voltates (V)
v0 = - u0 # D capacitors voltages (V)
rho = 0.6667 # factor
epsilon = rho * u0 / number_of_cars # increment of capacitor voltages (V)
# Constant perturbation
w0 = 10.0 # Voltage at the head of the brake line
jn = 0.0 # current at the tail of the brake line
# -- Dynamical system --
# *** Linear time-invariant dynamical system (LTIDS) ***
# q' = A.q + b + r with :
#
# q = [ u1 ... u_n v_1 ... v_n ]
# q(t0) = q0 = [ u0 ... u0 v0 ... v0 ]
ndof = 2 * number_of_cars
q0 = np.zeros(ndof, dtype=np.float64)
q0[:number_of_cars] = (1. - rho) * u0 + epsilon * np.arange(number_of_cars)
q0[number_of_cars:] = (1. - rho) * v0 -\
epsilon * np.arange(number_of_cars, ndof)
A = np.zeros((ndof, ndof), dtype=np.float64)
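# The state is q = [u_1 .. u_n, v_1 .. v_n]; A consists of four n x n
# blocks coupling the C and D capacitor voltages of each car to its
# neighbours through the line resistors R.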
val = -2. / (R * C)
np.fill_diagonal(A[:number_of_cars, :number_of_cars], val)
np.fill_diagonal(A[:number_of_cars, number_of_cars:ndof], val)
val2 = -2. / (R * D)
np.fill_diagonal(A[number_of_cars:ndof, :number_of_cars], val2)
np.fill_diagonal(A[number_of_cars:ndof, number_of_cars:ndof], val2)
A[number_of_cars - 1, number_of_cars - 1] *= 0.5
A[number_of_cars - 1, ndof - 1] *= 0.5
A[ndof - 1, number_of_cars - 1] *= 0.5
A[ndof - 1, ndof - 1] *= 0.5
# extra-diag values
for i in xrange(1, number_of_cars):
A[i, i - 1] = A[i - 1, i] = 1. / (R * C)
A[i, i + number_of_cars - 1] = A[i - 1, i + number_of_cars] = 1. / (R * C)
A[i + number_of_cars, i - 1] = A[i + number_of_cars - 1, i] = 1. / (R * D)
A[i + number_of_cars, i + number_of_cars - 1] = 1. / (R * D)
A[i + number_of_cars - 1, i + number_of_cars] = 1. / (R * D)
b = np.zeros(ndof, np.float64)
b[0] = w0 / (R * C)
b[number_of_cars - 1] = - jn / C
b[number_of_cars] = w0 / (R * D)
b[ndof - 1] = -jn / D
RC = sk.FirstOrderLinearTIDS(q0, A, b)
# -- Interaction --
# *** Linear time invariant relation (LTIR) ***
# y = N.q + M.x and r = B.x
# y = [ -h_1 ... -h_n ]
# x = [ k_1 ... k_n ]
# + complementarity(x,y)
ninter = number_of_cars
B = np.zeros((ndof, ninter), dtype=np.float64)
np.fill_diagonal(B[number_of_cars:, :number_of_cars], -1. / D)
M = np.zeros((ninter, ninter), dtype=np.float64)
np.fill_diagonal(M[:number_of_cars, :number_of_cars], S)
N = np.zeros((ninter, ndof), dtype=np.float64)
np.fill_diagonal(N[:number_of_cars, number_of_cars:], -1.)
relation = sk.FirstOrderLinearTIR(N, B)
relation.setDPtr(M)
nslaw = sk.ComplementarityConditionNSL(ninter)
interaction = sk.Interaction(ninter, nslaw, relation)
# -- The Model --
circuit = sk.Model(t0, T, 'train')
nsds = circuit.nonSmoothDynamicalSystem()
nsds.insertDynamicalSystem(RC)
nsds.link(interaction, RC)
# -- Simulation --
td = sk.TimeDiscretisation(t0, tau)
simu = sk.TimeStepping(td)
# osi
theta = 0.50000000000001
osi = sk.EulerMoreauOSI(theta)
simu.insertIntegrator(osi)
# osns
osnspb = sk.LCP()
simu.insertNonSmoothProblem(osnspb)
circuit.initialize(simu)
# -- Get the values to be plotted --
output_size = 2 * number_of_cars + 1
nb_time_steps = int(ceil((T - t0) / tau)) + 1
data_plot = np.empty((nb_time_steps, output_size))
data_plot[0, 0] = circuit.t0() / tscale
data_plot[0, 1:] = RC.x()
# time loop
k = 1
while simu.hasNextEvent():
# update input
current = 2 * k - nb_time_steps
if current >= 0 and current <= 1:
RC.b()[0] = 0.0
RC.b()[number_of_cars] = 0
simu.computeOneStep()
data_plot[k, 0] = simu.nextTime() / tscale
data_plot[k, 1:] = RC.x()
k += 1
simu.nextStep()
# save to disk
np.savetxt('train_tslcp.dat', data_plot)
def plot_results():
"""Plot 3d curve z = f(x, y)
with ds_state = [x, y, z]
"""
plt.subplot(211)
time = data_plot[:, 0]
q = data_plot[:, 1:number_of_cars + 1]
v = data_plot[:, number_of_cars + 1:]
plt.plot(time, q, label='u')
plt.ylabel('u')
plt.subplot(212)
plt.plot(time, v, label='v')
plt.ylabel('v')
plt.xlabel('time')
plt.savefig('train_brakes.png')
# --- Uncomment lines below to plot interesting stuff ---
plot_results()
| apache-2.0 |
MohammedWasim/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
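# d[i, j] is the Euclidean distance between Y[i] and X[j]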
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
mrkrd/thorns | thorns/spikes.py | 1 | 8431 | # -*- coding: utf-8 -*-
#
# Copyright 2014 Marek Rudnicki
#
# This file is part of thorns.
#
# thorns is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# thorns is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with thorns. If not, see <http://www.gnu.org/licenses/>.
"""Contains `spike_trains` creation and manipulation functions.
`spike_trains` is a data format to contain physiological action
potentials data and meta information. It is based on `pd.DataFrame`,
which MUST contain two columns *spikes* and *duration*. The values in
*spikes* are numpy arrays of spike timings in seconds. The values in
the *duration* column are the durations of the stimuli also in
seconds.
"""
from __future__ import division, print_function, absolute_import
import random
import numpy as np
import pandas as pd
import sys
from collections import Iterable
from thorns.stats import get_duration
def select_trains(spike_trains, **kwargs):
"""Select trains from `spike_trains` where `kwargs` are equal to the
metadata.
"""
mask = np.ones(len(spike_trains), dtype=bool)
for key,val in kwargs.items():
mask = mask & np.array(spike_trains[key] == val)
selected = spike_trains[mask]
return selected
def make_trains(data, **kwargs):
"""Create spike trains from various data formats.
"""
if 'fs' in kwargs:
assert 'duration' not in kwargs
meta = dict(kwargs)
if 'fs' in meta:
del meta['fs']
if isinstance(data, np.ndarray) and (data.ndim == 2) and ('fs' in kwargs):
trains = _array_to_trains(data, kwargs['fs'], **meta)
elif isinstance(data, dict): # brian like spiketimes (dict of arrays)
# TODO: test this case
arrays = [a for a in data.itervalues()]
trains = _arrays_to_trains(arrays, **meta)
elif ('brian' in sys.modules) and isinstance(data, sys.modules['brian'].SpikeMonitor):
import brian
meta.setdefault('duration', float(data.clock.t/brian.second))
spikes = [np.array(spiketimes/brian.second) for spiketimes in data.spiketimes.itervalues()]
trains = _arrays_to_trains(spikes, **meta)
elif len(data) == 0:
trains = pd.DataFrame(columns=(['spikes'] + meta.keys()))
elif isinstance(data[0], Iterable):
trains = _arrays_to_trains(data, **meta)
else:
raise RuntimeError("Spike train format not supported.")
return trains
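# A minimal usage sketch (illustrative only, not part of the original API
# docs; the metadata column `cf` is arbitrary):
#
#     trains = make_trains([np.array([1e-3, 5e-3]), np.array([2e-3])],
#                          duration=10e-3, cf=1000)
#     sub = select_trains(trains, cf=1000)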
def _arrays_to_trains(arrays, **meta):
"""Convert a list of arrays with spike times to spike trains."""
### Make sure we have duration
if 'duration' not in meta:
max_spikes = [np.max(a) for a in arrays if len(a)>0]
if max_spikes:
duration = np.max(max_spikes)
else:
duration = 0
meta['duration'] = float(duration)
else:
if np.isscalar(meta['duration']):
meta['duration'] = float(meta['duration'])
else:
meta['duration'] = np.array(meta['duration']).astype(float)
trains = {'spikes': [np.array(a, dtype=float) for a in arrays]}
trains.update(meta)
trains = pd.DataFrame(trains)
return trains
def _array_to_trains(array, fs, **meta):
"""Convert time functions to a list of spike trains.
Parameters
----------
array : array_like
Input array.
fs : float
Sampling frequency in Hz.
Returns
-------
spike_trains
"""
assert array.ndim == 2
trains = []
for a in array.T:
a = a.astype(int)
t = np.arange(len(a)) / fs
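# `a` holds a spike count per time bin; repeating each bin time by its
# count recovers the individual spike times.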
spikes = np.repeat(t, a)
trains.append( spikes )
assert 'duration' not in meta
meta['duration'] = len(array)/fs
spike_trains = _arrays_to_trains(
trains,
**meta
)
return spike_trains
def trains_to_array(spike_trains, fs):
"""Convert `spike_trains` to 2D array (signals) with samlping
frequency `fs`.
"""
duration = get_duration(spike_trains)
nbins = int(np.ceil(duration * fs))
tmax = nbins / fs
signals = []
for spikes in spike_trains['spikes']:
signal, bin_edges = np.histogram(
spikes,
bins=nbins,
range=(0, tmax)
)
signals.append(
signal
)
signals = np.array(signals).T
return signals
def accumulate(spike_trains, ignore=None, keep=None):
"""Concatenate spike trains with the same meta data. Trains will
be sorted by the metadata.
"""
assert None in (ignore, keep)
keys = spike_trains.columns.tolist()
if ignore is not None:
for k in ignore:
keys.remove(k)
if keep is not None:
keys = keep
if 'duration' not in keys:
keys.append('duration')
if 'spikes' in keys:
keys.remove('spikes')
groups = spike_trains.groupby(keys, as_index=False)
acc = []
for name,group in groups:
if not isinstance(name, tuple):
name = (name,)
spikes = np.concatenate(tuple(group['spikes']))
acc.append(name + (spikes,))
columns = list(keys)
columns.append('spikes')
acc = pd.DataFrame(acc, columns=columns)
return acc
def trim(spike_trains, start=0, stop=None):
"""Trim the spike trains.
Remove all spikes outside of the (`start`, `stop`) range.
"""
tmin = float(start)
if stop is None:
tmaxs = spike_trains['duration']
else:
tmaxs = np.ones(len(spike_trains)) * stop
assert np.all(tmin < tmaxs)
trimmed = []
for train,tmax in zip(spike_trains['spikes'], tmaxs):
spikes = train[(train >= tmin) & (train <= tmax)]
spikes -= tmin
trimmed.append(spikes)
durations = np.array(spike_trains['duration'])
over = np.asarray(durations > tmaxs)
durations[over] = tmaxs[over]
durations -= tmin
out = spike_trains.copy()
out['spikes'] = trimmed
out['duration'] = durations
return out
# def remove_empty(spike_trains):
# new_trains = []
# for train in spike_trains:
# if len(train) != 0:
# new_trains.append(train)
# return new_trains
def fold(spike_trains, period):
"""Fold `spike_trains` by `period`."""
# data = {key:[] for key in spike_trains.dtype.names}
rows = []
for i,row in spike_trains.iterrows():
period_num = int( np.ceil(row['duration'] / period) )
last_period = np.fmod(row['duration'], period)
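# The final chunk may be shorter than `period`; its duration is
# corrected after the inner loop.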
spikes = row['spikes']
for idx in range(period_num):
lo = idx * period
hi = (idx+1) * period
sec = spikes[(spikes>=lo) & (spikes<hi)]
sec = np.fmod(sec, period)
r = row.copy()
r['spikes'] = sec
r['duration'] = period
rows.append(r)
if last_period > 0:
rows[-1]['duration'] = last_period
folded_trains = pd.DataFrame(rows)
folded_trains = folded_trains.reset_index(drop=True)
return folded_trains
# def concatenate_spikes(spike_trains):
# return [np.concatenate(tuple(spike_trains))]
# concatenate = concatenate_spikes
# concat = concatenate_spikes
# def shift_spikes(spike_trains, shift):
# shifted = [train+shift for train in spike_trains]
# return shifted
# shift = shift_spikes
# def split_and_fold_trains(spike_trains,
# silence_duration,
# tone_duration,
# pad_duration,
# remove_pads):
# silence = trim(spike_trains, 0, silence_duration)
# tones_and_pads = trim(spike_trains, silence_duration)
# tones_and_pads = fold(tones_and_pads, tone_duration+pad_duration)
# # import plot
# # plot.raster(tones_and_pads).show()
# if remove_pads:
# tones_and_pads = trim(tones_and_pads, 0, tone_duration)
# return silence, tones_and_pads
# split_and_fold = split_and_fold_trains
| gpl-3.0 |
cowlicks/numpy | numpy/fft/fftpack.py | 14 | 42176 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
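# Crop the input along `axis` if it is longer than n, otherwise
# zero-pad it up to length n.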
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
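For instance, a quick shape check of this idiom (the 8-point input below is
only an illustration):
>>> x = np.arange(8.0)
>>> np.fft.irfft(np.fft.rfft(x), 16).shape
(16,)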
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a, 2*len(a) - 2)) == a``, within numerical accuracy; pass
``2*len(a) - 1`` instead to get an odd-length result.
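A quick numerical check of this round trip (the small even-length example
below is only an illustration):
>>> a = np.array([1.0, 2.0, 3.0])
>>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2*len(a) - 2)), a)
True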
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a, 2*len(a) - 2)) == a``, within numerical accuracy; pass
``2*len(a) - 1`` instead to get an odd-length result.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
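    # Helper shared by the N-d transforms below: normalize the `s` (shape) and
    # `axes` arguments, fill in defaults, and check that their lengths match.
    # For the real inverse transforms (`invreal` set) the last-axis length is
    # inferred as 2*(m - 1) when `s` was not supplied.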
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
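    # Apply the 1-D transform `function` along each requested axis in turn,
    # working through the axes in reverse order (last listed axis first).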
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional discrete Fourier Transform.
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2, -1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
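Examples
--------
A minimal illustration (the random array is only a placeholder); `rfft2`
matches `rfftn` over the last two axes:
>>> a = np.random.rand(4, 4)
>>> np.allclose(np.fft.rfft2(a), np.fft.rfftn(a, axes=(-2, -1)))
True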
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2, -1)):
"""
Compute the inverse of the 2-dimensional FFT of real input.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
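Examples
--------
A minimal round-trip check (the random array is only a placeholder):
>>> a = np.random.rand(4, 4)
>>> np.allclose(np.fft.irfft2(np.fft.rfft2(a), a.shape), a)
True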
"""
return irfftn(a, s, axes)