repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
I2Cvb/prostate | scratch/metabolite_fitting.py | 1 | 15001 | from __future__ import division
import numpy as np
from joblib import Parallel, delayed
from scipy.special import wofz
from scipy.optimize import curve_fit
from scipy.sparse import spdiags
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
from scipy.interpolate import interp1d
from scipy.stats import norm
from scipy.optimize import least_squares
from statsmodels.nonparametric.smoothers_lowess import lowess
from gmr import GMM
from protoclass.data_management import RDAModality
from protoclass.preprocessing import MRSIPhaseCorrection
from protoclass.preprocessing import MRSIFrequencyCorrection
from protoclass.preprocessing import MRSIBaselineCorrection
from fdasrsf import srsf_align
import matplotlib.pyplot as plt
path_mrsi = '/data/prostate/experiments/Patient 1036/MRSI/CSI_SE_3D_140ms_16c.rda'
def _find_nearest(array, value):
idx = (np.abs(array - value)).argmin()
return idx
def _gaussian_profile(x, alpha, mu, sigma):
return alpha * norm.pdf(x, loc=mu, scale=sigma)
def _voigt_profile(x, alpha, mu, sigma, gamma):
"""Private function to fit a Voigt profile.
Parameters
----------
x : ndarray, shape (len(x))
The input data.
alpha : float,
The amplitude factor.
mu : float,
The shift of the central value.
sigma : float,
sigma of the Gaussian.
gamma : float,
gamma of the Lorentzian.
Returns
-------
y : ndarray, shape (len(x), )
The Voigt profile.
"""
# Define z
z = ((x - mu) + 1j * gamma) / (sigma * np.sqrt(2))
# Compute the Faddeeva function
w = wofz(z)
return alpha * (np.real(w)) / (sigma * np.sqrt(2. * np.pi))
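# Illustrative sanity check added here as a hedged sketch (the grid and widths are
# arbitrary example values): with a vanishing Lorentzian width ``gamma`` the Voigt
# profile above collapses to the plain Gaussian computed by ``_gaussian_profile``.
_x_demo = np.linspace(-.5, .5, 101)
_voigt_demo = _voigt_profile(_x_demo, 1., 0., .05, 1e-8)
_gauss_demo = _gaussian_profile(_x_demo, 1., 0., .05)
_profiles_agree = np.allclose(_voigt_demo, _gauss_demo, atol=1e-6)  # True in this limit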
def _ch_sp_cr_cit_model(x,
mu, sigma_1, gamma_1, alpha_1,
delta_2, sigma_2, gamma_2, alpha_2,
delta_3, sigma_3, gamma_3, alpha_3,
delta_4, sigma_4, gamma_4, alpha_4,
delta_5, sigma_5, gamma_5, alpha_5,
delta_6, sigma_6, gamma_6, alpha_6):
"""Private function to create the mixute of Voigt profiles."""
signal = _voigt_profile(x, alpha_1, mu, sigma_1, gamma_1)
signal += _voigt_profile(x, alpha_2, mu + delta_2, sigma_2, gamma_2)
signal += _voigt_profile(x, alpha_3, mu - delta_3, sigma_3, gamma_3)
signal += _voigt_profile(x, alpha_4, mu + delta_4, sigma_4, gamma_4)
signal += _voigt_profile(x, alpha_5, mu + delta_4 - delta_5, sigma_5,
gamma_5)
signal += _voigt_profile(x, alpha_6, mu + delta_4 - delta_6, sigma_6,
gamma_6)
return signal
def _cit_model(x,
mu, sigma_1, gamma_1, alpha_1,
delta_2, sigma_2, gamma_2, alpha_2,
delta_3, sigma_3, gamma_3, alpha_3):
"""Private function to create the mixute of Voigt profiles."""
signal = _voigt_profile(x, alpha_1, mu, sigma_1, gamma_1)
signal += _voigt_profile(x, alpha_2, mu + delta_2, sigma_2, gamma_2)
signal += _voigt_profile(x, alpha_3, mu - delta_3, sigma_3, gamma_3)
return signal
def voigt(ppm, x):
signal = _voigt_profile(ppm, x[3], x[0], x[1], x[2])
signal += _voigt_profile(ppm, x[7], x[0] + x[4], x[5], x[6])
signal += _voigt_profile(ppm, x[11], x[0] - x[8], x[9], x[10])
signal += _voigt_profile(ppm, x[15], x[0] + x[12], x[13], x[14])
signal += _voigt_profile(ppm, x[19], x[0] + x[12] - x[16], x[17], x[18])
signal += _voigt_profile(ppm, x[23], x[0] + x[12] - x[20], x[21], x[22])
return signal
def ls_voigt(x, ppm, y):
signal = _voigt_profile(ppm, x[3], x[0], x[1], x[2])
signal += _voigt_profile(ppm, x[7], x[0] + x[4], x[5], x[6])
signal += _voigt_profile(ppm, x[11], x[0] - x[8], x[9], x[10])
signal += _voigt_profile(ppm, x[15], x[0] + x[12], x[13], x[14])
signal += _voigt_profile(ppm, x[19], x[0] + x[12] - x[16], x[17], x[18])
signal += _voigt_profile(ppm, x[23], x[0] + x[12] - x[20], x[21], x[22])
return signal - y
def gauss(ppm, x):
signal = _gaussian_profile(ppm, x[2], x[0], x[1])
signal += _gaussian_profile(ppm, x[5], x[0] + x[3], x[4])
signal += _gaussian_profile(ppm, x[8], x[0] - x[6], x[7])
return signal
def ls_gauss(x, ppm, y):
signal = _gaussian_profile(ppm, x[2], x[0], x[1])
signal += _gaussian_profile(ppm, x[5], x[0] + x[3], x[4])
signal += _gaussian_profile(ppm, x[8], x[0] - x[6], x[7])
return signal - y
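# Hedged example (not part of the original fitting code) showing how the flat
# parameter vector feeds ``gauss``/``ls_gauss``; the layout mirrors the indexing
# above: x = [mu, sigma_1, alpha_1, delta_2, sigma_2, alpha_2, delta_3, sigma_3,
# alpha_3]. The synthetic spectrum and starting guess are arbitrary.
_ppm_demo = np.linspace(2.3, 2.9, 200)
_x_true = [2.6, .02, 1., .14, .02, .5, .14, .02, .5]
_y_demo = gauss(_ppm_demo, _x_true)
_fit_demo = least_squares(ls_gauss, [2.58, .03, .8, .13, .03, .4, .15, .03, .4], args=(_ppm_demo, _y_demo))
# _fit_demo.x should land close to _x_true on this noise-free toy spectrum.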
def _cit_gaussian_model(x,
mu, sigma_1, alpha_1,
delta_2, sigma_2, alpha_2,
delta_3, sigma_3, alpha_3):
"""Private function to create the mixute of Voigt profiles."""
signal = _gaussian_profile(x, alpha_1, mu, sigma_1)
#signal += _gaussian_profile(x, alpha_2, mu + delta_2, sigma_2)
#signal += _gaussian_profile(x, alpha_3, mu - delta_3, sigma_3)
return signal
def _ch_sp_cr_cit_fitting(ppm, spectrum):
"""Private function to fit a mixture of Voigt profile to
choline, spermine, creatine, and citrate metabolites.
"""
ppm_limits = (2.35, 3.25)
idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
ppm < ppm_limits[1]))
sub_ppm = ppm[idx_ppm]
sub_spectrum = spectrum[idx_ppm]
f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)
# Define the default parameters
# Define the default shifts
mu_dft = 2.56
delta_2_dft = .14
delta_3_dft = .14
delta_4_dft = .58
delta_5_dft = .12
delta_6_dft = .16
# Define their bounds
mu_bounds = (2.54, 2.68)
delta_2_bounds = (.08, .17)
delta_3_bounds = (.08, .17)
delta_4_bounds = (.55, .61)
delta_5_bounds = (.11, .13)
delta_6_bounds = (.13, .17)
# Define the default amplitude
alpha_1_dft = (f(mu_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_2_dft = (f(mu_dft + delta_2_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_3_dft = (f(mu_dft - delta_3_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_4_dft = (f(mu_dft + delta_4_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_5_dft = (f(mu_dft + delta_4_dft - delta_5_dft) /
_voigt_profile(0, 1., 0., .001, .001))
alpha_6_dft = (f(mu_dft + delta_4_dft - delta_6_dft) /
_voigt_profile(0, 1., 0., .001, .001))
# Create the vector for the default parameters
popt_default = [mu_dft, .001, .001, alpha_1_dft,
delta_2_dft, .001, .001, alpha_2_dft,
delta_3_dft, .001, .001, alpha_3_dft,
delta_4_dft, .001, .001, alpha_4_dft,
delta_5_dft, .001, .001, alpha_5_dft,
delta_6_dft, .001, .001, alpha_6_dft]
# Define the bounds properly
param_bounds = ([mu_bounds[0], 0., 0., 0.,
delta_2_bounds[0], 0., 0., 0.,
delta_3_bounds[0], 0., 0., 0.,
delta_4_bounds[0], 0., 0., 0.,
delta_5_bounds[0], 0., 0., 0.,
delta_6_bounds[0], 0., 0., 0.],
[mu_bounds[1], np.inf, np.inf, np.inf,
delta_2_bounds[1], np.inf, np.inf, np.inf,
delta_3_bounds[1], np.inf, np.inf, np.inf,
delta_4_bounds[1], np.inf, np.inf, np.inf,
delta_5_bounds[1], np.inf, np.inf, np.inf,
delta_6_bounds[1], np.inf, np.inf, np.inf])
try:
popt, _ = curve_fit(_ch_sp_cr_cit_model, ppm_interp,
f(ppm_interp),
p0=popt_default, bounds=param_bounds)
except RuntimeError:
popt = popt_default
return popt
def _cit_fitting(ppm, spectrum):
"""Private function to fit a mixture of Voigt profile to
citrate metabolites.
"""
ppm_limits = (2.35, 2.85)
idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
ppm < ppm_limits[1]))
sub_ppm = ppm[idx_ppm]
sub_spectrum = spectrum[idx_ppm]
f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)
# Define the default parameters
# Define the default shifts
mu_dft = 2.56
delta_2_dft = .14
delta_3_dft = .14
# Define their bounds
mu_bounds = (2.54, 2.68)
delta_2_bounds = (.08, .17)
delta_3_bounds = (.08, .17)
# Define the default amplitude
alpha_1_dft = (f(mu_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_2_dft = (f(mu_dft + delta_2_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_3_dft = (f(mu_dft - delta_3_dft) /
_voigt_profile(0., 1., 0., .001, .001))
# Create the vector for the default parameters
popt_default = [mu_dft, .001, .001, alpha_1_dft,
delta_2_dft, .001, .001, alpha_2_dft,
delta_3_dft, .001, .001, alpha_3_dft]
# Define the bounds properly
param_bounds = ([mu_bounds[0], 0., 0., 0.,
delta_2_bounds[0], 0., 0., 0.,
delta_3_bounds[0], 0., 0., 0.],
[mu_bounds[1], np.inf, np.inf, np.inf,
delta_2_bounds[1], np.inf, np.inf, np.inf,
delta_3_bounds[1], np.inf, np.inf, np.inf])
try:
popt, _ = curve_fit(_cit_model, ppm_interp,
f(ppm_interp),
p0=popt_default, bounds=param_bounds)
except RuntimeError:
popt = popt_default
return popt
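# Hedged usage sketch (illustration only, commented out because it needs real MRSI
# data): the ``popt`` returned above can be fed straight back into ``_cit_model`` to
# reconstruct the fitted citrate signal on any ppm grid, e.g. for overlaying on the
# raw spectrum.
# popt_cit = _cit_fitting(ppm, spectrum)
# fitted_cit = _cit_model(ppm, *popt_cit)
# plt.plot(ppm, spectrum)
# plt.plot(ppm, fitted_cit)
# plt.show()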
def _cit_gaussian_fitting(ppm, spectrum):
"""Private function to fit a mixture of Voigt profile to
citrate metabolites.
"""
ppm_limits = (2.35, 3.25)
idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
ppm < ppm_limits[1]))
sub_ppm = ppm[idx_ppm]
sub_spectrum = spectrum[idx_ppm]
f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)
# Define the default parameters
# Define the default shifts
mu_dft = 2.56
delta_2_dft = .14
delta_3_dft = .14
delta_4_dft = .58
delta_5_dft = .12
delta_6_dft = .16
# Define their bounds
mu_bounds = (2.54, 2.68)
delta_2_bounds = (.12, .16)
delta_3_bounds = (.12, .16)
delta_4_bounds = (.55, .61)
delta_5_bounds = (.11, .13)
delta_6_bounds = (.13, .17)
# # Define the default amplitude
# alpha_1_dft = (f(mu_dft) /
# _gaussian_profile(0., 1., 0., .01))
# alpha_2_dft = (f(mu_dft + delta_2_dft) /
# _gaussian_profile(0., 1., 0., .01))
# alpha_3_dft = (f(mu_dft - delta_3_dft) /
# _gaussian_profile(0., 1., 0., .01))
# # Create the vector for the default parameters
# popt_default = [mu_dft, .01, alpha_1_dft,
# delta_2_dft, .01, alpha_2_dft,
# delta_3_dft, .01, alpha_3_dft]
# # Define the bounds properly
# param_bounds = ([mu_bounds[0], 0., 0.,
# delta_2_bounds[0], 0., 0.,
# delta_3_bounds[0], 0., 0.],
# [mu_bounds[1], np.inf, np.inf,
# delta_2_bounds[1], np.inf, np.inf,
# delta_3_bounds[1], np.inf, np.inf])
# Define the default amplitude
alpha_1_dft = (f(mu_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_2_dft = (f(mu_dft + delta_2_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_3_dft = (f(mu_dft - delta_3_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_4_dft = (f(mu_dft + delta_4_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_5_dft = (f(mu_dft + delta_4_dft - delta_5_dft) /
_voigt_profile(0, 1., 0., .001, .001))
alpha_6_dft = (f(mu_dft + delta_4_dft - delta_6_dft) /
_voigt_profile(0, 1., 0., .001, .001))
# Create the vector for the default parameters
popt_default = [mu_dft, .001, .001, alpha_1_dft,
delta_2_dft, .001, .001, alpha_2_dft,
delta_3_dft, .001, .001, alpha_3_dft,
delta_4_dft, .001, .001, alpha_4_dft,
delta_5_dft, .001, .001, alpha_5_dft,
delta_6_dft, .001, .001, alpha_6_dft]
# Define the bounds properly
param_bounds = ([mu_bounds[0], 0., 0., 0.,
delta_2_bounds[0], 0., 0., 0.,
delta_3_bounds[0], 0., 0., 0.,
delta_4_bounds[0], 0., 0., 0.,
delta_5_bounds[0], 0., 0., 0.,
delta_6_bounds[0], 0., 0., 0.],
[mu_bounds[1], np.inf, np.inf, np.inf,
delta_2_bounds[1], np.inf, np.inf, np.inf,
delta_3_bounds[1], np.inf, np.inf, np.inf,
delta_4_bounds[1], np.inf, np.inf, np.inf,
delta_5_bounds[1], np.inf, np.inf, np.inf,
delta_6_bounds[1], np.inf, np.inf, np.inf])
# # Create the vector for the default parameters
# popt_default = np.array([mu_dft, .01, alpha_1_dft])
# # Define the bounds properly
# param_bounds = (np.array([mu_bounds[0], 0., 0.]),
# np.array([mu_bounds[1], np.inf, np.inf]))
# try:
# popt, _ = curve_fit(_cit_gaussian_model, ppm_interp,
# f(ppm_interp),
# p0=popt_default)#, bounds=param_bounds)
# except RuntimeError:
# popt = popt_default
res_robust = least_squares(ls_voigt, popt_default,
loss='huber', f_scale=.1,
bounds=param_bounds,
args=(ppm_interp, f(ppm_interp)))
return res_robust.x
rda_mod = RDAModality(1250.)
rda_mod.read_data_from_path(path_mrsi)
phase_correction = MRSIPhaseCorrection(rda_mod)
rda_mod = phase_correction.transform(rda_mod)
freq_correction = MRSIFrequencyCorrection(rda_mod)
rda_mod = freq_correction.fit(rda_mod).transform(rda_mod)
baseline_correction = MRSIBaselineCorrection(rda_mod)
rda_mod = baseline_correction.fit(rda_mod).transform(rda_mod)
x = 9
y = 5
z = 5
# out = _cit_gaussian_fitting(rda_mod.bandwidth_ppm[:, y, x, z],
# np.real(rda_mod.data_[:, y, x, z]))
ppm = rda_mod.bandwidth_ppm[:, y, x, z]
spectrum = np.real(rda_mod.data_[:, y, x, z])
ppm_limits = (2.35, 2.85)
idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
ppm < ppm_limits[1]))
sub_ppm = ppm[idx_ppm]
sub_spectrum = spectrum[idx_ppm]
| mit |
fraser-lab/EMRinger | Figures/S6/S6C.py | 1 | 7018 | # Rolling rotamericity metric
########################################################################
# Package imports
import numpy as np
import math
from emringer import ringer_chi, ringer_residue
from libtbx import easy_pickle
import matplotlib.pyplot as plt
import argparse
from matplotlib import rcParams
from itertools import count, groupby
rcParams['figure.autolayout'] = True
rcParams['xtick.labelsize'] = 16
rcParams['ytick.labelsize'] = 16
rcParams['axes.labelsize'] = 24
rcParams['axes.titlesize'] = 24
Residue_codes = ["ARG","ASN","ASP","CYS","GLU","GLN","HIS",
"LEU","LYS","MET","PHE","SER","TRP","TYR","SEC","PYL"]
Branched_residues = ["THR","VAL","ILE"]
No_c_gamma = ["ALA", "GLY"]
Weird = ["PRO"]
########################################################################
# Argument Parsing
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--file_a", dest="filename_a",
help='Filename of unrefined pkl file')
parser.add_argument("-b", "--file_b", dest="filename_b",
help='Filename of refined pkl file')
parser.add_argument("-o", dest="offset", type=int, default=0)
parser.add_argument("-t", "--threshold", dest="threshold",
help='Threshold cutoff for rho density',
nargs='?', type = float, default=0)
parser.add_argument("-w", "--extension_around_center", dest = "extension",
help='Number of amino acids to extend around the center in both directions. \
The total window will therefore be twice this number plus one for the center.'
, nargs="?", type=int, default=10)
parser.add_argument("--percent_passing_cutoff", dest = "thresholded_cutoff",
help='Minimum %% passing threshold to flag as a bad region...'
, nargs="?", type=float, default=0.8)
parser.add_argument("--rotamericity_cutoff", dest = "rotamer_cutoff",
help='Maximum rotamericity to be flagged.'
, nargs="?", type=float, default=0.5)
parser.add_argument("--display", dest = "graph", action='store_true')
parser.add_argument("--no_save", dest = "save", action='store_false')
parser.add_argument("-r", "--rel", dest = "rel", action='store_true')
parser.set_defaults(rel=False, graph=False, save=True)
args = parser.parse_args()
class RingerDict(object):
'''RingerDict: a dictionary-accessible form of the output of ringer.'''
def __init__(self, resultlist, offset):
self.dict = {}
for residue in resultlist:
if residue.resname in Residue_codes:
residue.resid = int(residue.resid)+offset
self.add_residue(residue)
def add_residue(self, residue):
if residue.chain_id not in self.dict.keys():
self.dict[residue.chain_id] = {}
if 1 in residue._angles.keys():
self.dict[residue.chain_id][residue.resid] = residue._angles[1]
def get_peak(self, chain_id, residue_id):
if (chain_id in self.dict.keys() and residue_id in self.dict[chain_id].keys()):
return self.dict[chain_id][residue_id]
else:
return None
def get_chains(self):
return self.dict.keys()
def get_residues(self, chain_id):
return sorted(self.dict[chain_id].keys())
def ranges(p):
q = sorted(p)
i = 0
for j in xrange(1,len(q)):
if q[j] > 1+q[j-1]:
yield (q[i],q[j-1])
i = j
yield (q[i], q[-1])
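# Illustrative example added for clarity (arbitrary residue numbers): ``ranges``
# collapses a collection of residue ids into contiguous (start, stop) spans, e.g.
# list(ranges([3, 4, 5, 9, 10, 15])) == [(3, 5), (9, 10), (15, 15)].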
def identify_regions(results):
for chain, chain_out in results.iteritems():
outliers = []
print "For Chain %s:" % chain
for k in chain_out:
if (np.divide(k[2],k[1]) > args.thresholded_cutoff) and (np.divide(k[3],k[2]) < args.rotamer_cutoff):
for i in range(k[0]-args.extension, k[0]+args.extension):
outliers.append(i)
if len(outliers) > 0:
print list(ranges(outliers))
print ""
else:
print "No outliers at this threshold \n"
def main():
ringer_results = easy_pickle.load(args.filename_a)
hierarchy = RingerDict(ringer_results, 0)
results_a = {}
for chain in hierarchy.get_chains():
results_a[chain] = []
# Results will be a list of tuples of the form residue number,
# number checked in window, number passing threshold in window,
# number deviating in window.
for i in hierarchy.get_residues(chain):
total_n = 0.0
threshold_n = 0.0
# threshold_deviation = 0
n_deviate = 0.0
for j in range(-args.extension, args.extension+1):
chi = hierarchy.get_peak(chain, int(i)+j)
if chi:
total_n += 1
if args.rel:
if chi.relrho > args.threshold:
threshold_n += 1
if chi.deviation <= 30:
n_deviate += 1
else:
if chi.peakrho > args.threshold:
threshold_n += 1
if chi.deviation <= 30:
n_deviate += 1
results_a[chain].append((i, total_n, threshold_n, n_deviate))
ringer_results = easy_pickle.load(args.filename_b)
hierarchy = RingerDict(ringer_results, args.offset)
results_b = {}
for chain in hierarchy.get_chains():
results_b[chain] = []
# Results will be a list of tuples of the form residue number,
# number checked in window, number passing threshold in window,
# number deviating in window.
for i in hierarchy.get_residues(chain):
total_n = 0.0
threshold_n = 0.0
# threshold_deviation = 0
n_deviate = 0.0
for j in range(-args.extension, args.extension+1):
chi = hierarchy.get_peak(chain, int(i)+j)
if chi:
total_n += 1
if args.rel:
if chi.relrho > args.threshold:
threshold_n += 1
if chi.deviation <= 30:
n_deviate += 1
else:
if chi.peakrho > args.threshold:
threshold_n += 1
if chi.deviation <= 30:
n_deviate += 1
results_b[chain].append((i, total_n, threshold_n, n_deviate))
# identify_regions(results)
if args.graph or args.save:
plot_results(results_a, results_b)
def plot_results(results_a,results_b):
for chain in results_a.keys():
fig, ax = plt.subplots()
# plt.title("Rolling window - Chain %s" % chain)
x_a = [k[0] for k in results_a[chain]]
x_b = [k[0] for k in results_b[chain]]
y_a = [np.divide(k[3],k[2]) for k in results_a[chain]]
y_b = [np.divide(k[3],k[2]) for k in results_b[chain]]
# plt.plot(x, y_1, 'b-', label = "Percent passing threshold",linewidth=3.0, alpha=0.7)
plt.plot(x_a, y_a, label = "Unrefined",linewidth=3.0, alpha=0.9)
plt.plot(x_b, y_b, label = "Refined",linewidth=3.0, alpha=0.9, color='#60BD68')
# plt.xlim(381,695)
plt.xlabel("Center Residue of 21-Residue Window", labelpad=10)
plt.ylabel("Fraction Rotameric Residues", labelpad=10)
plt.ylim(0,1)
plt.legend(loc=4)
ax.yaxis.set_ticks_position('left') # this one is optional but I still recommend it...
ax.xaxis.set_ticks_position('bottom')
if args.graph:
fig.show()
if args.save:
output = args.filename_a[:-4] + "_" + chain + "_rolling.png"
fig.savefig(output)
if __name__ == "__main__":
main()
| bsd-3-clause |
petosegan/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
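# Hedged sanity-check sketch (added for illustration, not part of upstream
# scikit-learn): each row of weights reconstructs X[i] from its neighbourhood Z[i]
# up to the regularization, and rows sum to one. Doctest-style, arbitrary data:
#
#     >>> X = np.array([[1., 0.], [0., 1.], [.5, .5]])
#     >>> Z = np.stack([X[[1, 2]], X[[0, 2]], X[[0, 1]]])
#     >>> B = barycenter_weights(X, Z)
#     >>> np.allclose(B.sum(axis=1), 1.)
#     True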
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
M.flat[::M.shape[0] + 1] += 1 # M = (W - I)' (W - I) = W'W - W' - W + I
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
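# Hedged usage sketch appended for illustration (not part of upstream scikit-learn);
# the data and parameter values below are arbitrary:
#
#     >>> import numpy as np
#     >>> X = np.random.RandomState(0).rand(100, 3)
#     >>> lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
#     >>> X_embedded = lle.fit_transform(X)
#     >>> X_embedded.shape
#     (100, 2)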
| bsd-3-clause |
asurunis/CrisisMappingToolkit | bin/lake_measure.py | 1 | 13229 | # -----------------------------------------------------------------------------
# Copyright © 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import logging
logging.basicConfig(level=logging.ERROR)
try:
import cmt.ee_authenticate
except:
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import cmt.ee_authenticate
cmt.ee_authenticate.initialize()
import matplotlib
matplotlib.use('tkagg')
import sys
import argparse
import functools
import time
import threading
import os
import os.path
from pprint import pprint
import ee
def get_image_collection(bounds, start_date, end_date):
'''Retrieve Landsat 5 imagery for the selected location and dates'''
#ee_bounds = apply(ee.Geometry.Rectangle, bounds)
#ee_points = map(ee.Geometry.Point, [(bounds[0], bounds[1]), (bounds[0], bounds[3]),
# (bounds[2], bounds[1]), (bounds[2], bounds[3])])
ee_bounds = bounds
ee_points = ee.List(bounds.bounds().coordinates().get(0))
points = ee_points.getInfo()
points = map(functools.partial(apply, ee.Geometry.Point), points)
collection = ee.ImageCollection('LT5_L1T').filterDate(start_date, end_date).filterBounds(points[0]).filterBounds(points[1]).filterBounds(points[2]).filterBounds(points[3])
return collection
def detect_clouds(im):
'''Cloud detection algorithm for Landsat 5 data'''
cloud_mask = im.select(['B3']).gte(35).select(['B3'], ['cloud']).And(im.select(['B6']).lte(120))
NDSI = (im.select(['B2']).subtract(im.select(['B5']))).divide(im.select(['B2']).add(im.select(['B5'])))
# originally 0.4, but this supposedly misses some clouds, used 0.7 in paper
# be conservative
#cloud_mask = cloud_mask.And(NDSI.lte(0.7))
# should be 300K temperature, what is this in pixel values?
return cloud_mask
def detect_water(image, clouds):
'''Water detection algorithm for Landsat 5 data'''
# from "Water Body Detection and Delineation with Landsat TM Data" by Frazier and Page
water = image.select(['B1']).lte(100).And(image.select(['B2']).lte(55)).And(image.select(['B3']).lte(71))
water = water.select(['B1'], ['water'])
# originally B7 <= 13
water = water.And(image.select(['B4']).lte(66)).And(image.select(['B5']).lte(47)).And(image.select(['B7']).lte(20))
# original was B1 57, 23, 21, 14
water = water.And(image.select(['B1']).gte(30))#.And(image.select(['B2']).gte(10)).And(image.select(['B3']).gte(8))
#water = water.And(image.select(['B4']).gte(5)).And(image.select(['B5']).gte(2)).And(image.select(['B7']).gte(1))
water = water.And(clouds.Not())
return water
def count_water_and_clouds(bounds, image):
'''Calls the water and cloud detection algorithms on an image and packages the results'''
clouds = detect_clouds(image)
water = detect_water(image, clouds)
cloud_count = clouds.mask(clouds).reduceRegion(ee.Reducer.count(), bounds, 30)
water_count = water.mask(water ).reduceRegion(ee.Reducer.count(), bounds, 30)
#addToMap(ee.Algorithms.ConnectedComponentLabeler(water, ee.Kernel.square(1), 256))
return ee.Feature(None, {'date' : image.get('DATE_ACQUIRED'),
'water_count' : water_count.get('water'),
'cloud_count' : cloud_count.get('cloud')})
def measure_clouds(image):
return ee.Feature(None, {'value' : 5.0})
def parse_lake_data(filename):
'''Read in an output file generated by this program'''
f = open(filename, 'r')
# take values with low cloud cover
f.readline() # Skip first header line
f.readline() # Skip name/country/area info line
f.readline() # Skip second header line
results = dict()
for l in f: # Loop through each line of the file
parts = l.split(',')
date = parts[0].strip() # Image date
satellite = parts[1].strip() # Observing satellite
cloud = int(parts[2]) # Clound count
water = int(parts[3]) # Water count
sun_elevation = float(parts[4])
if not results.has_key(satellite):
results[satellite] = dict()
results[satellite][date] = (cloud, water, sun_elevation)
f.close()
return results
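# Illustration (hypothetical values) of the per-lake log layout that
# parse_lake_data expects, matching the write format used in process_lake below:
# three header/info lines followed by one row per image.
#
#   # Name Country Area in km^2
#   Lake Example, Nowhere, 123.4
#   # Date, Satellite, Cloud Pixels, Water Pixels, Sun Elevation
#   2010-06-15,  Landsat 5,       1024,      20480, 55.2
#
# which parses into results['Landsat 5']['2010-06-15'] == (1024, 20480, 55.2).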
def process_lake(lake, ee_lake, start_date, end_date, output_directory):
'''Computes lake statistics over a date range and writes them to a log file'''
# Extract the lake name (required!)
name = lake['properties']['LAKE_NAME']
if name == '':
return
# Set the output file path and load the file if it already exists
output_file_name = os.path.join(output_directory, name + '.txt')
data = None
if os.path.exists(output_file_name):
data = parse_lake_data(output_file_name)
# Open the output file for writing and fill in the header lines
f = open(output_file_name, 'w')
country = lake['properties']['COUNTRY']
area = lake['properties']['AREA_SKM']
#print '%s, %s, %s' % (name, country, area)
f.write('# Name Country Area in km^2\n')
f.write('%s, %s, %s\n' % (name, country, area))
f.write('# Date, Satellite, Cloud Pixels, Water Pixels, Sun Elevation\n')
# If the file already existed and we loaded data from it,
# re-write the data back in to the new output file.
if data != None:
for sat in sorted(data.keys()):
for date in sorted(data[sat].keys()):
f.write('%s, %10s, %10d, %10d, %.5g\n' % (date, sat, data[sat][date][0], data[sat][date][1], data[sat][date][2]))
try:
# Take the lake boundary and expand it out in all directions by 1000 meters
ee_bounds = ee_lake.geometry().buffer(1000)
# Fetch all the landsat 5 imagery covering the lake on the date range
collection = get_image_collection(ee_bounds, start_date, end_date)
v = collection.toList(1000000)
except:
print >> sys.stderr, 'Failed to allocate memory to expand buffer for lake %s, skipping.' % (name)
f.close()
return
# Iterate through all the images we retrieved
results = []
all_images = v.getInfo()
for i in range(len(all_images)):
# If we already loaded data that contained results for this image, don't re-process it!
if ( (data != None) and ('Landsat 5' in data.keys()) and
(all_images[i]['properties']['DATE_ACQUIRED'] in data['Landsat 5']) ):
continue
# Retrieve the image data and fetch the sun elevation (suggests the amount of light present)
im = ee.Image(v.get(i))
sun_elevation = all_images[i]['properties']['SUN_ELEVATION']
# Call processing algorithms on the lake with second try in case EE chokes.
try:
r = count_water_and_clouds(ee_bounds, im).getInfo()['properties']
except Exception as e:
print >> sys.stderr, 'Failure counting water...trying again. ' + str(e)
time.sleep(5)
r = count_water_and_clouds(ee_bounds, im).getInfo()['properties']
# Write the processing results to a new line in the file
output = '%s, %10s, %10d, %10d, %.5g' % (r['date'], 'Landsat 5', r['cloud_count'], r['water_count'], sun_elevation)
print '%15s %s' % (name, output)
f.write(output + '\n')
results.append(r)
f.close() # Finished processing images, close up the file.
# --- Global variables that govern the parallel threads ---
NUM_SIMULTANEOUS_THREADS = 8
global_semaphore = threading.Semaphore(NUM_SIMULTANEOUS_THREADS)
thread_lock = threading.Lock()
total_threads = 0
class LakeThread(threading.Thread):
'''Helper class to manage the number of active lake processing threads'''
def __init__(self, args):
threading.Thread.__init__(self)
self.setDaemon(True)
self.args = args
# Increase the global thread count by one
thread_lock.acquire()
global total_threads
total_threads += 1
thread_lock.release()
# Start processing
self.start()
def run(self):
# Wait for an open thread spot, then begin processing.
global_semaphore.acquire()
try:
apply(process_lake, self.args)
except Exception as e:
print >> sys.stderr, e
global_semaphore.release()
# Finished processing, decrement the global thread count.
thread_lock.acquire()
global total_threads
total_threads -= 1
thread_lock.release()
#======================================================================================================
# main()
parser = argparse.ArgumentParser(description='Measure lake water levels.')
parser.add_argument('--date', dest='date', action='store', required=False, default=None)
parser.add_argument('--lake', dest='lake', action='store', required=False, default=None)
parser.add_argument('--results_dir', dest='results_dir', action='store', required=False, default='results')
args = parser.parse_args()
if args.date == None:
start_date = ee.Date('1984-01-01')
end_date = ee.Date('2030-01-01')
else:
start_date = ee.Date(args.date)
end_date = start_date.advance(1.0, 'month')
#start_date = ee.Date('2011-06-01') # lake high
#start_date = ee.Date('1993-07-01') # lake low
#start_date = ee.Date('1993-06-01') # lake low but some jet streams
# --- This is the database containing all the lake locations!
#all_lakes = ee.FeatureCollection('ft:13s-6qZDKWXsLOWyN7Dap5o6Xuh2sehkirzze29o3', "geometry").toList(1000000)
if args.lake != None:
all_lakes = ee.FeatureCollection('ft:13s-6qZDKWXsLOWyN7Dap5o6Xuh2sehkirzze29o3', "geometry").filterMetadata(u'LAKE_NAME', u'equals', args.lake).toList(1000000)
else:
#bounds = ee.Geometry.Rectangle(-125.29, 32.55, -114.04, 42.02)
#all_lakes = ee.FeatureCollection('ft:13s-6qZDKWXsLOWyN7Dap5o6Xuh2sehkirzze29o3', "geometry").filterBounds(bounds).toList(1000000)
all_lakes = ee.FeatureCollection('ft:13s-6qZDKWXsLOWyN7Dap5o6Xuh2sehkirzze29o3', "geometry").toList(1000000)
#.filterMetadata(u'AREA_SKM', u'less_than', 300.0).toList(100000)#.filterMetadata(
#u'LAT_DEG', u'less_than', 42.02).filterMetadata( u'LAT_DEG', u'greater_than', 32.55).filterMetadata(
#u'LONG_DEG', u'less_than', -114.04).filterMetadata(u'LONG_DEG', u'greater_than', -125.29).toList(1000000)
#pprint(ee.Feature(all_lakes.get(0)).getInfo())
# display individual image from a date
if args.date:
from cmt.mapclient_qt import centerMap, addToMap
lake = all_lakes.get(0).getInfo()
ee_lake = ee.Feature(all_lakes.get(0))
ee_bounds = ee_lake.geometry().buffer(1000)
collection = get_image_collection(ee_bounds, start_date, end_date)
landsat = ee.Image(collection.first())
#pprint(landsat.getInfo())
center = ee_bounds.centroid().getInfo()['coordinates']
centerMap(center[0], center[1], 11)
addToMap(landsat, {'bands': ['B3', 'B2', 'B1']}, 'Landsat 3,2,1 RGB')
addToMap(landsat, {'bands': ['B7', 'B5', 'B4']}, 'Landsat 7,5,4 RGB', False)
addToMap(landsat, {'bands': ['B6' ]}, 'Landsat 6', False)
clouds = detect_clouds(landsat)
water = detect_water(landsat, clouds)
addToMap(clouds.mask(clouds), {'opacity' : 0.5}, 'Cloud Mask')
addToMap(water.mask(water), {'opacity' : 0.5, 'palette' : '00FFFF'}, 'Water Mask')
addToMap(ee.Feature(ee_bounds))
#print count_water_and_clouds(ee_bounds, landsat).getInfo()
# compute water levels in all images of area
else:
# Create output directory
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
# Fetch ee information for all of the lakes we loaded from the database
all_lakes_local = all_lakes.getInfo()
for i in range(len(all_lakes_local)): # For each lake...
ee_lake = ee.Feature(all_lakes.get(i)) # Get this one lake
# Spawn a processing thread for this lake
LakeThread((all_lakes_local[i], ee_lake, start_date, end_date, args.results_dir))
# Wait in this loop until all of the LakeThreads have stopped
while True:
thread_lock.acquire()
if total_threads == 0:
thread_lock.release()
break
thread_lock.release()
time.sleep(0.1)
| apache-2.0 |
revanthkolli/osf.io | scripts/analytics/utils.py | 6 | 1349 | # -*- coding: utf-8 -*-
import os
import unicodecsv as csv
from bson import ObjectId
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns # noqa
import requests
from website.addons.osfstorage import utils as storage_utils
def oid_to_datetime(oid):
return ObjectId(oid).generation_time
def mkdirp(path):
try:
os.makedirs(path)
except OSError:
pass
def plot_dates(dates, *args, **kwargs):
"""Plot date histogram."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(
[mdates.date2num(each) for each in dates],
*args, **kwargs
)
fig.autofmt_xdate()
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
return fig
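# Hedged usage sketch (illustration only; the dates and filename are made up):
# build a histogram of creation dates and save it without displaying.
#
# import datetime
# dates = [datetime.datetime(2015, 1, 1) + datetime.timedelta(days=d) for d in range(90)]
# fig = plot_dates(dates, bins=12)
# fig.savefig('date_histogram.png')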
def make_csv(fp, rows, headers=None):
writer = csv.writer(fp)
if headers:
writer.writerow(headers)
writer.writerows(rows)
def send_file(app, name, content_type, file_like, node, user):
"""Upload file to OSF."""
file_like.seek(0)
with app.test_request_context():
upload_url = storage_utils.get_waterbutler_upload_url(
user,
node,
path=name,
)
requests.put(
upload_url,
data=file_like,
headers={'Content-Type': content_type},
)
| apache-2.0 |
ZhangXinNan/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 43 | 3449 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
goyalsid/phageParser | populate.py | 3 | 6935 | #!/usr/bin/env python
import argparse
import os
import pickle
import pandas
import requests
from Bio import Entrez, SeqIO
from lxml import html, etree
from tqdm import tqdm
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'phageAPI.settings')
import django
django.setup()
from util.acc import read_accession_file
from util.prunedict import prune_dict
from util import fetch
from restapi.models import (
Organism,
Spacer,
Repeat,
LocusSpacerRepeat,
AntiCRISPR,
Locus
)
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
def populate_organism():
def add_organism(name, accession):
# get the object, this also checks for duplicates
o, created = Organism.objects.get_or_create(
name=name, accession=accession)
return o
def merge_acc_names(accession_list):
acc_name_dict = {}
db = "nuccore"
# Doing batches of 200 to make sure requests to NCBI are not too big
for i in range(0, len(accession_list), 200):
j = i + 200
result_handle = Entrez.efetch(
db=db, rettype="gb", id=accession_list[i:j])
# Populate result per organism name
records = SeqIO.parse(result_handle, 'genbank')
for record in tqdm(records):
# Using NCBI name, which should match accession number passed
acc_name_dict[record.name] = record.annotations['organism']
return acc_name_dict
with open(os.path.join(DATA_DIR, 'bac_accession_list.txt')) as f:
acc_name_dict = list(read_accession_file(f))
# acc_name_dict = merge_acc_names(accession_list)
for acc in acc_name_dict:
add_organism(name=acc_name_dict[acc], accession=acc)
def get_spacer_repeat_files():
spath = os.path.join(DATA_DIR, "spacerdatabase.txt")
surl = ('http://crispr.i2bc.paris-saclay.fr/'
'crispr/BLAST/Spacer/Spacerdatabase')
rpath = os.path.join(DATA_DIR, "repeatdatabase.txt")
rurl = 'http://crispr.i2bc.paris-saclay.fr/crispr/BLAST/DR/DRdatabase'
fetch.fetch(spath, surl)
fetch.fetch(rpath, rurl)
return spath, rpath
def repeatfiletodict(rfile):
rdict = {}
repeatrecords = SeqIO.parse(rfile, 'fasta')
for record in repeatrecords:
accessions = record.name.split('|')
sequence = str(record.seq)
for acc in accessions:
rdict[acc] = {'RepeatSeq': sequence}
return rdict
def addspacerstodict(gendict, sfile):
spacerrecords = SeqIO.parse(sfile, 'fasta')
for record in spacerrecords:
accessions = record.name.split('|')
sequence = str(record.seq)
for acc in accessions:
acc_elems = acc.split('_')
order = acc_elems[-1]
acc_id = '_'.join(acc_elems[:-1])
try:
if 'Spacers' in gendict[acc_id]:
gendict[acc_id]['Spacers'][order] = sequence
else:
gendict[acc_id]['Spacers'] = {order: sequence}
except KeyError:
print('Error on accession id: %s' % acc_id)
return gendict
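# Naming convention assumed by the two parsers above: the DR/spacer database
# FASTA headers pack several accessions into one name separated by '|', and
# each spacer accession looks like '<locus id>_<order>', so the trailing
# '_'-separated token is the spacer's position within its CRISPR locus.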
def addpositionstodict(gendict):
print("Downloading position information from web...")
for accidwithloc in tqdm(gendict):
if 'Start' in gendict[accidwithloc]:
continue
accid = '_'.join(accidwithloc.split('_')[:-1])
url = ('http://crispr.i2bc.paris-saclay.fr/crispr/crispr_db.php?'
'checked%5B%5D={}'.format(accid))
page = requests.get(url)
htmltable = html.fromstring(page.content).xpath(
"//table[normalize-space(@class)='primary_table']")[1]
strtable = etree.tostring(htmltable)
        # convert to a pandas DataFrame, then to a numpy array, and drop the header rows
arrtable = pandas.read_html(strtable)[0].as_matrix()[2:]
for row in arrtable:
if row[0] in gendict:
gendict[row[0]]['Start'] = row[2]
gendict[row[0]]['Stop'] = row[3]
else:
if row[1] != 'questionable':
print("Can't find %s in local files" % row[0])
return gendict
def populate_fromlocus(locid, locdict):
accid = '_'.join(locid.split('_')[:-1])
organismset = Organism.objects.filter(accession=accid)
if not organismset.exists():
print('Organism with accid %s not found in db' % accid)
return
organism = organismset[0]
repeat, _ = Repeat.objects.get_or_create(sequence=locdict['RepeatSeq'])
loc_start = int(locdict['Start'])
loc_end = int(locdict['Stop'])
locus, _ = Locus.objects.get_or_create(
organism=organism,
genomic_start=loc_start,
genomic_end=loc_end
)
spacers = locdict['Spacers']
for order in sorted(spacers):
spacer, _ = Spacer.objects.get_or_create(sequence=spacers[order])
order = int(order)
lsr, _ = LocusSpacerRepeat.objects.get_or_create(
locus=locus,
spacer=spacer,
repeat=repeat,
order=order
)
spacer.save()
lsr.save()
locus.save()
repeat.save()
organism.save()
def populate_lsrpair():
print('Downloading files and gathering online data.')
sfile, rfile = get_spacer_repeat_files()
gendict = prune_dict(
addpositionstodict(
addspacerstodict(
repeatfiletodict(rfile), sfile)))
    with open('dbbackups/genedict.pickle', 'wb') as f:  # 'wb': the dict is written (pickled), not read
pickle.dump(gendict, f, protocol=pickle.HIGHEST_PROTOCOL)
print('Created dictionary and dumped data to genedict.pickle')
print("Populating Spacer, Repeat, SpacerRepeatPair, "
"OrganismSpacerRepeatPair tables")
for locid in tqdm(gendict):
populate_fromlocus(locid, gendict[locid])
def populate_anticrispr():
with open(os.path.join(DATA_DIR, 'antiCRISPR_accessions.txt')) as f:
accession_list = list(read_accession_file(f))
print("Fetching AntiCRISPR entries")
result_handle = Entrez.efetch(
db='protein', rettype="fasta", id=accession_list)
for record in tqdm(SeqIO.parse(result_handle, 'fasta')):
spacer, _ = AntiCRISPR.objects.get_or_create(
accession=record.name,
sequence=str(record.seq))
spacer.save()
def main():
parser = argparse.ArgumentParser(
description='Populate the phageParser database with data from NCBI'
)
parser.add_argument(
'email',
nargs=1,
help=('your email address (does not need to be registered, '
'just used to identify you)')
)
args = parser.parse_args()
Entrez.email = args.email
print("Starting organism population")
populate_organism()
print("Starting LSR population")
populate_lsrpair()
print("Starting AntiCRISPR population")
populate_anticrispr()
if __name__ == '__main__':
main()
| mit |
ArtsiomCh/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
  Adapted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
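# Illustrative usage sketch for the '<component>__<parameter>' convention that
# set_params handles above (names are hypothetical, not part of this module):
# given an estimator `est` whose public attribute `clf` is itself an estimator,
#   est.set_params(clf__C=1.0)
# forwards C=1.0 to est.clf via est.clf.set_params(C=1.0), while
#   est.set_params(verbose=True)
# sets the attribute directly on `est` (provided `verbose` is an existing
# public attribute reported by get_params).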
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
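# Note on the fallback above: _train_test_split mirrors only a small subset of
# sklearn's train_test_split -- it honours test_size/train_size/random_state,
# always shuffles through a random permutation, and does not support
# stratification.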
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
sfu-discourse-lab/SFU_Comment_Extractor | Source_Code/CSV_creation/duplicate_filter.py | 1 | 2706 | ########################
# Author: Kavan Shukla #
########################
import pandas as pd
import argparse
import re
def get_arguments():
'''
argparse object initialization and reading input and output file paths.
input file: merged_old_new_duplicates.csv (-i)
output file: comments_to_flag.txt (-o)
'''
parser = argparse.ArgumentParser(description='csv file identifying duplicates between new and old comments')
parser.add_argument('--merged_csv', '-i', type=str, dest='merged_old_new_duplicates', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/merged_old_new_duplicates.csv',
default=r'/Users/vkolhatk/Data/GnM_CSVs/intermediate_csvs/merged_old_new_duplicates.csv',
help="the input csv file generated from duplicate_identification")
parser.add_argument('--output_comments_to_flag', '-o', type=str, dest='comments_to_flag', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/comments_to_flag.txt',
default=r'/Users/vkolhatk/Data/GnM_CSVs/intermediate_csvs/comments_to_flag.txt',
help="the output file containing the comments to flag as exact_match or similar in the final csv")
args = parser.parse_args()
return args
def longest(row):
'''
:param row: pandas dataframe row object in namedtuple format
    Given a pandas dataframe row namedtuple, this function returns the comment_counter for the comment text with the greatest length.
'''
if len(row.comment1) > len(row.comment2):
return row.comment_counter2
else:
return row.comment_counter1
def filter_duplicates(args):
'''
:param args: argparse object containing the input and output file as attributes
    Given an argparse object, this function creates a pandas dataframe from the input csv, filters the
    duplicate pairs and writes the pairs to flag to the comments_to_flag output file.
    1. If the comments share the same source and the same parent and the threshold is > 85 (confident that the comment is a duplicate),
    flag the source 1 comment in the final csv: 'Potential duplicate (similar or exact_match) of comment_id'
'''
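    # Each flagged pair is written out as one CSV line:
    # comment_counter1,comment_counter2,weighted_score,token_sort_score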
duplicates = pd.read_csv(args.merged_old_new_duplicates)
with open(args.comments_to_flag,"w") as to_flag:
for row in duplicates.itertuples():
comment_flag = (row.comment_counter1,row.comment_counter2)
to_flag.write(row.comment_counter1+','+row.comment_counter2+','+str(row.weighted_score) + ','+str(row.token_sort_score)+'\n')
if __name__=="__main__":
args = get_arguments()
filter_duplicates(args) | mit |
rhattersley/iris | docs/iris/example_code/General/SOI_filtering.py | 6 | 3196 | """
Applying a filter to a time-series
==================================
This example demonstrates low pass filtering a time-series by applying a
weighted running mean over the time dimension.
The time-series used is the Darwin-only Southern Oscillation index (SOI),
which is filtered using two different Lanczos filters, one to filter out
time-scales of less than two years and one to filter out time-scales of
less than 7 years.
References
----------
Duchon C. E. (1979) Lanczos Filtering in One and Two Dimensions.
Journal of Applied Meteorology, Vol 18, pp 1016-1022.
Trenberth K. E. (1984) Signal Versus Noise in the Southern Oscillation.
Monthly Weather Review, Vol 112, pp 326-332
"""
import numpy as np
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
def low_pass_weights(window, cutoff):
"""Calculate weights for a low pass Lanczos filter.
Args:
window: int
The length of the filter window.
cutoff: float
The cutoff frequency in inverse time steps.
"""
order = ((window - 1) // 2) + 1
nwts = 2 * order + 1
w = np.zeros([nwts])
n = nwts // 2
w[n] = 2 * cutoff
k = np.arange(1., n)
sigma = np.sin(np.pi * k / n) * n / (np.pi * k)
firstfactor = np.sin(2. * np.pi * cutoff * k) / (np.pi * k)
w[n-1:0:-1] = firstfactor * sigma
w[n+1:-1] = firstfactor * sigma
return w[1:-1]
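# A note on the weights returned above: they are the classic Lanczos kernel --
# the ideal low-pass (sinc) weights tapered by the sigma factor -- with the two
# end points dropped so that exactly `window` weights come back. Their sum
# stays close to 1 for a reasonably long window, which is why the filtering in
# main() below can be done as a plain weighted SUM over a rolling window.
# A quick sanity check (illustrative only, not part of this example):
#     wgts = low_pass_weights(121, 1. / 24.)
#     print(wgts.sum())  # expected to be close to 1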
def main():
# Enable a future option, to ensure that the netcdf load works the same way
# as in future Iris versions.
iris.FUTURE.netcdf_promote = True
# Load the monthly-valued Southern Oscillation Index (SOI) time-series.
fname = iris.sample_data_path('SOI_Darwin.nc')
soi = iris.load_cube(fname)
# Window length for filters.
window = 121
# Construct 2-year (24-month) and 7-year (84-month) low pass filters
# for the SOI data which is monthly.
wgts24 = low_pass_weights(window, 1. / 24.)
wgts84 = low_pass_weights(window, 1. / 84.)
    # Apply each filter using the rolling_window method with the weights
    # keyword argument. A weighted sum is required because the magnitudes of
    # the weights are just as important as their relative sizes.
soi24 = soi.rolling_window('time',
iris.analysis.SUM,
len(wgts24),
weights=wgts24)
soi84 = soi.rolling_window('time',
iris.analysis.SUM,
len(wgts84),
weights=wgts84)
# Plot the SOI time series and both filtered versions.
plt.figure(figsize=(9, 4))
iplt.plot(soi, color='0.7', linewidth=1., linestyle='-',
alpha=1., label='no filter')
iplt.plot(soi24, color='b', linewidth=2., linestyle='-',
alpha=.7, label='2-year filter')
iplt.plot(soi84, color='r', linewidth=2., linestyle='-',
alpha=.7, label='7-year filter')
plt.ylim([-4, 4])
plt.title('Southern Oscillation Index (Darwin Only)')
plt.xlabel('Time')
plt.ylabel('SOI')
plt.legend(fontsize=10)
iplt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
tosolveit/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
               title='Error: mean squared error\n to non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
tacaswell/bokeh | setup.py | 6 | 19751 | """Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
try:
import colorama
def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
except ImportError:
def bright(text): return text
def dim(text): return text
def white(text) : return text
def blue(text) : return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
if 'nightly' in sys.argv:
from setuptools import setup
sys.argv.remove('nightly')
with open('__conda_version__.txt', 'r') as f:
version = f.read().rstrip()
vers_file = os.path.join('bokeh', '__conda_version__.py')
with open(vers_file, 'w') as f:
f.write("conda_version=" + "'" + version + "'")
else:
from distutils.core import setup
from distutils import dir_util
# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
if sys.version_info[0] < 3:
input = raw_input
# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-' # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
package_data = []
def package_path(path, filters=()):
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
package_data.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
package_data.append(join(path, f))
# You can't install Bokeh in a virtualenv because of the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python)."""
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
prefixes = [sys.prefix, sys.exec_prefix]
sitepackages = []
seen = set()
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys.prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version_info[0] >= 3:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
sitepackages.append(os.path.abspath(sitedir))
sitepackages = [p for p in sitepackages if os.path.isdir(p)]
return sitepackages
def check_remove_bokeh_install(site_packages):
bokeh_path = join(site_packages, "bokeh")
if not (exists(bokeh_path) and isdir(bokeh_path)):
return
prompt = "Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path
val = input(prompt)
if val == "y":
print("Removing old bokeh install...", end=" ")
try:
shutil.rmtree(bokeh_path)
print("Done")
except (IOError, OSError):
print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
sys.exit(-1)
else:
print("Not removing old bokeh install")
sys.exit(1)
def remove_bokeh_pth(path_file):
if exists(path_file):
try:
os.remove(path_file)
except (IOError, OSError):
print("Unable to remove old path file at %s, exiting" % path_file)
sys.exit(-1)
return True
return False
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned error message:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
print("Building BokehJS... ", end="")
sys.stdout.flush()
os.chdir('bokehjs')
if sys.platform != "win32":
cmd = [join('node_modules', '.bin', 'gulp'), 'build']
else:
cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
t0 = time.time()
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
print(BUILD_EXEC_FAIL_MSG % (cmd, e))
sys.exit(1)
finally:
os.chdir('..')
result = proc.wait()
t1 = time.time()
if result != 0:
indented_msg = ""
msg = proc.stderr.read().decode('ascii', errors='ignore')
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_FAIL_MSG % red(msg))
sys.exit(1)
indented_msg = ""
msg = proc.stdout.read().decode('ascii', errors='ignore')
pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
for line in msg.strip().split("\n"):
stamp, txt = pat.match(line).groups()
indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_SUCCESS_MSG % indented_msg)
print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
print()
print("Build artifact sizes:")
try:
blddir = join("bokehjs", "build")
bkjs_size = os.stat(join(blddir, "js", "bokeh.js")).st_size / 2**10
bkjs_min_size = os.stat(join(blddir, "js", "bokeh.min.js")).st_size / 2**10
bkcss_size = os.stat(join(blddir, "css", "bokeh.css")).st_size / 2**10
bkcss_min_size = os.stat(join(blddir, "css", "bokeh.min.css")).st_size / 2**10
print(" - bokeh.js : %6.1f KB" % bkjs_size)
print(" - bokeh.css : %6.1f KB" % bkcss_size)
print(" - bokeh.min.js : %6.1f KB" % bkjs_min_size)
print(" - bokeh.min.css : %6.1f KB" % bkcss_min_size)
except Exception as e:
print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
target_jsdir = join(SERVER, 'static', 'js')
target_cssdir = join(SERVER, 'static', 'css')
STATIC_ASSETS = [
join(JS, 'bokeh.js'),
join(JS, 'bokeh.min.js'),
join(CSS, 'bokeh.css'),
join(CSS, 'bokeh.min.css'),
]
if not all([exists(a) for a in STATIC_ASSETS]):
print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
sys.exit(1)
if exists(target_jsdir):
shutil.rmtree(target_jsdir)
shutil.copytree(JS, target_jsdir)
if exists(target_cssdir):
shutil.rmtree(target_cssdir)
shutil.copytree(CSS, target_cssdir)
def clean():
print("Removing prior-built items...", end=" ")
build_dir = 'build/lib/bokeh'
if os.path.exists(build_dir):
dir_util.remove_tree(build_dir)
for root, dirs, files in os.walk('.'):
for item in files:
if item.endswith('.pyc'):
os.remove(os.path.join(root, item))
print("Done")
def get_user_jsargs():
print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
mapping = {"1": True, "2": False}
value = input("Choice? ")
while value not in mapping:
print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
value = input("Choice? ")
return mapping[value]
def parse_jsargs():
options = ('install', 'develop', 'sdist', 'egg_info', 'build')
installing = any(arg in sys.argv for arg in options)
if '--build_js' in sys.argv:
if not installing:
print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
sys.exit(1)
jsbuild = True
sys.argv.remove('--build_js')
elif '--install_js' in sys.argv:
# Note that --install_js can be used by itself (without sdist/install/develop)
jsbuild = False
sys.argv.remove('--install_js')
else:
if installing:
jsbuild = get_user_jsargs()
else:
jsbuild = False
return jsbuild
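# In short: '--build_js' forces a fresh BokehJS build, '--install_js' reuses
# the last build from bokehjs/build, and a bare install/develop/sdist/build
# falls back to the interactive prompt in get_user_jsargs().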
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Aliases for build_js and install_js
for i in range(len(sys.argv)):
if sys.argv[i] == '--build-js':
sys.argv[i] = '--build_js'
if sys.argv[i] == '--install-js':
sys.argv[i] = '--install_js'
# Set up this checkout or source archive with the right BokehJS files.
if sys.version_info[:2] < (2, 6):
raise RuntimeError("Bokeh requires python >= 2.6")
# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
install_js()
sys.exit(0)
# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
if "--install_js" in sys.argv:
print("Removing '--install_js' incompatible with 'sdist'")
sys.argv.remove('--install_js')
if "--build_js" not in sys.argv:
print("Adding '--build_js' required for 'sdist'")
sys.argv.append('--build_js')
# check for package install, set jsinstall to False to skip prompt
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
if "--build_js" in sys.argv or "--install_js" in sys.argv:
print("BokehJS source code is not shipped in sdist packages; "
"building/installing from the bokehjs source directory is disabled. "
"To build or develop BokehJS yourself, you must clone the full "
"Bokeh repository from https://github.com/bokeh/bokeh")
if "--build_js" in sys.argv:
sys.argv.remove('--build_js')
if "--install_js" in sys.argv:
sys.argv.remove('--install_js')
jsbuild = False
jsinstall = False
else:
jsbuild = parse_jsargs()
if jsbuild:
build_js()
if jsinstall:
install_js()
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics')
package_path(join(SERVER, 'static'))
package_path(join(SERVER, '_templates'))
package_path(join(ROOT, 'bokeh', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)
package_path(join(ROOT, 'bokeh', 'server', 'redis.conf'))
scripts = ['bokeh-server', 'websocket_worker.py']
if '--user' in sys.argv:
site_packages = site.USER_SITE
else:
site_packages = getsitepackages()[0]
path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))
print()
if 'develop' in sys.argv:
check_remove_bokeh_install(site_packages)
with open(path_file, "w+") as f:
f.write(path)
print("Installing Bokeh for development:")
print(" - writing path '%s' to %s" % (path, path_file))
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
sys.exit()
elif 'clean' in sys.argv:
clean()
elif 'install' in sys.argv:
pth_removed = remove_bokeh_pth(path_file)
print("Installing Bokeh:")
if pth_removed:
print(" - removed path file at %s" % path_file)
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
if jsinstall:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build_js build and install a fresh BokehJS")
print(" --install_js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print()
REQUIRES = [
'Flask>=0.10.1',
'Jinja2>=2.7',
'MarkupSafe>=0.18',
'Werkzeug>=0.9.1',
'greenlet>=0.4.1',
'itsdangerous>=0.21',
'python-dateutil>=2.1',
'requests>=1.2.3',
'six>=1.5.2',
'pygments>=1.6',
'pystache>=0.5.3',
'markdown>=2.3.1',
'PyYAML>=3.10',
'pyzmq>=14.3.1',
'tornado>=4.0.1',
# cli
# 'click>=3.3',
# tests
# 'pytest'
# 'mock>=1.0.1',
'colorama>=0.2.7'
]
if sys.version_info[:2] == (2, 6):
REQUIRES.append('argparse>=1.1')
# if sys.platform != "win32":
# REQUIRES.append('redis>=2.7.6')
if platform.python_implementation() != "PyPy":
# You need to install PyPy's fork of NumPy to make it work:
# pip install git+https://bitbucket.org/pypy/numpy.git
    # Also, pandas is not yet working with PyPy.
REQUIRES.extend([
'numpy>=1.7.1',
'pandas>=0.11.0'
])
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()
setup(
name='bokeh',
version=_version,
cmdclass=_cmdclass,
packages=[
'bokeh',
'bokeh.models',
'bokeh.models.tests',
'bokeh.models.widgets',
'bokeh.charts',
'bokeh.charts.builder',
'bokeh.charts.builder.tests',
'bokeh.charts.tests',
'bokeh.compat',
'bokeh.compat.mplexporter',
'bokeh.compat.mplexporter.renderers',
'bokeh.crossfilter',
'bokeh.sampledata',
'bokeh.server',
'bokeh.server.models',
'bokeh.server.storage',
'bokeh.server.tests',
'bokeh.server.utils',
'bokeh.server.views',
'bokeh.server.websocket',
'bokeh.server.zmq',
'bokeh.sphinxext',
'bokeh.tests',
'bokeh.transforms',
'bokeh.util',
'bokeh.util.tests',
'bokeh.validation',
],
package_data={'bokeh': package_data},
author='Continuum Analytics',
author_email='[email protected]',
url='http://github.com/bokeh/bokeh',
description='Statistical and novel interactive HTML plots for Python',
license='New BSD',
scripts=scripts,
zip_safe=False,
install_requires=REQUIRES
)
| bsd-3-clause |
qifeigit/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
madmax983/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_DEPRECATED_benignKmeans.py | 2 | 1211 | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
def benignKmeans():
# Connect to a pre-existing cluster
# connect to localhost:54321
# Log.info("Importing benign.csv data...\n")
benign_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/benign.csv"))
#benign_h2o.summary()
benign_sci = np.genfromtxt(pyunit_utils.locate("smalldata/logreg/benign.csv"), delimiter=",")
# Impute missing values with column mean
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
benign_sci = imp.fit_transform(benign_sci)
# Log.info(paste("H2O K-Means with ", i, " clusters:\n", sep = ""))
for i in range(1,7):
benign_h2o_km = h2o.kmeans(x=benign_h2o, k=i)
print "H2O centers"
print benign_h2o_km.centers()
benign_sci_km = KMeans(n_clusters=i, init='k-means++', n_init=1)
benign_sci_km.fit(benign_sci)
print "sckit centers"
print benign_sci_km.cluster_centers_
if __name__ == "__main__":
pyunit_utils.standalone_test(benignKmeans)
else:
benignKmeans()
| apache-2.0 |
xyguo/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 43 | 24671 | """
TODO: cross-check the F-values with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
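    # Concretely: centering costs one degree of freedom (n - 2 instead of
    # n - 1), so with already-centered variates the two F statistics differ
    # only by that ratio, i.e. F_centered * (n - 1) / (n - 2) == F_uncentered.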
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
rebeccabilbro/machine-learning | code/abaloneUtils.py | 5 | 4299 | # utils
# Utility functions for handling data
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Thu Feb 26 17:47:35 2015 -0500
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: utils.py [] [email protected] $
"""
Utility functions for handling data
"""
##########################################################################
## Imports
##########################################################################
import os
import csv
import time
import json
import numpy as np
from sklearn.datasets.base import Bunch
##########################################################################
## Module Constants
##########################################################################
SKL_DATA = "SCIKIT_LEARN_DATA"
BASE_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
DATA_DIR = os.path.join(BASE_DIR, "data")
CODE_DIR = os.path.join(BASE_DIR, "code")
##########################################################################
## Helper Functions
##########################################################################
def timeit(func):
"""
Returns how long a function took to execute, along with the output
"""
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
return result, time.time() - start
    return wrapper
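# A minimal usage sketch (hypothetical function name, not from the original
# source): decorating a function makes it return its result together with the
# elapsed wall-clock time in seconds, e.g.
#   @timeit
#   def slow_sum(n): return sum(range(n))
#   result, seconds = slow_sum(10 ** 6)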
##########################################################################
## Dataset Loading
##########################################################################
def get_data_home(data_home=None):
"""
Returns the path of the data directory
"""
if data_home is None:
data_home = os.environ.get(SKL_DATA, DATA_DIR)
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
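# Usage note (a sketch, not part of the original module): the data directory can
# be redirected through the SCIKIT_LEARN_DATA environment variable, e.g.
#   os.environ["SCIKIT_LEARN_DATA"] = "/tmp/abalone-data"
#   get_data_home()  # -> "/tmp/abalone-data", created if it does not exist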
def load_data(path, descr=None, target_index=-1):
"""
    Returns a scikit-learn dataset Bunch which includes several important
attributes that are used in modeling:
data: array of shape n_samples * n_features
target: array of length n_samples
feature_names: names of the features
target_names: names of the targets
filenames: names of the files that were loaded
DESCR: contents of the readme
This data therefore has the look and feel of the toy datasets.
    Pass in a path, usually just the name of the location in the data dir.
It will be joined with the result of `get_data_home`. The contents are:
path
- abalone.names # The file to load into DESCR
- meta.json # A file containing metadata to load
- dataset.txt # The numpy loadtxt file
- dataset.csv # The pandas read_csv file
You can specify another descr, another feature_names, and whether or
not the dataset has a header row. You can also specify the index of the
target, which by default is the last item in the row (-1)
"""
root = os.path.join(get_data_home(), path)
filenames = {
'meta': os.path.join(root, 'meta.json'),
'rdme': os.path.join(root, 'abalone.names'),
'data': os.path.join(root, 'dataset.csv'),
}
target_names = None
feature_names = None
DESCR = None
with open(filenames['meta'], 'r') as f:
meta = json.load(f)
target_names = meta['target_names']
feature_names = meta['feature_names']
with open(filenames['rdme'], 'r') as f:
DESCR = f.read()
# skip header from csv, load data
dataset = np.loadtxt(filenames['data'], delimiter=',', skiprows=1)
data = None
target = None
    # Target assumed to be either the last or the first column
if target_index == -1:
data = dataset[:,0:-1]
target = dataset[:,-1]
elif target_index == 0:
data = dataset[:,1:]
target = dataset[:,0]
else:
raise ValueError("Target index must be either -1 or 0")
return Bunch(data=data,
target=target,
filenames=filenames,
target_names=target_names,
feature_names=feature_names,
DESCR=DESCR)
def load_abalone():
return load_data('abalone')
| mit |
xuanyuanking/spark | python/pyspark/pandas/accessors.py | 9 | 35647 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
pandas-on-Spark specific features.
"""
import inspect
from typing import Any, Callable, Optional, Tuple, Union, TYPE_CHECKING, cast
from types import FunctionType
import numpy as np # noqa: F401
import pandas as pd
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import DataType, LongType, StructField, StructType
from pyspark.pandas._typing import DataFrameOrSeries, Name
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
SPARK_INDEX_NAME_FORMAT,
SPARK_DEFAULT_SERIES_NAME,
)
from pyspark.pandas.typedef import infer_return_type, DataFrameType, ScalarType, SeriesType
from pyspark.pandas.utils import (
is_name_like_value,
is_name_like_tuple,
name_like_string,
scol_for,
verify_temp_column_name,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.sql._typing import UserDefinedFunctionLike
class PandasOnSparkFrameMethods(object):
"""pandas-on-Spark specific features for DataFrame."""
def __init__(self, frame: "DataFrame"):
self._psdf = frame
def attach_id_column(self, id_type: str, column: Name) -> "DataFrame":
"""
Attach a column to be used as identifier of rows similar to the default index.
See also `Default Index type
<https://koalas.readthedocs.io/en/latest/user_guide/options.html#default-index-type>`_.
Parameters
----------
id_type : string
The id type.
- 'sequence' : a sequence that increases one by one.
.. note:: this uses Spark's Window without specifying partition specification.
                  This can move all of the data into a single partition on a single
                  machine and cause serious performance degradation.
                  Avoid this method with very large datasets.
- 'distributed-sequence' : a sequence that increases one by one,
by group-by and group-map approach in a distributed manner.
- 'distributed' : a monotonically increasing sequence simply by using PySpark’s
monotonically_increasing_id function in a fully distributed manner.
column : string or tuple of string
The column name.
Returns
-------
DataFrame
The DataFrame attached the column.
Examples
--------
>>> df = ps.DataFrame({"x": ['a', 'b', 'c']})
>>> df.pandas_on_spark.attach_id_column(id_type="sequence", column="id")
x id
0 a 0
1 b 1
2 c 2
>>> df.pandas_on_spark.attach_id_column(id_type="distributed-sequence", column=0)
x 0
0 a 0
1 b 1
2 c 2
>>> df.pandas_on_spark.attach_id_column(id_type="distributed", column=0.0)
... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
x 0.0
0 a ...
1 b ...
2 c ...
For multi-index columns:
>>> df = ps.DataFrame({("x", "y"): ['a', 'b', 'c']})
>>> df.pandas_on_spark.attach_id_column(id_type="sequence", column=("id-x", "id-y"))
x id-x
y id-y
0 a 0
1 b 1
2 c 2
>>> df.pandas_on_spark.attach_id_column(id_type="distributed-sequence", column=(0, 1.0))
x 0
y 1.0
0 a 0
1 b 1
2 c 2
"""
from pyspark.pandas.frame import DataFrame
if id_type == "sequence":
attach_func = InternalFrame.attach_sequence_column
elif id_type == "distributed-sequence":
attach_func = InternalFrame.attach_distributed_sequence_column
elif id_type == "distributed":
attach_func = InternalFrame.attach_distributed_column
else:
raise ValueError(
"id_type should be one of 'sequence', 'distributed-sequence' and 'distributed'"
)
assert is_name_like_value(column, allow_none=False), column
if not is_name_like_tuple(column):
column = (column,)
internal = self._psdf._internal
if len(column) != internal.column_labels_level:
raise ValueError(
"The given column `{}` must be the same length as the existing columns.".format(
column
)
)
elif column in internal.column_labels:
raise ValueError(
"The given column `{}` already exists.".format(name_like_string(column))
)
# Make sure the underlying Spark column names are the form of
# `name_like_string(column_label)`.
sdf = internal.spark_frame.select(
[
scol.alias(SPARK_INDEX_NAME_FORMAT(i))
for i, scol in enumerate(internal.index_spark_columns)
]
+ [
scol.alias(name_like_string(label))
for scol, label in zip(internal.data_spark_columns, internal.column_labels)
]
)
sdf, force_nullable = attach_func(sdf, name_like_string(column))
return DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i)) for i in range(internal.index_level)
],
index_names=internal.index_names,
index_fields=(
[field.copy(nullable=True) for field in internal.index_fields]
if force_nullable
else internal.index_fields
),
column_labels=internal.column_labels + [column],
data_spark_columns=(
[scol_for(sdf, name_like_string(label)) for label in internal.column_labels]
+ [scol_for(sdf, name_like_string(column))]
),
data_fields=(
(
[field.copy(nullable=True) for field in internal.data_fields]
if force_nullable
else internal.data_fields
)
+ [
InternalField.from_struct_field(
StructField(name_like_string(column), LongType(), nullable=False)
)
]
),
column_label_names=internal.column_label_names,
).resolved_copy
)
def apply_batch(
self, func: Callable[..., pd.DataFrame], args: Tuple = (), **kwds: Any
) -> "DataFrame":
"""
Apply a function that takes pandas DataFrame and outputs pandas DataFrame. The pandas
DataFrame given to the function is of a batch used internally.
See also `Transform and apply a function
<https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.
        .. note:: the `func` is unable to access the whole input frame. pandas-on-Spark
            internally splits the input frame into multiple batches and calls `func` with each
batch multiple times. Therefore, operations such as global aggregations are impossible.
See the example below.
>>> # This case does not return the length of whole frame but of the batch internally
... # used.
... def length(pdf) -> ps.DataFrame[int]:
... return pd.DataFrame([len(pdf)])
...
>>> df = ps.DataFrame({'A': range(1000)})
>>> df.pandas_on_spark.apply_batch(length) # doctest: +SKIP
c0
0 83
1 83
2 83
...
10 83
11 83
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def plus_one(x) -> ps.DataFrame[float, float]:
... return x + 1
If the return type is specified, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``.
To specify the column names, you can assign them in a pandas friendly style as below:
>>> def plus_one(x) -> ps.DataFrame["a": float, "b": float]:
... return x + 1
>>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
>>> def plus_one(x) -> ps.DataFrame[zip(pdf.dtypes, pdf.columns)]:
... return x + 1
When the given function has the return type annotated, the original index of the
DataFrame will be lost and a default index will be attached to the result DataFrame.
Please be careful about configuring the default index. See also `Default Index Type
<https://koalas.readthedocs.io/en/latest/user_guide/options.html#default-index-type>`_.
Parameters
----------
func : function
Function to apply to each pandas frame.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
DataFrame
See Also
--------
DataFrame.apply: For row/columnwise operations.
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
        Series.pandas_on_spark.transform_batch: transform each pandas chunk of the Series.
Examples
--------
>>> df = ps.DataFrame([(1, 2), (3, 4), (5, 6)], columns=['A', 'B'])
>>> df
A B
0 1 2
1 3 4
2 5 6
>>> def query_func(pdf) -> ps.DataFrame[int, int]:
... return pdf.query('A == 1')
>>> df.pandas_on_spark.apply_batch(query_func)
c0 c1
0 1 2
>>> def query_func(pdf) -> ps.DataFrame["A": int, "B": int]:
... return pdf.query('A == 1')
>>> df.pandas_on_spark.apply_batch(query_func)
A B
0 1 2
You can also omit the type hints so pandas-on-Spark infers the return schema as below:
>>> df.pandas_on_spark.apply_batch(lambda pdf: pdf.query('A == 1'))
A B
0 1 2
You can also specify extra arguments.
>>> def calculation(pdf, y, z) -> ps.DataFrame[int, int]:
... return pdf ** y + z
>>> df.pandas_on_spark.apply_batch(calculation, args=(10,), z=20)
c0 c1
0 21 1044
1 59069 1048596
2 9765645 60466196
You can also use ``np.ufunc`` and built-in functions as input.
>>> df.pandas_on_spark.apply_batch(np.add, args=(10,))
A B
0 11 12
1 13 14
2 15 16
>>> (df * -1).pandas_on_spark.apply_batch(abs)
A B
0 1 2
1 3 4
2 5 6
"""
# TODO: codes here partially duplicate `DataFrame.apply`. Can we deduplicate?
from pyspark.pandas.groupby import GroupBy
from pyspark.pandas.frame import DataFrame
from pyspark import pandas as ps
if not isinstance(func, FunctionType):
assert callable(func), "the first argument should be a callable function."
f = func
func = lambda *args, **kwargs: f(*args, **kwargs)
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
original_func = func
func = lambda o: original_func(o, *args, **kwds)
self_applied = DataFrame(self._psdf._internal.resolved_copy) # type: DataFrame
if should_infer_schema:
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
limit = ps.get_option("compute.shortcut_limit")
pdf = self_applied.head(limit + 1)._to_internal_pandas()
applied = func(pdf)
if not isinstance(applied, pd.DataFrame):
raise ValueError(
"The given function should return a frame; however, "
"the return type was %s." % type(applied)
)
psdf = ps.DataFrame(applied) # type: DataFrame
if len(pdf) <= limit:
return psdf
index_fields = [field.normalize_spark_type() for field in psdf._internal.index_fields]
data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields]
return_schema = StructType([field.struct_field for field in index_fields + data_fields])
output_func = GroupBy._make_pandas_df_builder_func(
self_applied, func, return_schema, retain_index=True
)
sdf = self_applied._internal.spark_frame.mapInPandas(
lambda iterator: map(output_func, iterator), schema=return_schema
)
# If schema is inferred, we can restore indexes too.
internal = psdf._internal.with_new_sdf(
spark_frame=sdf, index_fields=index_fields, data_fields=data_fields
)
else:
return_type = infer_return_type(original_func)
is_return_dataframe = isinstance(return_type, DataFrameType)
if not is_return_dataframe:
raise TypeError(
"The given function should specify a frame as its type "
"hints; however, the return type was %s." % return_sig
)
return_schema = cast(DataFrameType, return_type).spark_type
output_func = GroupBy._make_pandas_df_builder_func(
self_applied, func, return_schema, retain_index=False
)
sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(
lambda iterator: map(output_func, iterator), schema=return_schema
)
# Otherwise, it loses index.
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=None,
data_fields=cast(DataFrameType, return_type).fields,
)
return DataFrame(internal)
def transform_batch(
self, func: Callable[..., Union[pd.DataFrame, pd.Series]], *args: Any, **kwargs: Any
) -> DataFrameOrSeries:
"""
Transform chunks with a function that takes pandas DataFrame and outputs pandas DataFrame.
The pandas DataFrame given to the function is of a batch used internally. The length of
each input and output should be the same.
See also `Transform and apply a function
<https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.
        .. note:: the `func` is unable to access the whole input frame. pandas-on-Spark
            internally splits the input frame into multiple batches and calls `func` with each
batch multiple times. Therefore, operations such as global aggregations are impossible.
See the example below.
>>> # This case does not return the length of whole frame but of the batch internally
... # used.
... def length(pdf) -> ps.DataFrame[int]:
... return pd.DataFrame([len(pdf)] * len(pdf))
...
>>> df = ps.DataFrame({'A': range(1000)})
>>> df.pandas_on_spark.transform_batch(length) # doctest: +SKIP
c0
0 83
1 83
2 83
...
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def plus_one(x) -> ps.DataFrame[float, float]:
... return x + 1
If the return type is specified, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``.
To specify the column names, you can assign them in a pandas friendly style as below:
>>> def plus_one(x) -> ps.DataFrame['a': float, 'b': float]:
... return x + 1
>>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
>>> def plus_one(x) -> ps.DataFrame[zip(pdf.dtypes, pdf.columns)]:
... return x + 1
When the given function returns DataFrame and has the return type annotated, the
original index of the DataFrame will be lost and then a default index will be attached
to the result. Please be careful about configuring the default index. See also
`Default Index Type
<https://koalas.readthedocs.io/en/latest/user_guide/options.html#default-index-type>`_.
Parameters
----------
func : function
Function to transform each pandas frame.
*args
Positional arguments to pass to func.
**kwargs
Keyword arguments to pass to func.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.pandas_on_spark.apply_batch: For row/columnwise operations.
        Series.pandas_on_spark.transform_batch: transform each pandas chunk of the Series.
Examples
--------
>>> df = ps.DataFrame([(1, 2), (3, 4), (5, 6)], columns=['A', 'B'])
>>> df
A B
0 1 2
1 3 4
2 5 6
>>> def plus_one_func(pdf) -> ps.DataFrame[int, int]:
... return pdf + 1
>>> df.pandas_on_spark.transform_batch(plus_one_func)
c0 c1
0 2 3
1 4 5
2 6 7
>>> def plus_one_func(pdf) -> ps.DataFrame['A': int, 'B': int]:
... return pdf + 1
>>> df.pandas_on_spark.transform_batch(plus_one_func)
A B
0 2 3
1 4 5
2 6 7
>>> def plus_one_func(pdf) -> ps.Series[int]:
... return pdf.B + 1
>>> df.pandas_on_spark.transform_batch(plus_one_func)
0 3
1 5
2 7
dtype: int64
You can also omit the type hints so pandas-on-Spark infers the return schema as below:
>>> df.pandas_on_spark.transform_batch(lambda pdf: pdf + 1)
A B
0 2 3
1 4 5
2 6 7
>>> (df * -1).pandas_on_spark.transform_batch(abs)
A B
0 1 2
1 3 4
2 5 6
Note that you should not transform the index. The index information will not change.
>>> df.pandas_on_spark.transform_batch(lambda pdf: pdf.B + 1)
0 3
1 5
2 7
Name: B, dtype: int64
You can also specify extra arguments as below.
>>> df.pandas_on_spark.transform_batch(lambda pdf, a, b, c: pdf.B + a + b + c, 1, 2, c=3)
0 8
1 10
2 12
Name: B, dtype: int64
"""
from pyspark.pandas.groupby import GroupBy
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import first_series
from pyspark import pandas as ps
assert callable(func), "the first argument should be a callable function."
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
original_func = func
func = lambda o: original_func(o, *args, **kwargs)
def apply_func(pdf: pd.DataFrame) -> pd.DataFrame:
return func(pdf).to_frame()
def pandas_series_func(
f: Callable[[pd.DataFrame], pd.DataFrame], return_type: DataType
) -> "UserDefinedFunctionLike":
ff = f
@pandas_udf(returnType=return_type) # type: ignore
def udf(pdf: pd.DataFrame) -> pd.Series:
return first_series(ff(pdf))
return udf
if should_infer_schema:
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
limit = ps.get_option("compute.shortcut_limit")
pdf = self._psdf.head(limit + 1)._to_internal_pandas()
transformed = func(pdf)
if not isinstance(transformed, (pd.DataFrame, pd.Series)):
raise ValueError(
"The given function should return a frame; however, "
"the return type was %s." % type(transformed)
)
if len(transformed) != len(pdf):
raise ValueError("transform_batch cannot produce aggregated results")
psdf_or_psser = ps.from_pandas(transformed)
if isinstance(psdf_or_psser, ps.Series):
psser = cast(ps.Series, psdf_or_psser)
field = psser._internal.data_fields[0].normalize_spark_type()
return_schema = StructType([field.struct_field])
output_func = GroupBy._make_pandas_df_builder_func(
self._psdf, apply_func, return_schema, retain_index=False
)
pudf = pandas_series_func(output_func, return_type=field.spark_type)
columns = self._psdf._internal.spark_columns
# TODO: Index will be lost in this case.
internal = self._psdf._internal.copy(
column_labels=psser._internal.column_labels,
data_spark_columns=[pudf(F.struct(*columns)).alias(field.name)],
data_fields=[field],
column_label_names=psser._internal.column_label_names,
)
return first_series(DataFrame(internal))
else:
psdf = cast(DataFrame, psdf_or_psser)
if len(pdf) <= limit:
# only do the short cut when it returns a frame to avoid
# operations on different dataframes in case of series.
return psdf
index_fields = [
field.normalize_spark_type() for field in psdf._internal.index_fields
]
data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields]
return_schema = StructType(
[field.struct_field for field in index_fields + data_fields]
)
self_applied = DataFrame(self._psdf._internal.resolved_copy) # type: DataFrame
output_func = GroupBy._make_pandas_df_builder_func(
self_applied, func, return_schema, retain_index=True
)
columns = self_applied._internal.spark_columns
pudf = pandas_udf(output_func, returnType=return_schema) # type: ignore
temp_struct_column = verify_temp_column_name(
self_applied._internal.spark_frame, "__temp_struct__"
)
applied = pudf(F.struct(*columns)).alias(temp_struct_column)
sdf = self_applied._internal.spark_frame.select(applied)
sdf = sdf.selectExpr("%s.*" % temp_struct_column)
return DataFrame(
psdf._internal.with_new_sdf(
spark_frame=sdf, index_fields=index_fields, data_fields=data_fields
)
)
else:
return_type = infer_return_type(original_func)
is_return_series = isinstance(return_type, SeriesType)
is_return_dataframe = isinstance(return_type, DataFrameType)
if not is_return_dataframe and not is_return_series:
raise TypeError(
"The given function should specify a frame or series as its type "
"hints; however, the return type was %s." % return_sig
)
if is_return_series:
field = InternalField(
dtype=cast(SeriesType, return_type).dtype,
struct_field=StructField(
name=SPARK_DEFAULT_SERIES_NAME,
dataType=cast(SeriesType, return_type).spark_type,
),
).normalize_spark_type()
return_schema = StructType([field.struct_field])
output_func = GroupBy._make_pandas_df_builder_func(
self._psdf, apply_func, return_schema, retain_index=False
)
pudf = pandas_series_func(output_func, return_type=field.spark_type)
columns = self._psdf._internal.spark_columns
internal = self._psdf._internal.copy(
column_labels=[None],
data_spark_columns=[pudf(F.struct(*columns)).alias(field.name)],
data_fields=[field],
column_label_names=None,
)
return first_series(DataFrame(internal))
else:
data_fields = [
field.normalize_spark_type()
for field in cast(DataFrameType, return_type).fields
]
return_schema = StructType([field.struct_field for field in data_fields])
self_applied = DataFrame(self._psdf._internal.resolved_copy)
output_func = GroupBy._make_pandas_df_builder_func(
self_applied, func, return_schema, retain_index=False
)
columns = self_applied._internal.spark_columns
pudf = pandas_udf(output_func, returnType=return_schema) # type: ignore
temp_struct_column = verify_temp_column_name(
self_applied._internal.spark_frame, "__temp_struct__"
)
applied = pudf(F.struct(*columns)).alias(temp_struct_column)
sdf = self_applied._internal.spark_frame.select(applied)
sdf = sdf.selectExpr("%s.*" % temp_struct_column)
internal = InternalFrame(
spark_frame=sdf, index_spark_columns=None, data_fields=data_fields
)
return DataFrame(internal)
class PandasOnSparkSeriesMethods(object):
"""pandas-on-Spark specific features for Series."""
def __init__(self, series: "Series"):
self._psser = series
def transform_batch(
self, func: Callable[..., pd.Series], *args: Any, **kwargs: Any
) -> "Series":
"""
Transform the data with the function that takes pandas Series and outputs pandas Series.
The pandas Series given to the function is of a batch used internally.
See also `Transform and apply a function
<https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.
        .. note:: the `func` is unable to access the whole input series. pandas-on-Spark
            internally splits the input series into multiple batches and calls `func` with each
batch multiple times. Therefore, operations such as global aggregations are impossible.
See the example below.
>>> # This case does not return the length of whole frame but of the batch internally
... # used.
... def length(pser) -> ps.Series[int]:
... return pd.Series([len(pser)] * len(pser))
...
>>> df = ps.DataFrame({'A': range(1000)})
>>> df.A.pandas_on_spark.transform_batch(length) # doctest: +SKIP
c0
0 83
1 83
2 83
...
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def plus_one(x) -> ps.Series[int]:
... return x + 1
Parameters
----------
func : function
Function to apply to each pandas frame.
*args
Positional arguments to pass to func.
**kwargs
Keyword arguments to pass to func.
Returns
-------
DataFrame
See Also
--------
DataFrame.pandas_on_spark.apply_batch : Similar but it takes pandas DataFrame as its
internal batch.
Examples
--------
>>> df = ps.DataFrame([(1, 2), (3, 4), (5, 6)], columns=['A', 'B'])
>>> df
A B
0 1 2
1 3 4
2 5 6
>>> def plus_one_func(pser) -> ps.Series[np.int64]:
... return pser + 1
>>> df.A.pandas_on_spark.transform_batch(plus_one_func)
0 2
1 4
2 6
Name: A, dtype: int64
You can also omit the type hints so pandas-on-Spark infers the return schema as below:
>>> df.A.pandas_on_spark.transform_batch(lambda pser: pser + 1)
0 2
1 4
2 6
Name: A, dtype: int64
You can also specify extra arguments.
>>> def plus_one_func(pser, a, b, c=3) -> ps.Series[np.int64]:
... return pser + a + b + c
>>> df.A.pandas_on_spark.transform_batch(plus_one_func, 1, b=2)
0 7
1 9
2 11
Name: A, dtype: int64
You can also use ``np.ufunc`` and built-in functions as input.
>>> df.A.pandas_on_spark.transform_batch(np.add, 10)
0 11
1 13
2 15
Name: A, dtype: int64
>>> (df * -1).A.pandas_on_spark.transform_batch(abs)
0 1
1 3
2 5
Name: A, dtype: int64
"""
assert callable(func), "the first argument should be a callable function."
return_sig = None
try:
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
except TypeError:
# Falls back to schema inference if it fails to get signature.
pass
return_type = None
if return_sig is not None:
# Extract the signature arguments from this function.
sig_return = infer_return_type(func)
if not isinstance(sig_return, SeriesType):
raise ValueError(
"Expected the return type of this function to be of type column,"
" but found type {}".format(sig_return)
)
return_type = cast(SeriesType, sig_return)
return self._transform_batch(lambda c: func(c, *args, **kwargs), return_type)
def _transform_batch(
self, func: Callable[..., pd.Series], return_type: Optional[Union[SeriesType, ScalarType]]
) -> "Series":
from pyspark.pandas.groupby import GroupBy
from pyspark.pandas.series import Series, first_series
from pyspark import pandas as ps
if not isinstance(func, FunctionType):
f = func
func = lambda *args, **kwargs: f(*args, **kwargs)
if return_type is None:
# TODO: In this case, it avoids the shortcut for now (but only infers schema)
# because it returns a series from a different DataFrame and it has a different
# anchor. We should fix this to allow the shortcut or only allow to infer
# schema.
limit = ps.get_option("compute.shortcut_limit")
pser = self._psser.head(limit + 1)._to_internal_pandas()
transformed = pser.transform(func)
psser = Series(transformed) # type: Series
field = psser._internal.data_fields[0].normalize_spark_type()
else:
spark_return_type = return_type.spark_type
dtype = return_type.dtype
field = InternalField(
dtype=dtype,
struct_field=StructField(
name=self._psser._internal.data_spark_column_names[0],
dataType=spark_return_type,
),
)
psdf = self._psser.to_frame()
columns = psdf._internal.spark_column_names
def pandas_concat(*series: pd.Series) -> pd.DataFrame:
# The input can only be a DataFrame for struct from Spark 3.0.
            # This works around that by making the input a frame. See SPARK-27240
pdf = pd.concat(series, axis=1)
pdf.columns = columns
return pdf
def apply_func(pdf: pd.DataFrame) -> pd.DataFrame:
return func(first_series(pdf)).to_frame()
return_schema = StructType([StructField(SPARK_DEFAULT_SERIES_NAME, field.spark_type)])
output_func = GroupBy._make_pandas_df_builder_func(
psdf, apply_func, return_schema, retain_index=False
)
@pandas_udf(returnType=field.spark_type) # type: ignore
def pudf(*series: pd.Series) -> pd.Series:
return first_series(output_func(pandas_concat(*series)))
return self._psser._with_new_scol(
scol=pudf(*psdf._internal.spark_columns).alias(field.name), field=field
)
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.accessors
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.accessors.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.accessors tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.accessors,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
aerokappa/SantaClaus | processInput.py | 1 | 1310 | from loadInput import loadInput
from findGiftWeight import findGiftWeight
import numpy as np
import pandas as pd
def processInput( fileName ):
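    # Build two tables: a per-gift list annotated with its type (the prefix of
    # GiftId before '_', e.g. a hypothetical 'horse_0' -> 'horse') and a sampled
    # weight, plus a per-type summary used later for packing bookkeeping.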
giftList = loadInput( fileName )
nGifts = len(giftList)
giftList['GiftType'] = giftList.GiftId.apply(lambda x: x.split('_')[0])
giftList['GiftWeight'] = np.zeros(nGifts)
giftList['GiftWeight'] = giftList.GiftType.apply(lambda x: findGiftWeight(x,1)[0])
giftListSummary = pd.DataFrame()
giftListSummary['GiftType'] = giftList['GiftType'].unique()
nGiftTypes = len(giftListSummary['GiftType'])
giftListSummary['nGifts'] = giftListSummary.GiftType.apply(lambda x : len(giftList[giftList['GiftType']==x]))
giftListSummary['nGiftsPacked'] = 0
giftListSummary['nGiftsNotPacked'] = giftListSummary.GiftType.apply(lambda x : len(giftList[giftList['GiftType']==x]))
giftListSummary['weight_average'] = np.zeros(nGiftTypes)
giftListSummary['weight_STD'] = np.zeros(nGiftTypes)
n = 100000 #an arbitrarily large number for statistical analysis
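    # Estimate each gift type's mean weight and standard deviation from n
    # random draws of findGiftWeight.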
for i in np.arange(nGiftTypes):
x = findGiftWeight(giftListSummary['GiftType'][i], n)
giftListSummary['weight_average'][i] = np.average(x)
giftListSummary['weight_STD'][i] = np.std(x)
return giftList, giftListSummary | mit |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/legend.py | 69 | 30705 | """
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Return value is a sequence of text, line instances that make
up the legend
"""
from __future__ import division
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection
from matplotlib.transforms import Bbox
from matplotlib.offsetbox import HPacker, VPacker, PackerBase, TextArea, DrawingArea
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of the normalized coordinate values with
    respect to its parent.
Return value is a sequence of text, line instances that make
up the legend
"""
codes = {'best' : 0, # only implemented for axis legends
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
}
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc = None,
numpoints = None, # the number of points in the legend line
markerscale = None, # the relative size of legend markers vs. original
scatterpoints = 3, # TODO: may be an rcParam
scatteryoffsets=None,
prop = None, # properties for the legend texts
# the following dimensions are in axes coords
pad = None, # deprecated; use borderpad
labelsep = None, # deprecated; use labelspacing
handlelen = None, # deprecated; use handlelength
handletextsep = None, # deprecated; use handletextpad
axespad = None, # deprecated; use borderaxespad
                 # spacing & pad defined as a fraction of the font-size
borderpad = None, # the whitespace inside the legend border
labelspacing=None, #the vertical space between the legend entries
handlelength=None, # the length of the legend handles
handletextpad=None, # the pad between the legend handle and text
borderaxespad=None, # the pad between the axes and legend border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns. None, "expand"
fancybox=None, # True use a fancy box, false use a rounded box, none use rc
shadow = None,
):
"""
- *parent* : the artist that contains the legend
- *handles* : a list of artists (lines, patches) to add to the legend
- *labels* : a list of strings to label the legend
Optional keyword arguments:
================ ==================================================================
Keyword Description
================ ==================================================================
loc a location code or a tuple of coordinates
numpoints the number of points in the legend line
prop the font property
markerscale the relative size of legend markers vs. original
fancybox if True, draw a frame with a round fancybox. If None, use rc
shadow if True, draw a shadow behind legend
scatteryoffsets a list of yoffsets for scatter symbols in legend
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
The dimensions of pad and spacing are given as a fraction of the
fontsize. Values from rcParams will be used if None.
"""
from matplotlib.axes import Axes # local import only to avoid circularity
from matplotlib.figure import Figure # local import only to avoid circularity
Artist.__init__(self)
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
else:
self.prop=prop
self.fontsize = self.prop.get_size_in_points()
propnames=['numpoints', 'markerscale', 'shadow', "columnspacing",
"scatterpoints"]
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend."+name]
else:
value = localdict[name]
setattr(self, name, value)
# Take care the deprecated keywords
deprecated_kwds = {"pad":"borderpad",
"labelsep":"labelspacing",
"handlelen":"handlelength",
"handletextsep":"handletextpad",
"axespad":"borderaxespad"}
        # convert values of deprecated keywords (given in axes coords)
        # to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height)/self.fontsize
for k, v in deprecated_kwds.items():
            # use deprecated value if not None and if their newer
            # counterpart is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
DeprecationWarning)
setattr(self, v, localdict[k]*axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend."+v])
else:
setattr(self, v, localdict[v])
del localdict
self._ncol = ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be >= 0; it was %d"% numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3./8., 4./8., 2.5/8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.numpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets, reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent,Axes):
self.isaxes = True
self.set_figure(parent.figure)
elif isinstance(parent,Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0,'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back on "best"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._loc = loc
self._mode = mode
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox == True:
self.legendPatch.set_boxstyle("round",pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square",pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = True
# init with null renderer
self._init_legend_box(handles, labels)
self._last_fontsize_points = self.fontsize
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
for c in self.get_children():
c.set_figure(self.figure)
a.set_transform(self.get_transform())
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox+xdescent, oy+ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc)==2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.parent.bbox
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox, self.parent.bbox, renderer)
return x+xdescent, y+ydescent
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible(): return
self._update_legend_box(renderer)
renderer.open_group('legend')
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
if self._loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
def findoffset(width, height, xdescent, ydescent):
return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(findoffset)
fontsize = renderer.points_to_pixels(self.fontsize)
        # if mode == "expand", set the width of the legend_box to the
        # width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2*(self.borderaxespad+self.borderpad)*fontsize
self._legend_box.set_width(self.parent.bbox.width-pad)
if self._drawFrame:
# update the location and size of the legend
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self.fontsize
else:
return renderer.points_to_pixels(self.fontsize)
def _init_legend_box(self, handles, labels):
"""
        Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self.fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
        handle_list = [] # the list of handle instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
for l in labels:
textbox = TextArea(l, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height() * 0.7
descent = 0.
# each handle needs to be drawn inside a box of (x, y, w, h) =
        # (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# NOTE : the coordinates will be updated again in
# _update_legend_box() method.
# The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (e.g., Collections), you need to
        # manually set its transform to self.get_transform().
for handle in handles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
                # we put some pad here to compensate for the size of the
                # marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
legline.update_from(handle)
self._set_artist_props(legline) # after update
legline.set_clip_box(None)
legline.set_clip_path(None)
legline.set_drawstyle('default')
legline.set_marker('None')
handle_list.append(legline)
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
legline_marker.update_from(handle)
self._set_artist_props(legline_marker)
legline_marker.set_clip_box(None)
legline_marker.set_clip_path(None)
legline_marker.set_linestyle('None')
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
                # correspondence.
legline._legmarker = legline_marker
elif isinstance(handle, Patch):
p = Rectangle(xy=(0., 0.),
width = self.handlelength*fontsize,
height=(height-descent),
)
p.update_from(handle)
self._set_artist_props(p)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
elif isinstance(handle, LineCollection):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self._set_artist_props(legline)
legline.set_clip_box(None)
legline.set_clip_path(None)
lw = handle.get_linewidth()[0]
dashes = handle.get_dashes()[0]
color = handle.get_colors()[0]
legline.set_color(color)
legline.set_linewidth(lw)
legline.set_dashes(dashes)
handle_list.append(legline)
elif isinstance(handle, RegularPolyCollection):
#ydata = self._scatteryoffsets
ydata = height*self._scatteryoffsets
size_max, size_min = max(handle.get_sizes()),\
min(handle.get_sizes())
# we may need to scale these sizes by "markerscale"
                # attribute. But other handle types do not seem
# to care about this attribute and it is currently ignored.
if self.scatterpoints < 4:
sizes = [.5*(size_max+size_min), size_max,
size_min]
else:
sizes = (size_max-size_min)*np.linspace(0,1,self.scatterpoints)+size_min
p = type(handle)(handle.get_numsides(),
rotation=handle.get_rotation(),
sizes=sizes,
offsets=zip(xdata_marker,ydata),
transOffset=self.get_transform(),
)
p.update_from(handle)
p.set_figure(self.figure)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
else:
handle_list.append(None)
handlebox = DrawingArea(width=self.handlelength*fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handle_list[-1]
handlebox.add_artist(handle)
if hasattr(handle, "_legmarker"):
handlebox.add_artist(handle._legmarker)
handleboxes.append(handlebox)
        # We calculate the number of rows in each column. The first
        # (num_largecol) columns will have (nrows+1) rows, and the remaining
# (num_smallcol) columns will have (nrows) rows.
nrows, num_largecol = divmod(len(handleboxes), self._ncol)
num_smallcol = self._ncol-num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0, num_largecol*(nrows+1), (nrows+1)),
[nrows+1] * num_largecol)
smallcol = safezip(range(num_largecol*(nrows+1), len(handleboxes), nrows),
[nrows] * num_smallcol)
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol+smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad*fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0+di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing*fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing*fontsize
self._legend_box = HPacker(pad=self.borderpad*fontsize,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _update_legend_box(self, renderer):
"""
        Update the dimension of the legend_box. This is required
        because the paddings, the handle size, etc. depend on the dpi
        of the renderer.
"""
# fontsize in points.
fontsize = renderer.points_to_pixels(self.fontsize)
if self._last_fontsize_points == fontsize:
# no update is needed
return
# each handle needs to be drawn inside a box of
# (x, y, w, h) = (0, -descent, width, height).
        # And their coordinates should be given in the display coordinates.
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height(renderer) * 0.7
descent = 0.
for handle in self.legendHandles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
                # we put some pad here to compensate for the size of the
                # marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
legline = handle
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline.set_data(xdata, ydata)
legline_marker = legline._legmarker
legline_marker.set_data(xdata_marker, ydata[:len(xdata_marker)])
elif isinstance(handle, Patch):
p = handle
p.set_bounds(0., 0.,
self.handlelength*fontsize,
(height-descent),
)
elif isinstance(handle, RegularPolyCollection):
p = handle
ydata = height*self._scatteryoffsets
p.set_offsets(zip(xdata_marker,ydata))
# correction factor
cor = fontsize / self._last_fontsize_points
# helper function to iterate over all children
def all_children(parent):
yield parent
for c in parent.get_children():
for cc in all_children(c): yield cc
#now update paddings
for box in all_children(self._legend_box):
if isinstance(box, PackerBase):
box.pad = box.pad * cor
box.sep = box.sep * cor
elif isinstance(box, DrawingArea):
box.width = self.handlelength*fontsize
box.height = height
box.xdescent = 0.
box.ydescent=descent
self._last_fontsize_points = fontsize
def _auto_legend_data(self):
"""
Returns list of vertices and extents covered by the plot.
        Returns a three-element list.
        First element is a list of (x, y) vertices (in
        display-coordinates) covered by the lines and line
        collections in the axes.
        Second element is a list of bounding boxes for all the patches
        in the axes.
        Third element is a list of the transformed paths of the axes' lines.
"""
assert self.isaxes # should always hold because function is only called internally
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self._drawFrame = b
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch', [h for h in self.legendHandles if isinstance(h, Patch)])
def get_texts(self):
        'return a list of text.Text instances in the legend'
return silent_list('Text', self.texts)
def get_window_extent(self):
        'return the extent of the legend'
return self.legendPatch.get_window_extent()
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding "best".
        - bbox: bbox to be placed, display coordinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
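        # Map the legend location codes to the compass-point anchor codes
        # understood by Bbox.anchored (e.g. upper-right -> "NE").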
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self.fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
assert self.isaxes # should always hold because function is only called internally
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.parent.bbox, renderer) for x in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
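            # "badness" measures how much this candidate position collides
            # with the plotted data: vertices contained in the box, patch
            # bounding boxes overlapped, and lines intersected; 0 means the
            # spot is completely free.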
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
        # NOTE: list.sort() is stable. But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
| agpl-3.0 |
facaiy/spark | python/pyspark/testing/sqlutils.py | 7 | 8090 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, UserDefinedType, Row
from pyspark.testing.utils import ReusedPySparkTestCase
from pyspark.util import _exception_message
pandas_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = _exception_message(e)
pyarrow_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = _exception_message(e)
test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
test_not_compiled_message = _exception_message(e)
have_pandas = pandas_requirement_message is None
have_pyarrow = pyarrow_requirement_message is None
test_compiled = test_not_compiled_message is None
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
    User-defined type (UDT) for PythonOnlyPoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
    An example class to demonstrate UDT in Python only
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
This util assumes the instance of this to have 'spark' attribute, having a spark session.
    It is usually used with the 'ReusedSQLTestCase' class, but can be used on its own if you
    are sure that the implementation of this class has a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
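    # Illustrative usage sketch (not part of the original file): inside a test
    # that mixes in SQLTestUtils, a temporary setting could be applied with
    #
    #     with self.sql_conf({"spark.sql.shuffle.partitions": "4"}):
    #         ...  # code that should observe the temporary value
    #
    # and the previous value (or unset state) is restored when the block exits.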
@contextmanager
def database(self, *databases):
"""
        A convenient context manager to test with some specific databases. This drops the given
        databases if they exist and sets the current database to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
        A convenient context manager to test with some specific tables. This drops the given tables
        if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
        A convenient context manager to test with some specific views. This drops the given views
        if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
        A convenient context manager to test with some specific functions. This drops the given
        functions if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super(ReusedSQLTestCase, cls).setUpClass()
cls.spark = SparkSession(cls.sc)
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def assertPandasEqual(self, expected, result):
msg = ("DataFrames are not equal: " +
"\n\nExpected:\n%s\n%s" % (expected, expected.dtypes) +
"\n\nResult:\n%s\n%s" % (result, result.dtypes))
self.assertTrue(expected.equals(result), msg=msg)
| apache-2.0 |
ashhher3/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` defines an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components required to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible distortion ``eps``
drastically reduces the minimal number of dimensions ``n_components`` for a
given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
  features in total are projected using a sparse random matrix to smaller
  Euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64 features
in the input space, does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
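# Illustrative sanity check (not part of the original example; the variable
# names below are ours): evaluate the bound quoted in the module docstring
# for a couple of (n_samples, eps) pairs.
for _n_samples, _eps in [(500, 0.5), (500, 0.1)]:
    _bound = johnson_lindenstrauss_min_dim(_n_samples, eps=_eps)
    print("JL bound: n_samples=%d, eps=%.1f -> n_components >= %d"
          % (_n_samples, _eps, _bound))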
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
harisbal/pandas | pandas/tests/frame/test_alter_axes.py | 1 | 53247 | # -*- coding: utf-8 -*-
from __future__ import print_function
import inspect
import pytest
from datetime import datetime, timedelta
import numpy as np
from pandas.compat import lrange, PY2
from pandas import (DataFrame, Series, Index, MultiIndex, RangeIndex,
IntervalIndex, DatetimeIndex, Categorical, cut,
Timestamp, date_range, to_datetime)
from pandas.core.dtypes.common import (
is_object_dtype,
is_categorical_dtype,
is_interval_dtype)
import pandas.util.testing as tm
class TestDataFrameAlterAxes():
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with tm.assert_raises_regex(ValueError, 'Length mismatch'):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with tm.assert_raises_regex(ValueError, 'Length mismatch'):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame({'A': [1.1, 2.2, 3.3], 'B': [5.0, 6.1, 7.2]},
index=[2010, 2011, 2012])
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'],
('tuple', 'as', 'label')])
@pytest.mark.parametrize('inplace', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols,
drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'],
('tuple', 'as', 'label')])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays([df.index] + [df[x] for x in keys],
names=[None] + keys)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'],
('tuple', 'as', 'label')])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols,
drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(['D'], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(['D'] + keys,
drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
expected = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index('key')
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize('box', [Series, Index, np.array,
list, tuple, iter, lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x])])
@pytest.mark.parametrize('append, index_name', [(True, None),
(True, 'B'), (True, 'test'), (False, None)])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_single_array(self, frame_of_index_cols,
drop, append, index_name, box):
df = frame_of_index_cols
df.index.name = index_name
key = box(df['B'])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with tm.assert_raises_regex(KeyError, msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/tuple/iter/list-of-list "forget" the name of B
name_mi = getattr(key, 'names', None)
name = [getattr(key, 'name', None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(['B'], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize('box', [Series, Index, np.array,
list, tuple, iter,
lambda x: MultiIndex.from_arrays([x])])
@pytest.mark.parametrize('append, index_name',
[(True, None), (True, 'A'), (True, 'B'),
(True, 'test'), (False, None)])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_arrays(self, frame_of_index_cols,
drop, append, index_name, box):
df = frame_of_index_cols
df.index.name = index_name
keys = ['A', box(df['B'])]
# np.array/list/tuple/iter "forget" the name of B
names = ['A', None if box in [np.array, list, tuple, iter] else 'B']
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(['A', 'B'], drop=False, append=append)
expected = expected.drop('A', axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize('box2', [Series, Index, np.array,
list, tuple, iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name])
@pytest.mark.parametrize('box1', [Series, Index, np.array,
list, tuple, iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name])
@pytest.mark.parametrize('append, index_name', [(True, None),
(True, 'A'), (True, 'test'), (False, None)])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop,
append, index_name, box1, box2):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df['A']), box2(df['A'])]
result = df.set_index(keys, drop=drop, append=append)
# if either box was iter, the content has been consumed; re-read it
keys = [box1(df['A']), box2(df['A'])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = False if (keys[0] is 'A' and keys[1] is 'A') else drop
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be illegal
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('append', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols,
drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(['A', 'B'], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with tm.assert_raises_regex(ValueError,
'Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# with MultiIndex
with tm.assert_raises_regex(ValueError,
'Index has duplicate keys'):
df.set_index([df['A'], df['A']], verify_integrity=True)
@pytest.mark.parametrize('append', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_raise(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with tm.assert_raises_regex(KeyError, "['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(['foo', 'bar', 'baz'], drop=drop, append=append)
# non-existent key in list with arrays
with tm.assert_raises_regex(KeyError, 'X'):
df.set_index([df['A'], df['B'], 'X'], drop=drop, append=append)
msg = 'The parameter "keys" may only contain a combination of.*'
# forbidden type, e.g. set
with tm.assert_raises_regex(TypeError, msg):
df.set_index(set(df['A']), drop=drop, append=append)
# forbidden type in list, e.g. set
with tm.assert_raises_regex(TypeError, msg):
df.set_index(['A', df['A'], set(df['A'])],
drop=drop, append=append)
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = 'B'
# with Categorical
df = DataFrame({'A': np.random.randn(10),
'B': ci.values})
idf = df.set_index('B')
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({'A': np.random.randn(10),
'B': ci})
idf = df.set_index('B')
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index('B')
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(to_datetime(['2013-1-1 13:00',
'2013-1-2 14:00']),
name='B').tz_localize('US/Pacific')
df = DataFrame(np.random.randn(2, 1), columns=['A'])
expected = Series(np.array([Timestamp('2013-01-01 13:00:00-0800',
tz='US/Pacific'),
Timestamp('2013-01-02 14:00:00-0800',
tz='US/Pacific')],
dtype="object"), name='B')
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df['B'] = idx
result = df['B']
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
df['B'] = idx.to_series(index=[0, 1])
result = df['B']
comp = Series(DatetimeIndex(expected.values).tz_localize(None),
name='B')
tm.assert_series_equal(result, comp)
# list of datetimes with a tz
df['B'] = idx.to_pydatetime()
result = df['B']
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame(
[{'ts': datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo': 1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range('1/1/2011', periods=5,
freq='D', tz=tz, name='idx')
df = DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},
index=idx)
expected = DataFrame({'idx': [datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"],
utc=True).tz_convert('Europe/Rome')
df = DataFrame({'A': idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range('2006-10-29 00:00:00', periods=3,
freq='H', tz='US/Pacific')
df = DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=di).reset_index()
# single level
res = df.set_index('index')
exp = DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=Index(di, name='index'))
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(['index', 'a'])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]],
names=['index', 'a'])
exp = DataFrame({'b': [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name='x')
original = DataFrame({'x': idx, 'y': np.arange(10)})[['x', 'y']]
result = original.set_index('x')
expected = DataFrame({'y': np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame([
{'a': 1, 'p': 0},
{'a': 2, 'm': 10},
{'a': 3, 'm': 11, 'p': 20},
{'a': 4, 'm': 12, 'p': 21}
], columns=('a', 'm', 'p', 'x'))
result = df.set_index(['a', 'x'])
expected = df[['m', 'p']]
expected.index = MultiIndex.from_arrays([df['a'], df['x']],
names=['a', 'x'])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with tm.assert_raises_regex(ValueError, 'Length mismatch'):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(datetime(2015, 10, 1),
datetime(2015, 10, 1, 23),
freq='H', tz='US/Eastern')
df = DataFrame(np.random.randn(24, 1), columns=['a'], index=index)
new_index = date_range(datetime(2015, 10, 2),
datetime(2015, 10, 2, 23),
freq='H', tz='US/Eastern')
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(renamed2.rename(columns=str.upper),
float_frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index, Index(['foo', 'bar']))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(['BAR', 'FOO']))
# have to pass something
pytest.raises(TypeError, float_frame.rename)
# partial columns
renamed = float_frame.rename(columns={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.columns, Index(['A', 'B', 'foo', 'bar']))
# other axis
renamed = float_frame.T.rename(index={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.index, Index(['A', 'B', 'foo', 'bar']))
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index,
Index(['bar', 'foo'], name='name'))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis('foo')
result = float_frame.copy()
no_return = result.rename_axis('foo', inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis('bar', axis=1)
result = float_frame.copy()
no_return = result.rename_axis('bar', axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_warns(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with tm.assert_produces_warning(FutureWarning) as w:
df.rename_axis(id, axis=0)
assert 'rename' in str(w[0].message)
with tm.assert_produces_warning(FutureWarning) as w:
df.rename_axis({0: 10, 1: 20}, axis=0)
assert 'rename' in str(w[0].message)
with tm.assert_produces_warning(FutureWarning) as w:
df.rename_axis(id, axis=1)
assert 'rename' in str(w[0].message)
with tm.assert_produces_warning(FutureWarning) as w:
df['A'].rename_axis(id)
assert 'rename' in str(w[0].message)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([['a', 'b', 'c'], [1, 2]],
names=['ll', 'nn'])
df = DataFrame({'x': [i for i in range(len(mi))],
'y': [i * 10 for i in range(len(mi))]},
index=mi)
# Test for rename of the Index object of columns
result = df.rename_axis('cols', axis=1)
tm.assert_index_equal(result.columns,
Index(['x', 'y'], name='cols'))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={'cols': 'new'}, axis=1)
tm.assert_index_equal(result.columns,
Index(['x', 'y'], name='new'))
# Test for renaming index using dict
result = df.rename_axis(index={'ll': 'foo'})
assert result.index.names == ['foo', 'nn']
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ['LL', 'NN']
# Test for renaming index providing complete list
result = df.rename_axis(index=['foo', 'goo'])
assert result.index.names == ['foo', 'goo']
# Test for changing index and columns at same time
sdf = df.reset_index().set_index('nn').drop(columns=['ll', 'y'])
result = sdf.rename_axis(index='foo', columns='meh')
assert result.index.name == 'foo'
assert result.columns.name == 'meh'
# Test different error cases
with tm.assert_raises_regex(TypeError, 'Must pass'):
df.rename_axis(index='wrong')
with tm.assert_raises_regex(ValueError, 'Length of names'):
df.rename_axis(index=['wrong'])
with tm.assert_raises_regex(TypeError, 'bogus'):
df.rename_axis(bogus=None)
def test_rename_multiindex(self):
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(
tuples_columns, names=['fizz', 'buzz'])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar3')],
names=['foo', 'bar'])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples([('FIZZ1', 'buzz1'),
('FIZZ2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'BUZZ1'),
('fizz2', 'BUZZ2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar2')],
names=['foo', 'bar'])
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
assert (float_frame['C'] == 1.).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={'C': 'foo'})
assert 'C' in float_frame
assert 'foo' not in float_frame
c_id = id(float_frame['C'])
float_frame = float_frame.copy()
float_frame.rename(columns={'C': 'foo'}, inplace=True)
assert 'C' not in float_frame
assert 'foo' in float_frame
assert id(float_frame['foo']) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ['foo', 'bar'], 1: ['bah', 'bas'], 2: [1, 2]})
df = df.rename(columns={0: 'a'})
df = df.rename(columns={1: 'b'})
df = df.set_index(['a', 'b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1], [2]],
index=MultiIndex.from_tuples(
[('foo', 'bah'), ('bar', 'bas')],
names=['a', 'b']),
columns=['2001-01-01'])
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)],
columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)],
columns=["a"])
tm.assert_frame_equal(df, expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
tm.assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
tm.assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
tm.assert_frame_equal(result, expected)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, lab) in enumerate(zip(stacked.index.levels,
stacked.index.labels)):
values = lev.take(lab)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(deleveled['first'], deleveled2['level_0'],
check_names=False)
tm.assert_series_equal(deleveled['second'], deleveled2['level_1'],
check_names=False)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name='index')
tm.assert_series_equal(rdf['index'], exp)
# default name assigned, corner case
df = float_frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
exp = Series(float_frame.index.values, name='level_0')
tm.assert_series_equal(rdf['level_0'], exp)
# but this is ok
float_frame.index.name = 'index'
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled['index'], Series(float_frame.index))
tm.assert_index_equal(deleveled.index,
Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = 'columns'
resetted = float_frame.reset_index()
assert resetted.columns.name == 'columns'
# only remove certain columns
df = float_frame.reset_index().set_index(['index', 'A', 'B'])
rs = df.reset_index(['A', 'B'])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(['index', 'A', 'B'])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(['index', 'A', 'B'])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index('A')
xp = float_frame.reset_index().set_index(['index', 'B'])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(['index', 'A', 'B'])
rs = df.reset_index('A', drop=True)
xp = float_frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'C', 'D'],
index=Index(range(2), name='x'))
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'C', 'D'])
for levels in ['A', 'B'], [0, 1]:
# With MultiIndex
result = df.set_index(['A', 'B']).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(['A', 'B']).reset_index(level=levels,
drop=True)
tm.assert_frame_equal(result, df[['C', 'D']])
# With single-level Index (GH 16263)
result = df.set_index('A').reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index('A').reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(['A']).reset_index(level=levels[0],
drop=True)
tm.assert_frame_equal(result, df[['B', 'C', 'D']])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ['A', 'B'], ['A']:
with tm.assert_raises_regex(KeyError, 'Level E '):
df.set_index(idx_lev).reset_index(level=['A', 'E'])
with tm.assert_raises_regex(IndexError, 'Too many levels'):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted['time'].dtype == np.float64
resetted = df.reset_index()
assert resetted['time'].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index('a', )
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({'A': [np.nan, 'b', 'c'],
'B': [0, 1, 2],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, 2],
'C': [np.nan, 1.1, 2.2]})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({'A': ['a', 'b', 'c'],
'B': [np.nan, np.nan, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = DataFrame([[1, 2], [3, 4]],
columns=date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
tm.assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = DataFrame([[0, 0], [1, 1]], columns=['A', 'B'],
index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = DataFrame([[0, 0, 0], [1, 1, 1]],
columns=['index', 'A', 'B'],
index=RangeIndex(stop=2))
tm.assert_frame_equal(result, expected)
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = 'name'
assert df.set_index(df.index).index.names == ['name']
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'C', 'D'])
df = df.set_index(['A', 'B'])
assert df.set_index(df.index).index.names == ['A', 'B']
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(['C', 'D'])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_rename_objects(self, float_string_frame):
renamed = float_string_frame.rename(columns=str.upper)
assert 'FOO' in renamed
assert 'foo' not in renamed
def test_rename_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['X', 'Y'])
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y'])
result = df.rename(str.lower, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis='columns')
tm.assert_frame_equal(result, expected)
result = df.rename({"A": 'a', 'B': 'b'}, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename({"A": 'a', 'B': 'b'}, axis='columns')
tm.assert_frame_equal(result, expected)
# Index
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y'])
result = df.rename(str.lower, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis='index')
tm.assert_frame_equal(result, expected)
result = df.rename({'X': 'x', 'Y': 'y'}, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename({'X': 'x', 'Y': 'y'}, axis='index')
tm.assert_frame_equal(result, expected)
result = df.rename(mapper=str.lower, axis='index')
tm.assert_frame_equal(result, expected)
def test_rename_mapper_multi(self):
df = DataFrame({"A": ['a', 'b'], "B": ['c', 'd'],
'C': [1, 2]}).set_index(["A", "B"])
result = df.rename(str.upper)
expected = df.rename(index=str.upper)
tm.assert_frame_equal(result, expected)
def test_rename_positional_named(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y'])
result = df.rename(str.lower, columns=str.upper)
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_rename_axis_style_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['0', '1'])
# Named target and axis
with tm.assert_raises_regex(TypeError, None):
df.rename(index=str.lower, axis=1)
with tm.assert_raises_regex(TypeError, None):
df.rename(index=str.lower, axis='columns')
with tm.assert_raises_regex(TypeError, None):
df.rename(index=str.lower, axis='columns')
with tm.assert_raises_regex(TypeError, None):
df.rename(columns=str.lower, axis='columns')
with tm.assert_raises_regex(TypeError, None):
df.rename(index=str.lower, axis=0)
# Multiple targets and axis
with tm.assert_raises_regex(TypeError, None):
df.rename(str.lower, str.lower, axis='columns')
# Too many targets
with tm.assert_raises_regex(TypeError, None):
df.rename(str.lower, str.lower, str.lower)
# Duplicates
with tm.assert_raises_regex(TypeError, "multiple values"):
df.rename(id, mapper=id)
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.reindex(['b', 'a'])
res2 = df.reindex(index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'])
res4 = df.reindex(labels=['b', 'a'], axis=0)
res5 = df.reindex(['b', 'a'], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=['e', 'd'])
res2 = df.reindex(['e', 'd'], axis=1)
res3 = df.reindex(labels=['e', 'd'], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=['b', 'a'], columns=['e', 'd'])
res2 = df.reindex(columns=['e', 'd'], index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'],
axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_rename_positional(self):
df = DataFrame(columns=['A', 'B'])
with tm.assert_produces_warning(FutureWarning) as rec:
result = df.rename(None, str.lower)
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
assert len(rec) == 1
message = str(rec[0].message)
assert 'rename' in message
assert 'Use named arguments' in message
def test_assign_columns(self, float_frame):
float_frame['hi'] = 'there'
df = float_frame.copy()
df.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
tm.assert_series_equal(float_frame['C'], df['baz'], check_names=False)
tm.assert_series_equal(float_frame['hi'], df['foo2'],
check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
for cols in ['C1', 'C2', ['A', 'C1'], ['A', 'C2'], ['C1', 'C2']]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
def test_ambiguous_warns(self):
df = DataFrame({"A": [1, 2]})
with tm.assert_produces_warning(FutureWarning):
df.rename(id, id)
with tm.assert_produces_warning(FutureWarning):
df.rename({0: 10}, {"A": "B"})
@pytest.mark.skipif(PY2, reason="inspect.signature")
def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {"self", "mapper", "index", "columns", "axis",
"inplace", "copy", "level"}
@pytest.mark.skipif(PY2, reason="inspect.signature")
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {"self", "labels", "index", "columns", "axis",
"limit", "copy", "level", "method",
"fill_value", "tolerance"}
def test_droplevel(self):
# GH20342
df = DataFrame([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]
])
df = df.set_index([0, 1]).rename_axis(['a', 'b'])
df.columns = MultiIndex.from_tuples([('c', 'e'), ('d', 'f')],
names=['level_1', 'level_2'])
# test that dropping of a level in index works
expected = df.reset_index('a', drop=True)
result = df.droplevel('a', axis='index')
tm.assert_frame_equal(result, expected)
# test that dropping of a level in columns works
expected = df.copy()
expected.columns = Index(['c', 'd'], name='level_1')
result = df.droplevel('level_2', axis='columns')
tm.assert_frame_equal(result, expected)
class TestIntervalIndex(object):
def test_setitem(self):
df = DataFrame({'A': range(10)})
s = cut(df.A, 5)
assert isinstance(s.cat.categories, IntervalIndex)
# B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
df['B'] = s
df['C'] = np.array(s)
df['D'] = s.values
df['E'] = np.array(s.values)
assert is_categorical_dtype(df['B'])
assert is_interval_dtype(df['B'].cat.categories)
assert is_categorical_dtype(df['D'])
assert is_interval_dtype(df['D'].cat.categories)
assert is_object_dtype(df['C'])
assert is_object_dtype(df['E'])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B), check_names=False)
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df['B'], df['B'], check_names=False)
tm.assert_series_equal(df['B'], df['D'], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df['C'], df['C'], check_names=False)
tm.assert_series_equal(df['C'], df['E'], check_names=False)
def test_set_reset_index(self):
df = DataFrame({'A': range(10)})
s = cut(df.A, 5)
df['B'] = s
df = df.set_index('B')
df = df.reset_index()
def test_set_axis_inplace(self):
# GH14636
df = DataFrame({'A': [1.1, 2.2, 3.3],
'B': [5.0, 6.1, 7.2],
'C': [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012])
expected = {0: df.copy(),
1: df.copy()}
expected[0].index = list('abc')
expected[1].columns = list('abc')
expected['index'] = expected[0]
expected['columns'] = expected[1]
for axis in expected:
# inplace=True
# The FutureWarning comes from the fact that we would like to have
# inplace default to False some day
for inplace, warn in (None, FutureWarning), (True, None):
kwargs = {'inplace': inplace}
result = df.copy()
with tm.assert_produces_warning(warn):
result.set_axis(list('abc'), axis=axis, **kwargs)
tm.assert_frame_equal(result, expected[axis])
# inplace=False
result = df.set_axis(list('abc'), axis=axis, inplace=False)
tm.assert_frame_equal(expected[axis], result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
result = df.set_axis(list('abc'), inplace=False)
tm.assert_frame_equal(result, expected[0])
# wrong values for the "axis" parameter
for axis in 3, 'foo':
with tm.assert_raises_regex(ValueError, 'No axis named'):
df.set_axis(list('abc'), axis=axis, inplace=False)
def test_set_axis_prior_to_deprecation_signature(self):
df = DataFrame({'A': [1.1, 2.2, 3.3],
'B': [5.0, 6.1, 7.2],
'C': [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012])
expected = {0: df.copy(),
1: df.copy()}
expected[0].index = list('abc')
expected[1].columns = list('abc')
expected['index'] = expected[0]
expected['columns'] = expected[1]
# old signature
for axis in expected:
with tm.assert_produces_warning(FutureWarning):
result = df.set_axis(axis, list('abc'), inplace=False)
tm.assert_frame_equal(result, expected[axis])
| bsd-3-clause |
manashmndl/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
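# A minimal follow-up sketch of the idea mentioned in the introduction: a purer
# class-B subset can be obtained by keeping only samples whose decision score
# exceeds a chosen cut-off.  The threshold of 0.5 is an arbitrary illustrative value.
score_threshold = 0.5
selected = twoclass_output > score_threshold
purity_B = np.mean(y[selected] == 1) if selected.any() else float('nan')
print("Samples with score > %.1f: %d (fraction of true class B: %.2f)"
      % (score_threshold, selected.sum(), purity_B))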
| bsd-3-clause |
unoebauer/public-astro-tools | wind_structure/wind_structure.py | 1 | 19134 | # MIT License
#
# Copyright (c) 2017 Ulrich Noebauer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module providing a number of basic calculators to determine the steady-state
structure of line-driven hot star winds analytically. In particular, the
predictions according to
* Castor, Abbott and Klein 1975
* Friend and Abbott 1986
* Kudritzki, Pauldrach, Puls and Abbott 1989
are included. Throughout this module, we rely heavily on the standard text
book on stellar winds by Lamers and Cassinelli 1999.
Note
----
We assume a so-called frozen-in ionization throughout the wind in all the wind
structure calculators (i.e. delta=0 in terms of the CAK force multipliers).
References
----------
* Castor, Abbott and Klein 1975 (CAK75)
'Radiation-driven winds in Of stars', Astrophysical Journal,
1975, 195, 157-174
* Friend and Abbott 1986 (FA86)
'The theory of radiatively driven stellar winds. III - Wind models with
finite disk correction and rotation', Astrophysical Journal,
1986, 311, 701-707
* Kudritzki, Pauldrach, Puls and Abbott 1989 (KPPA89)
'Radiation-driven winds of hot stars. VI - Analytical solutions for wind
models including the finite cone angle effect', Astronomy & Astrophysics,
1989, 219, 205-218
* Noebauer and Sim 2015 (NS15)
'Self-consistent modelling of line-driven hot-star winds with Monte Carlo
radiation hydrodynamics', Monthly Notices of the Royal Astronomical Society,
2015, 453, 3120-3134
* Lamers and Cassinelli 1999 (LC99)
'Introduction to Stellar Winds', Cambridge University Press, 1999
"""
from __future__ import print_function
import numpy as np
import scipy.integrate as integ
import astropy.units as units
import astropy.constants as csts
from astropy.utils.decorators import lazyproperty
def _test_unit(val, final_unit_string, initial_unit_string):
"""helper routine to add unit to a quantity and convert to different units
If val is not yet an astropy.units.quantity, it is assumed that it is
given in units specified by the initial_unit_string. The input is then
returned after converting to the units given by the final_unit_string.
Parameters
----------
val : float, int, np.ndarray
scalar or vector input, can either already be an astropy quantity or
not
final_unit_string : str
string describing the desired final units for the input
initial_unit_string : str
string describing the assumed initial units of the input
Returns
-------
res : astropy.units.quantity
input converted to units given by final_unit_string
"""
try:
val.to(final_unit_string)
except AttributeError:
val = val * units.Unit(initial_unit_string)
res = val.to(final_unit_string)
return res
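# A minimal usage sketch of _test_unit (illustrative only): a bare float is
# interpreted in the initial units, while an astropy quantity is converted directly,
# so both calls below return one solar mass expressed in grams.
def _test_unit_sketch():
    from_float = _test_unit(1.0, "g", "solMass")
    from_quantity = _test_unit(1.0 * units.solMass, "g", "solMass")
    return from_float, from_quantity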
class StarBase(object):
"""Base class containing the fundamental properties of the central star
Parameters
---------
mass : float, astropy.units.quantity
stellar mass, if dimensionless, it is assumed to be in units of solar
masses (default 52.5)
lum : float, astropy.units.quantity
stellar luminosity, if dimensionless, it is assumed to be in units of
solar luminosities (default 1e6)
teff : float, astropy.units.quantity
effective temperature, if dimensionless, it is assumed to be Kelvin
(default 4.2e4)
gamma : float
Eddington factor due to electron scattering (default 0)
sigma : float, astropy.units.quantity
reference specific electron scattering cross section, if dimensionless
it is assumed to be in units of cm^2/g (default 0.3)
"""
def __init__(self, mass=52.5, lum=1e6, teff=4.2e4, gamma=0, sigma=0.3):
self.mass = mass
self.lum = lum
self.teff = teff
self.sigma = sigma
self.gamma = gamma
@property
def mass(self):
return self._mass
@mass.setter
def mass(self, val):
val = _test_unit(val, "g", "solMass")
self._mass = val
@property
def lum(self):
return self._lum
@lum.setter
def lum(self, val):
val = _test_unit(val, "erg/s", "solLum")
self._lum = val
@property
def teff(self):
return self._teff
@teff.setter
def teff(self, val):
val = _test_unit(val, "K", "K")
self._teff = val
@property
def sigma(self):
return self._sigma
@sigma.setter
def sigma(self, val):
val = _test_unit(val, "cm^2/g", "cm^2/g")
self._sigma = val
@lazyproperty
def rad(self):
"""stellar radius"""
rad = np.sqrt(self.lum /
(4 * np.pi * csts.sigma_sb * self.teff**4))
return rad
@lazyproperty
def vth(self):
"""thermal velocity (see LC99, eqs. 8.8, 8.83)"""
vth = np.sqrt(2. * self.teff * csts.k_B / csts.u)
return vth
@lazyproperty
def vesc(self):
"""escape velocity from stellar surface, accounting for electron
scattering (see LC99, eq. 2.39)"""
vesc = np.sqrt(2. * csts.G * self.mass * (1. - self.gamma) / self.rad)
return vesc
class WindBase(object):
"""Base class containing the fundamental properties of the wind
Parameters
----------
alpha : float
CAK force multiplier parameter, see LC99, eq. 8.86 (default 0.6)
k : float
CAK force multiplier parameter, see LC99, eq. 8.86 (default 0.5)
"""
def __init__(self, alpha=0.6, k=0.5):
self.alpha = alpha
self.k = k
class WindStructureBase(object):
"""Base class describing the basic structure of a star+wind system
Parameters
----------
mstar : float, astropy.units.quantity
stellar mass, see StarBase for details (default 52.5)
lstar : float, astropy.units.quantity
stellar luminosity, see StarBase for details (default 1e6)
teff : float, astropy.units.quantity
effective temperature, see StarBase for details (default 4.2e4)
alpha : float
force multiplier parameter, see WindBase for details (default, 0.6)
k : float
force multiplier parameter, see WindBase for details (default, 0.5)
gamma : float
Eddington factor, see StarBase for details (default 0)
sigma : float, astropy.units.quantity
reference electron scattering cross section, see StarBase for details
(default 0.3)
"""
def __init__(self, mstar=52.5, lstar=1e6, teff=4.2e4, alpha=0.6, k=0.5,
gamma=0, sigma=0.3):
self.star = StarBase(mass=mstar, lum=lstar, teff=teff, gamma=gamma,
sigma=sigma)
self.wind = WindBase(alpha=alpha, k=k)
@lazyproperty
def eta(self):
"""wind efficiency (see LC99, eq. 8.20)"""
eta = (self.mdot * self.vterm * csts.c / self.star.lum).to("").value
return eta
@lazyproperty
def t(self):
"""CAK dimensionless optical depth, c.f. LC99, eq. 8.82 and 8.104"""
t = (self.mdot * self.star.sigma * self.star.vth /
(2. * np.pi * self.vterm**2 * self.star.rad)).to("")
return t
@lazyproperty
def m(self):
"""CAK force multiplier, assuming frozen-in ionization, c.f. LC99, eq.
8.86 with delta=0"""
m = self.wind.k * self.t**(-self.wind.alpha)
return m
class BaseVelocityDensityMixin(object):
"""Mixin class providing routines to calculate the wind velocity and
density structure"""
def v(self, x):
"""calculate wind velocity according to CAK at given location
C.f. LC99, eq. 8.104
Parameters
----------
x : float, np.ndarray
dimensionless position, i.e. r/Rstar
Returns
-------
v : float, np.ndarray
wind velocity
"""
return (self.vterm * np.sqrt(1. - 1. / x)).to("km/s")
def rho(self, x):
"""calculate wind density at given location
C.f. LC99, eq. 3.1
Parameters
----------
x : float, np.ndarray
dimensionless position, i.e. r/Rstar
Returns
-------
rho : float, np.ndarray
wind density
"""
r = self.star.rad * x
return (self.mdot /
(4. * np.pi * r**2 * self.v(x))).to("g/cm^3")
class BaseCakStructureMixin(object):
"""Mixin class providing the CAK mass loss rate and terminal wind speed"""
@lazyproperty
def mdot_cak(self):
"""Mass-loss rate according to CAK75, see LC99, eq. 8.105"""
mdot_cak = ((4. * np.pi / (self.star.sigma * self.star.vth)) *
(self.star.sigma / (4. * np.pi))**(1. / self.wind.alpha) *
((1. - self.wind.alpha) / self.wind.alpha)**(
(1. - self.wind.alpha) / self.wind.alpha) *
(self.wind.alpha * self.wind.k)**(1. / self.wind.alpha) *
(self.star.lum / csts.c)**(1. / self.wind.alpha) *
(csts.G * self.star.mass * (1. - self.star.gamma))**(
(self.wind.alpha - 1.) / self.wind.alpha))
return mdot_cak.to("Msun/yr")
@lazyproperty
def vterm_cak(self):
"""Terminal wind speed according to CAK75, see LC99, eq. 8.104"""
vterm_cak = (np.sqrt(self.wind.alpha / (1. - self.wind.alpha)) *
self.star.vesc)
return vterm_cak.to("km/s")
class WindStructureCak75(WindStructureBase, BaseCakStructureMixin,
BaseVelocityDensityMixin):
"""Wind Structure Calculator based on the approach by CAK75.
The wind structure is determined based on a CAK line-driving force,
assuming a frozen-in ionization state and a central point source.
Parameters
----------
see WindStructureBase
"""
def __init__(self, mstar=52.5, lstar=1e6, teff=4.2e4, alpha=0.6, k=0.5,
gamma=0, sigma=0.3):
super(WindStructureCak75, self).__init__(mstar=mstar, lstar=lstar,
teff=teff, alpha=alpha,
k=k, gamma=gamma,
sigma=sigma)
@property
def mdot(self):
"""mass loss rate, equal to basic CAK mass loss rate"""
return self.mdot_cak.to("Msun/yr")
@property
def vterm(self):
"""terminal wind speed, equal to basic CAK terminal velocity"""
return self.vterm_cak.to("km/s")
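# A minimal usage sketch of WindStructureCak75 (illustrative only): the default
# constructor arguments already describe an O-star-like configuration; see
# example() at the end of the module for a full comparison of all calculators.
def _cak75_usage_sketch():
    wind = WindStructureCak75()
    x = np.array([1.5, 2.0, 10.0])  # radii in units of the stellar radius
    return wind.mdot, wind.vterm, wind.v(x)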
class WindStructureKppa89(WindStructureBase, BaseCakStructureMixin,
BaseVelocityDensityMixin):
"""Wind Structure Calculator based on the approach by KPPA89.
The wind structure is determined based on a CAK line-driving force,
assuming a frozen-in ionization state but taking the finite size of the
central star into account.
Parameters
----------
see WindStructureBase
beta : float
exponent for the beta-type velocity law (default 0.8, see LC99, sec.
8.9.2 iii)
"""
def __init__(self, mstar=52.5, lstar=1e6, teff=4.2e4, alpha=0.6, k=0.5,
gamma=0, sigma=0.3, beta=0.8):
super(WindStructureKppa89, self).__init__(mstar=mstar, lstar=lstar,
teff=teff, alpha=alpha,
k=k, gamma=gamma,
sigma=sigma)
self.f1 = 1. / (self.wind.alpha + 1.)
self.beta = beta
@lazyproperty
def mdot(self):
"""mass loss rate, see KPPA89, eq. 31"""
mdot = self.f1**(1. / self.wind.alpha) * self.mdot_cak
return mdot.to("Msun/yr")
@lazyproperty
def vterm(self):
"""terminal wind speed, see KPPA89, eq. 39"""
vterm = self.vterm_cak * np.sqrt(integ.quad(self.z, 0, 1)[0])
return vterm.to("km/s")
def h(self, x):
"""see KPPA89, eq. 14"""
return (x - 1.) / self.beta
def f(self, x):
"""see KPPA89, eqs. 16, 15"""
return (1. / (self.wind.alpha + 1.) * x**2 / (1. - self.h(x)) *
(1. - (1. - 1. / x**2 + self.h(x) / x**2)**(
self.wind.alpha + 1.)))
def fn(self, x):
"""see KPPA89, eq. 35"""
return self.f(x) / self.f1
def z(self, u):
"""see KPPA89, eq. 36"""
x = 1. / u
z = (self.fn(x)**(1. / (1. - self.wind.alpha)) *
(1. + np.sqrt(2. / self.wind.alpha *
(1. - (1. / self.fn(x))**(
1. / (1. - self.wind.alpha))))))
return z
def _v_scalar(self, x):
"""see KPPA89, eq. 36"""
u = 1. / x
I = integ.quad(self.z, u, 1)[0]
vesc2 = (2. * csts.G * self.star.mass *
(1. - self.star.gamma) / self.star.rad)
v = np.sqrt(self.wind.alpha / (1. - self.wind.alpha) * vesc2 * I).to(
"km/s")
return v.value
def v(self, x):
"""calculate wind velocity according to KPPA89 at given location
Parameters
----------
x : float, np.ndarray
dimensionless position, i.e. r/Rstar
Returns
-------
v : float, np.ndarray
wind velocity
"""
if type(x) is np.ndarray:
v = np.array([self._v_scalar(xi) for xi in x])
else:
v = self._v_scalar(x)
v = v * units.km / units.s
return v.to("km/s")
class WindStructureFa86(WindStructureBase, BaseCakStructureMixin,
BaseVelocityDensityMixin):
"""Wind Structure Calculator based on the approach by FA86.
The wind structure is determined based on a CAK line-driving force,
assuming a frozen-in ionization state but taking the finite size of the
central star into account. All expressions for the wind properties result
from fits to the numerical simulations as presented by FA86.
Parameters
----------
see WindStructureBase
"""
def __init__(self, mstar=52.5, lstar=1e6, teff=4.2e4, alpha=0.6, k=0.5,
gamma=0, sigma=0.3):
super(WindStructureFa86, self).__init__(mstar=mstar, lstar=lstar,
teff=teff, alpha=alpha,
k=k, gamma=gamma,
sigma=sigma)
@lazyproperty
def mdot(self):
"""see FA86, eq. 9"""
mdot = (self.mdot_cak * 0.5 *
(self.star.vesc / (1e3 * units.km / units.s))**(-0.3))
return mdot.to("Msun/yr")
@lazyproperty
def vterm(self):
"""see FA86, eq. 8"""
vterm = (self.star.vesc * 2.2 * self.wind.alpha /
(1. - self.wind.alpha) *
(self.star.vesc / (1e3 * units.km / units.s))**0.2)
return vterm.to("km/s")
def v(self, x):
"""calculate wind velocity according to KPPA89 at given location
See FA86, eq. 11
Parameters
----------
x : float, np.ndarray
dimensionless position, i.e. r/Rstar
Returns
-------
v : float, np.ndarray
wind velocity
"""
v = self.vterm * (1. - 1. / x)**(0.8)
return v.to("km/s")
def example():
"""Example application of the wind structure calculators.
The parameters are adopted from NS15, table 3 and are appropriate for the
O star zeta-Puppis.
"""
import matplotlib
import os
if "DISPLAY" not in os.environ:
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.rcParams["text.usetex"] = False
mstar = 52.5 * units.solMass
lstar = 1e6 * units.solLum
teff = 4.2e4 * units.K
sigma = 0.3 * units.cm**2 / units.g
k = 0.381
alpha = 0.595
gamma = 0.502
x = np.logspace(-2, 2, 512) + 1
wind_cak75 = WindStructureCak75(mstar=mstar, lstar=lstar, teff=teff, k=k,
alpha=alpha, gamma=gamma, sigma=sigma)
wind_fa86 = WindStructureFa86(mstar=mstar, lstar=lstar, teff=teff, k=k,
alpha=alpha, gamma=gamma, sigma=sigma)
wind_kppa89 = WindStructureKppa89(mstar=mstar, lstar=lstar, teff=teff, k=k,
alpha=alpha, gamma=gamma, sigma=sigma)
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(wspace=0.3, hspace=0.3)
plt.subplot(221)
plt.plot(x, wind_cak75.v(x), label="CAK75")
plt.plot(x, wind_fa86.v(x), ls="dashed", label="FA86")
plt.plot(x, wind_kppa89.v(x), ls="dashdot", label="KPPA89")
plt.yscale("log")
plt.ylim([1e2, 3e3])
plt.xlim([0.8, 10])
plt.xlabel(r"$r/R_{\star}$")
plt.ylabel(r"$v$ [km/s]")
plt.legend(frameon=False)
plt.subplot(222)
plt.plot(x - 1, wind_cak75.v(x) / wind_cak75.vterm, label="CAK75")
plt.plot(x - 1, wind_fa86.v(x) / wind_fa86.vterm, ls="dashed",
label="FA86")
plt.plot(x - 1, wind_kppa89.v(x) / wind_kppa89.vterm, ls="dashdot",
label="KPPA86")
plt.xscale("log")
plt.xlim([1e-2, 1e2])
plt.ylim([0, 1])
plt.xlabel(r"$r/R_{\star} - 1$")
plt.ylabel(r"$v/v_{\infty}$")
plt.subplot(223)
plt.plot(x, wind_cak75.rho(x), label="CAK75")
plt.plot(x, wind_fa86.rho(x), ls="dashed", label="FA86")
plt.plot(x, wind_kppa89.rho(x), ls="dashdot", label="KPPA89")
plt.yscale("log")
plt.ylim([1e-15, 1e-10])
plt.xlim([0.8, 10])
plt.xlabel(r"$r/R_{\star}$")
plt.ylabel(r"$\rho$ $[\mathrm{g\,cm^{-3}}]$")
plt.subplot(224)
plt.plot(x, wind_cak75.mdot * np.ones(len(x)))
plt.plot(x, wind_fa86.mdot * np.ones(len(x)), ls="dashed")
plt.plot(x, wind_kppa89.mdot * np.ones(len(x)), ls="dashdot")
plt.yscale("log")
plt.xlim([0.8, 10])
plt.ylim([1e-5, 1e-4])
plt.xlabel(r"$r/R_{\star}$")
plt.ylabel(r"$\dot M$ $[\mathrm{M_{\odot}\,yr^{-1}}]$")
fig.savefig("wind_structure_example.pdf")
plt.show()
if __name__ == "__main__":
example()
| mit |
dparks1134/STAMP | stamp/plugins/multiGroups/plots/PostHocPlot.py | 1 | 18851 | #=======================================================================
# Author: Donovan Parks
#
# Post-hoc plot.
#
# Copyright 2011 Donovan Parks
#
# This file is part of STAMP.
#
# STAMP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# STAMP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with STAMP. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
from PyQt4 import QtGui, QtCore
import sys
import math
import operator
import numpy as np
from matplotlib.font_manager import FontProperties
from mpl_toolkits.axes_grid import make_axes_locatable, Size
from stamp.plugins.multiGroups.AbstractMultiGroupPlotPlugin import AbstractMultiGroupPlotPlugin, TestWindow, ConfigureDialog
from stamp.plugins.multiGroups.plots.configGUI.PostHocPlotUI import Ui_PostHocPlotDialog
from stamp.metagenomics import TableHelper
class PostHocPlot(AbstractMultiGroupPlotPlugin):
'''
Post-hoc plot.
'''
def __init__(self, preferences, parent=None):
AbstractMultiGroupPlotPlugin.__init__(self, preferences, parent)
self.name = 'Post-hoc plot'
self.type = 'Statistical'
self.bRunPostHocTest = True
self.settings = preferences['Settings']
self.figWidth = self.settings.value('multiple group: ' + self.name + '/width', 7.0).toDouble()[0]
self.figHeightPerRow = self.settings.value('multiple group: ' + self.name + '/row height', 0.2).toDouble()[0]
self.sortingField = self.settings.value('multiple group: ' + self.name + '/field', 'p-values').toString()
self.bShowBarPlot = self.settings.value('multiple group: ' + self.name + '/sequences subplot', True).toBool()
self.bShowPValueLabels = self.settings.value('multiple group: ' + self.name + '/p-value labels', True).toBool()
self.bCustomLimits = self.settings.value('multiple group: ' + self.name + '/use custom limits', False).toBool()
self.minX = self.settings.value('multiple group: ' + self.name + '/minimum', 0.0).toDouble()[0]
self.maxX = self.settings.value('multiple group: ' + self.name + '/maximum', 1.0).toDouble()[0]
self.markerSize = self.settings.value('multiple group: ' + self.name + '/marker size', 30).toInt()[0]
self.bShowStdDev = self.settings.value('multiple group: ' + self.name + '/show std. dev.', False).toBool()
self.endCapSize = self.settings.value('multiple group: ' + self.name + '/end cap size', 0.0).toInt()[0]
self.bPvalueFilter = self.settings.value('multiple group: ' + self.name + '/p-value filter', True).toBool()
def mirrorProperties(self, plotToCopy):
self.name = plotToCopy.name
self.figWidth = plotToCopy.figWidth
self.figHeightPerRow = plotToCopy.figHeightPerRow
self.sortingField = plotToCopy.sortingField
self.bShowBarPlot = plotToCopy.bShowBarPlot
self.bShowPValueLabels = plotToCopy.bShowPValueLabels
self.bCustomLimits = plotToCopy.bCustomLimits
self.minX = plotToCopy.minX
self.maxX = plotToCopy.maxX
self.markerSize = plotToCopy.markerSize
self.bShowStdDev = plotToCopy.bShowStdDev
self.endCapSize = plotToCopy.endCapSize
self.bPvalueFilter = plotToCopy.bPvalueFilter
def plot(self, profile, statsResults):
# *** Check if there is sufficient data to generate the plot
if len(statsResults.postHocResults.pValues) <= 0:
self.emptyAxis('No post-hoc test results')
return
if len(statsResults.postHocResults.pValues) > 200:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
reply = QtGui.QMessageBox.question(self, 'Continue?', 'Plot contains ' + str(len(statsResults.postHocResults.pValues)) + ' rows. ' +
'It may take several seconds to generate this plot. We recommend filtering the results first. ' +
'Do you wish to continue?', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
QtGui.QApplication.instance().restoreOverrideCursor()
if reply == QtGui.QMessageBox.No:
self.emptyAxis('Too many rows.')
return
# *** Set plot properties
axesColour = str(self.preferences['Axes colour'].name())
highlightColor = (0.9, 0.9, 0.9)
# Apply p-value filter
labels = []
pValues = []
effectSizes = []
lowerCIs = []
upperCIs = []
if self.bPvalueFilter:
for i in xrange(0, len(statsResults.postHocResults.labels)):
# get numeric p-value
if isinstance(statsResults.postHocResults.pValues[i], str):
pValueSplit = statsResults.postHocResults.pValues[i].split(' ')
if pValueSplit[0][0] == '<':
pValue = float(pValueSplit[1]) - 1e-6
else:
pValue = 1.0
else:
pValue = statsResults.postHocResults.pValues[i]
# check if p-value should be filtered
if pValue <= statsResults.postHocResults.alpha:
labels.append(statsResults.postHocResults.labels[i])
pValues.append(statsResults.postHocResults.pValues[i])
effectSizes.append(statsResults.postHocResults.effectSizes[i])
lowerCIs.append(statsResults.postHocResults.lowerCIs[i])
upperCIs.append(statsResults.postHocResults.upperCIs[i])
else:
labels = list(statsResults.postHocResults.labels)
pValues = list(statsResults.postHocResults.pValues)
effectSizes = list(statsResults.postHocResults.effectSizes)
lowerCIs = list(statsResults.postHocResults.lowerCIs)
upperCIs = list(statsResults.postHocResults.upperCIs)
if len(labels) == 0:
self.emptyAxis('No rows above nominal level.')
return
# *** Determine dominant group for each contrast (i.e., row).
# Adjust labels and effect sizes to reflect the dominant group.
for i in xrange(0, len(effectSizes)):
labelSplit = labels[i].split(':')
if effectSizes[i] > 0.0:
lowerCIs[i] = effectSizes[i] - lowerCIs[i]
upperCIs[i] = upperCIs[i] - effectSizes[i]
else:
labels[i] = labelSplit[1].strip() + ' : ' + labelSplit[0].strip()
lowerCIs[i] = effectSizes[i] - lowerCIs[i]
upperCIs[i] = upperCIs[i] - effectSizes[i]
effectSizes[i] = -effectSizes[i]
# *** Sort data
data = zip(labels, pValues, effectSizes, lowerCIs, upperCIs)
if self.sortingField == 'p-values':
data = sorted(data, key = operator.itemgetter(1), reverse = True)
elif self.sortingField == 'Effect sizes':
data = sorted(data, key = operator.itemgetter(2))
elif self.sortingField == 'Group labels':
data = sorted(data, key = lambda row: row[0].lower(), reverse = True)
labels, pValues, effectSizes, lowerCIs, upperCIs = zip(*data)
labels = list(labels)
pValues = list(pValues)
effectSizes = list(effectSizes)
lowerCIs = list(lowerCIs)
upperCIs = list(upperCIs)
# *** Make list of which group is dominant in each contrast.
dominantGroup = {}
for i in xrange(0, len(effectSizes)):
labelSplit = labels[i].split(':')
groupName = labelSplit[0].strip()
if groupName in dominantGroup:
dominantGroup[groupName][0].append(effectSizes[i])
dominantGroup[groupName][1].append(i)
else:
dominantGroup[groupName] = [[effectSizes[i]],[i]]
# *** Create p-value labels
pValueTitle = 'p-value'
pValueLabels = []
for pValue in pValues:
if isinstance(pValue, str):
pValueSplit = pValue.split(' ')
if pValue[0] == '<':
pValueLabels.append(r'$<$' + pValueSplit[1])
else:
pValueLabels.append(r'$\geq$' + pValueSplit[1])
else:
pValueLabels.append(statsResults.getPValueStr(pValue))
# *** Truncate labels
adjustedLabels = list(labels)
if self.preferences['Truncate feature names']:
length = self.preferences['Length of truncated feature names']
for i in xrange(0, len(labels)):
if len(labels[i]) > length+3:
adjustedLabels[i] = labels[i][0:length] + '...'
# *** Set figure size
plotHeight = self.figHeightPerRow*len(adjustedLabels)
self.imageWidth = self.figWidth
self.imageHeight = plotHeight + 0.65 # 0.65 inches for bottom and top labels
if self.imageWidth > 256 or self.imageHeight > 256:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.emptyAxis()
reply = QtGui.QMessageBox.question(self, 'Excessively large plot', 'The resulting plot is too large to display.')
QtGui.QApplication.instance().restoreOverrideCursor()
return
self.fig.set_size_inches(self.imageWidth, self.imageHeight)
# *** Determine width of y-axis labels
yLabelBounds = self.yLabelExtents(adjustedLabels, 8)
# *** Size plots which comprise the extended errorbar plot
self.fig.clear()
heightBottomLabels = 0.4 # inches
spacingBetweenPlots = 0.25 # inches
widthNumSeqPlot = 1.25 # inches
if self.bShowBarPlot == False:
widthNumSeqPlot = 0.0
spacingBetweenPlots = 0.0
widthPvalueLabels = 0.75 # inches
if self.bShowPValueLabels == False:
widthPvalueLabels = 0.1
yPlotOffsetFigSpace = heightBottomLabels / self.imageHeight
heightPlotFigSpace = plotHeight / self.imageHeight
xPlotOffsetFigSpace = yLabelBounds.width + 0.1 / self.imageWidth
pValueLabelWidthFigSpace = widthPvalueLabels / self.imageWidth
widthPlotFigSpace = 1.0 - pValueLabelWidthFigSpace - xPlotOffsetFigSpace
widthErrorBarPlot = widthPlotFigSpace*self.imageWidth - widthNumSeqPlot - spacingBetweenPlots
axInitAxis = self.fig.add_axes([xPlotOffsetFigSpace,yPlotOffsetFigSpace,widthPlotFigSpace,heightPlotFigSpace])
divider = make_axes_locatable(axInitAxis)
divider.get_vertical()[0] = Size.Fixed(len(labels)*self.figHeightPerRow)
self.fig.text(0.0,1.0,self.preferences['Selected multiple group feature'], va='top', ha='left')
if self.bShowBarPlot == True:
divider.get_horizontal()[0] = Size.Fixed(widthNumSeqPlot)
axErrorbar = divider.new_horizontal(widthErrorBarPlot, pad=spacingBetweenPlots, sharey=axInitAxis)
self.fig.add_axes(axErrorbar)
else:
divider.get_horizontal()[0] = Size.Fixed(widthErrorBarPlot)
axErrorbar = axInitAxis
# *** Plot of sequences for each subsystem
if self.bShowBarPlot == True:
axNumSeq = axInitAxis
# get relative frequency and standard deviation of each contrast
maxPercentage = 0
for i in xrange(0, len(labels)):
splitLabel = labels[i].split(':')
groupName1 = splitLabel[0].strip()
groupName2 = splitLabel[1].strip()
colour1 = str(self.preferences['Group colours'][groupName1].name())
colour2 = str(self.preferences['Group colours'][groupName2].name())
meanRelFreq1 = statsResults.getDataFromTable(statsResults.postHocResults.feature, groupName1 + ': mean rel. freq. (%)')
meanRelFreq2 = statsResults.getDataFromTable(statsResults.postHocResults.feature, groupName2 + ': mean rel. freq. (%)')
if self.bShowStdDev:
stdDev1 = statsResults.getDataFromTable(statsResults.postHocResults.feature, groupName1 + ': std. dev. (%)')
stdDev2 = statsResults.getDataFromTable(statsResults.postHocResults.feature, groupName2 + ': std. dev. (%)')
endCapSize = self.endCapSize
else:
stdDev1 = 0
stdDev2 = 0
endCapSize = 0
if meanRelFreq1 + stdDev1 > maxPercentage:
maxPercentage = meanRelFreq1 + stdDev1
if meanRelFreq2 + stdDev2 > maxPercentage:
maxPercentage = meanRelFreq2 + stdDev2
axNumSeq.barh(i+0.0, meanRelFreq1, height = 0.3, xerr=stdDev1, color=colour1, ecolor='black', capsize=endCapSize)
axNumSeq.barh(i-0.3, meanRelFreq2, height = 0.3, xerr=stdDev2, color=colour2, ecolor='black', capsize=endCapSize)
for value in np.arange(-0.5, len(labels)-1, 2):
axNumSeq.axhspan(value, value+1, facecolor=highlightColor, edgecolor='none', zorder=-1)
axNumSeq.set_xlabel('Mean proportion (%)')
axNumSeq.set_xticks([0, maxPercentage])
axNumSeq.set_xlim([0, maxPercentage*1.05])
maxPercentageStr = '%.1f' % maxPercentage
axNumSeq.set_xticklabels(['0.0', maxPercentageStr])
axNumSeq.set_yticks(np.arange(len(labels)))
axNumSeq.set_yticklabels(adjustedLabels)
axNumSeq.set_ylim([-1, len(labels)])
for a in axNumSeq.yaxis.majorTicks:
a.tick1On=False
a.tick2On=False
for a in axNumSeq.xaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for line in axNumSeq.yaxis.get_ticklines():
line.set_color(axesColour)
for line in axNumSeq.xaxis.get_ticklines():
line.set_color(axesColour)
for loc, spine in axNumSeq.spines.iteritems():
if loc in ['left', 'right','top']:
spine.set_color('none')
else:
spine.set_color(axesColour)
# *** Plot confidence intervals for each subsystem
lastAxes = axErrorbar
markerSize = math.sqrt(float(self.markerSize))
axErrorbar.errorbar(effectSizes, np.arange(len(labels)), xerr=[lowerCIs,upperCIs], fmt='o', ms=markerSize, mfc='black', mec='black', ecolor='black', zorder=10)
for groupName in dominantGroup:
colour = str(self.preferences['Group colours'][groupName].name())
effectSizes = dominantGroup[groupName][0]
indices = dominantGroup[groupName][1]
axErrorbar.plot(effectSizes, indices, ls='', marker='o', ms=markerSize, mfc=colour, mec='black', zorder=100)
axErrorbar.vlines(0, -1, len(labels), linestyle='dashed', color=axesColour)
for value in np.arange(-0.5, len(labels)-1, 2):
axErrorbar.axhspan(value, value+1, facecolor=highlightColor,edgecolor='none',zorder=1)
ciTitle = ('%.3g' % ((1.0-statsResults.postHocResults.alpha)*100)) + '% confidence intervals'
axErrorbar.set_title(ciTitle)
axErrorbar.set_xlabel('Difference in mean proportions (%)')
if self.bCustomLimits:
axErrorbar.set_xlim([self.minX, self.maxX])
else:
self.minX, self.maxX = axErrorbar.get_xlim()
if self.bShowBarPlot == False:
axErrorbar.set_yticks(np.arange(len(labels)))
axErrorbar.set_yticklabels(labels)
axErrorbar.set_ylim([-1, len(labels)])
else:
for label in axErrorbar.get_yticklabels():
label.set_visible(False)
for a in axErrorbar.yaxis.majorTicks:
a.set_visible(False)
for a in axErrorbar.xaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for a in axErrorbar.yaxis.majorTicks:
a.tick1On=False
a.tick2On=False
for line in axErrorbar.yaxis.get_ticklines():
line.set_visible(False)
for line in axErrorbar.xaxis.get_ticklines():
line.set_color(axesColour)
for loc, spine in axErrorbar.spines.iteritems():
if loc in ['left','right','top']:
spine.set_color('none')
else:
spine.set_color(axesColour)
# *** Show p-values on right of last plot
if self.bShowPValueLabels == True:
axRight = lastAxes.twinx()
axRight.set_yticks(np.arange(len(pValueLabels)))
axRight.set_yticklabels(pValueLabels)
axRight.set_ylim([-1, len(pValueLabels)])
axRight.set_ylabel(pValueTitle)
for a in axRight.yaxis.majorTicks:
a.tick1On=False
a.tick2On=False
for loc, spine in axRight.spines.iteritems():
spine.set_color('none')
self.updateGeometry()
self.draw()
def configure(self, profile, statsResults):
self.configDlg = ConfigureDialog(Ui_PostHocPlotDialog)
# set enabled state of controls
self.configDlg.ui.chkShowStdDev.setEnabled(self.bShowBarPlot)
self.configDlg.ui.spinEndCapSize.setEnabled(self.bShowBarPlot)
self.configDlg.ui.spinMinimumX.setEnabled(self.bCustomLimits)
self.configDlg.ui.spinMaximumX.setEnabled(self.bCustomLimits)
# set current value of controls
self.configDlg.ui.cboSortingField.setCurrentIndex(self.configDlg.ui.cboSortingField.findText(self.sortingField))
self.configDlg.ui.spinFigWidth.setValue(self.figWidth)
self.configDlg.ui.spinFigRowHeight.setValue(self.figHeightPerRow)
self.configDlg.ui.chkShowBarPlot.setChecked(self.bShowBarPlot)
self.configDlg.ui.chkPValueLabels.setChecked(self.bShowPValueLabels)
self.configDlg.ui.chkCustomLimits.setChecked(self.bCustomLimits)
self.configDlg.ui.spinMinimumX.setValue(self.minX)
self.configDlg.ui.spinMaximumX.setValue(self.maxX)
self.configDlg.ui.spinMarkerSize.setValue(self.markerSize)
self.configDlg.ui.chkShowStdDev.setChecked(self.bShowStdDev)
self.configDlg.ui.spinEndCapSize.setValue(self.endCapSize)
self.configDlg.ui.chkFilterPvalue.setChecked(self.bPvalueFilter)
if self.configDlg.exec_() == QtGui.QDialog.Accepted:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
self.sortingField = str(self.configDlg.ui.cboSortingField.currentText())
self.figWidth = self.configDlg.ui.spinFigWidth.value()
self.figHeightPerRow = self.configDlg.ui.spinFigRowHeight.value()
self.bShowBarPlot = self.configDlg.ui.chkShowBarPlot.isChecked()
self.bShowPValueLabels = self.configDlg.ui.chkPValueLabels.isChecked()
self.bCustomLimits = self.configDlg.ui.chkCustomLimits.isChecked()
self.minX = self.configDlg.ui.spinMinimumX.value()
self.maxX = self.configDlg.ui.spinMaximumX.value()
self.markerSize = self.configDlg.ui.spinMarkerSize.value()
self.bShowStdDev = self.configDlg.ui.chkShowStdDev.isChecked()
self.endCapSize = self.configDlg.ui.spinEndCapSize.value()
self.bPvalueFilter = self.configDlg.ui.chkFilterPvalue.isChecked()
self.settings.setValue('multiple group: ' + self.name + '/width', self.figWidth)
self.settings.setValue('multiple group: ' + self.name + '/row height', self.figHeightPerRow)
self.settings.setValue('multiple group: ' + self.name + '/field', self.sortingField)
self.settings.setValue('multiple group: ' + self.name + '/sequences subplot', self.bShowBarPlot)
self.settings.setValue('multiple group: ' + self.name + '/p-value labels', self.bShowPValueLabels)
self.settings.setValue('multiple group: ' + self.name + '/use custom limits', self.bCustomLimits)
self.settings.setValue('multiple group: ' + self.name + '/minimum', self.minX)
self.settings.setValue('multiple group: ' + self.name + '/maximum', self.maxX)
self.settings.setValue('multiple group: ' + self.name + '/marker size', self.markerSize)
self.settings.setValue('multiple group: ' + self.name + '/show std. dev.', self.bShowStdDev)
self.settings.setValue('multiple group: ' + self.name + '/end cap size', self.endCapSize)
self.settings.setValue('multiple group: ' + self.name + '/p-value filter', self.bPvalueFilter)
self.plot(profile, statsResults)
QtGui.QApplication.instance().restoreOverrideCursor()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
testWindow = TestWindow(PostHocPlot)
testWindow.show()
sys.exit(app.exec_())
| gpl-3.0 |
jm-begon/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression.
J. Am. Stat. Assoc., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
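# Illustrative aside: with the settings above the MCD breakdown point
# (n_samples - n_features - 1) / 2 = (80 - 5 - 1) / 2 allows up to 37 contaminated
# observations.  The support_fraction argument of MinCovDet controls the size of the
# "pure" subset; the value 0.75 below is an arbitrary illustrative choice.
mcd_demo = MinCovDet(support_fraction=0.75).fit(
    np.random.RandomState(42).randn(n_samples, n_features))
print("Illustrative MCD support size:", mcd_demo.support_.sum())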
| bsd-3-clause |
yarikoptic/pystatsmodels | statsmodels/nonparametric/_kernel_base.py | 3 | 17942 | """
Module containing the base object for multivariate kernel density and
regression, plus some utilities.
"""
import copy
import numpy as np
from scipy import optimize
from scipy.stats.mstats import mquantiles
try:
import joblib
has_joblib = True
except ImportError:
has_joblib = False
import kernels
kernel_func = dict(wangryzin=kernels.wang_ryzin,
aitchisonaitken=kernels.aitchison_aitken,
gaussian=kernels.gaussian,
aitchison_aitken_reg = kernels.aitchison_aitken_reg,
wangryzin_reg = kernels.wang_ryzin_reg,
gauss_convolution=kernels.gaussian_convolution,
wangryzin_convolution=kernels.wang_ryzin_convolution,
aitchisonaitken_convolution=kernels.aitchison_aitken_convolution,
gaussian_cdf=kernels.gaussian_cdf,
aitchisonaitken_cdf=kernels.aitchison_aitken_cdf,
wangryzin_cdf=kernels.wang_ryzin_cdf,
d_gaussian=kernels.d_gaussian)
def _compute_min_std_IQR(data):
"""Compute minimum of std and IQR for each variable."""
s1 = np.std(data, axis=0)
q75 = mquantiles(data, 0.75, axis=0).data[0]
q25 = mquantiles(data, 0.25, axis=0).data[0]
s2 = (q75 - q25) / 1.349 # IQR
dispersion = np.minimum(s1, s2)
return dispersion
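def _dispersion_sketch():
    """Minimal illustrative sketch: for standard normal data both the std and the
    IQR/1.349 estimate are close to one, so the dispersion reported for each
    column is roughly one as well (values are illustrative only)."""
    rng = np.random.RandomState(0)
    data = rng.randn(500, 3)
    return _compute_min_std_IQR(data)  # approximately array([1., 1., 1.])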
def _compute_subset(class_type, data, bw, co, do, n_cvars, ix_ord,
ix_unord, n_sub, class_vars, randomize, bound):
""""Compute bw on subset of data.
Called from ``GenericKDE._compute_efficient_*``.
Notes
-----
Needs to be outside the class in order for joblib to be able to pickle it.
"""
if randomize:
np.random.shuffle(data)
sub_data = data[:n_sub, :]
else:
sub_data = data[bound[0]:bound[1], :]
if class_type == 'KDEMultivariate':
from kernel_density import KDEMultivariate
var_type = class_vars[0]
sub_model = KDEMultivariate(sub_data, var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
elif class_type == 'KDEMultivariateConditional':
from kernel_density import KDEMultivariateConditional
k_dep, dep_type, indep_type = class_vars
endog = sub_data[:, :k_dep]
exog = sub_data[:, k_dep:]
sub_model = KDEMultivariateConditional(endog, exog, dep_type,
indep_type, bw=bw, defaults=EstimatorSettings(efficient=False))
elif class_type == 'KernelReg':
from kernel_regression import KernelReg
var_type, k_vars, reg_type = class_vars
endog = _adjust_shape(sub_data[:, 0], 1)
exog = _adjust_shape(sub_data[:, 1:], k_vars)
sub_model = KernelReg(endog=endog, exog=exog, reg_type=reg_type,
var_type=var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
else:
raise ValueError("class_type not recognized, should be one of " \
"{KDEMultivariate, KDEMultivariateConditional, KernelReg}")
# Compute dispersion in next 4 lines
if class_type == 'KernelReg':
sub_data = sub_data[:, 1:]
dispersion = _compute_min_std_IQR(sub_data)
fct = dispersion * n_sub**(-1. / (n_cvars + co))
fct[ix_unord] = n_sub**(-2. / (n_cvars + do))
fct[ix_ord] = n_sub**(-2. / (n_cvars + do))
sample_scale_sub = sub_model.bw / fct #TODO: check if correct
bw_sub = sub_model.bw
return sample_scale_sub, bw_sub
class GenericKDE (object):
"""
Base class for density estimation and regression KDE classes.
"""
def _compute_bw(self, bw):
"""
Computes the bandwidth of the data.
Parameters
----------
bw: array_like or str
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default values for bw is 'normal_reference'.
"""
self.bw_func = dict(normal_reference=self._normal_reference,
cv_ml=self._cv_ml, cv_ls=self._cv_ls)
if bw is None:
bwfunc = self.bw_func['normal_reference']
return bwfunc()
if not isinstance(bw, basestring):
self._bw_method = "user-specified"
res = np.asarray(bw)
else:
# The user specified a bandwidth selection method
self._bw_method = bw
bwfunc = self.bw_func[bw]
res = bwfunc()
return res
def _compute_dispersion(self, data):
"""
Computes the measure of dispersion.
The minimum of the standard deviation and interquartile range / 1.349
Notes
-----
Reimplemented in `KernelReg`, because the first column of `data` has to
be removed.
References
----------
See the user guide for the np package in R.
In the notes on bwscaling option in npreg, npudens, npcdens there is
a discussion on the measure of dispersion
"""
return _compute_min_std_IQR(data)
def _get_class_vars_type(self):
"""Helper method to be able to pass needed vars to _compute_subset.
Needs to be implemented by subclasses."""
pass
def _compute_efficient(self, bw):
"""
Computes the bandwidth by estimating the scaling factor (c)
in n_res resamples of size ``n_sub`` (in `randomize` case), or by
dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
`randomize` is False).
References
----------
See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
"""
nobs = self.nobs
n_sub = self.n_sub
data = copy.deepcopy(self.data)
n_cvars = self.data_type.count('c')
co = 4 # 2*order of continuous kernel
do = 4 # 2*order of discrete kernel
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
# Define bounds for slicing the data
if self.randomize:
# randomize chooses blocks of size n_sub, independent of nobs
bounds = [None] * self.n_res
else:
bounds = [(i * n_sub, (i+1) * n_sub) for i in range(nobs // n_sub)]
if nobs % n_sub > 0:
bounds.append((nobs - nobs % n_sub, nobs))
n_blocks = self.n_res if self.randomize else len(bounds)
sample_scale = np.empty((n_blocks, self.k_vars))
only_bw = np.empty((n_blocks, self.k_vars))
class_type, class_vars = self._get_class_vars_type()
if has_joblib:
# `res` is a list of tuples (sample_scale_sub, bw_sub)
res = joblib.Parallel(n_jobs=self.n_jobs) \
(joblib.delayed(_compute_subset) \
(class_type, data, bw, co, do, n_cvars, ix_ord, ix_unord, \
n_sub, class_vars, self.randomize, bounds[i]) \
for i in range(n_blocks))
else:
res = []
for i in xrange(n_blocks):
res.append(_compute_subset(class_type, data, bw, co, do,
n_cvars, ix_ord, ix_unord, n_sub,
class_vars, self.randomize,
bounds[i]))
for i in xrange(n_blocks):
sample_scale[i, :] = res[i][0]
only_bw[i, :] = res[i][1]
s = self._compute_dispersion(data)
order_func = np.median if self.return_median else np.mean
m_scale = order_func(sample_scale, axis=0)
# TODO: Check if 1/5 is correct in line below!
bw = m_scale * s * nobs**(-1. / (n_cvars + co))
bw[ix_ord] = m_scale[ix_ord] * nobs**(-2./ (n_cvars + do))
bw[ix_unord] = m_scale[ix_unord] * nobs**(-2./ (n_cvars + do))
if self.return_only_bw:
bw = np.median(only_bw, axis=0)
return bw
def _set_defaults(self, defaults):
"""Sets the default values for the efficient estimation"""
self.n_res = defaults.n_res
self.n_sub = defaults.n_sub
self.randomize = defaults.randomize
self.return_median = defaults.return_median
self.efficient = defaults.efficient
self.return_only_bw = defaults.return_only_bw
self.n_jobs = defaults.n_jobs
def _normal_reference(self):
"""
Returns Scott's normal reference rule of thumb bandwidth parameter.
Notes
-----
See p.13 in [2] for an example and discussion. The formula for the
bandwidth is
.. math:: h = 1.06 \sigma n^{-1/(4+q)}
where :math:`\sigma` is the standard deviation of each variable, ``n`` is the
number of observations and ``q`` is the number of variables.
"""
X = np.std(self.data, axis=0)
return 1.06 * X * self.nobs ** (- 1. / (4 + self.data.shape[1]))
def _set_bw_bounds(self, bw):
"""
Sets bandwidth lower bound to effectively zero (1e-10), and for
discrete values upper bound to 1.
"""
bw[bw < 0] = 1e-10
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
bw[ix_ord] = np.minimum(bw[ix_ord], 1.)
bw[ix_unord] = np.minimum(bw[ix_unord], 1.)
return bw
def _cv_ml(self):
"""
Returns the cross validation maximum likelihood bandwidth parameter.
Notes
-----
For more details see p.16, 18, 27 in Ref. [1] (see module docstring).
Returns the bandwidth estimate that maximizes the leave-one-out
likelihood. The leave-one-out log likelihood function is:
.. math:: \ln L=\sum_{i=1}^{n}\ln f_{-i}(X_{i})
The leave-one-out kernel estimator of :math:`f_{-i}` is:
.. math:: f_{-i}(X_{i})=\frac{1}{(n-1)h}
\sum_{j=1,j\neq i}K_{h}(X_{i},X_{j})
where :math:`K_{h}` represents the Generalized product kernel
estimator:
.. math:: K_{h}(X_{i},X_{j})=\prod_{s=1}^{q}h_{s}^{-1}k\left(\frac{X_{is}-X_{js}}{h_{s}}\right)
"""
# the initial value for the optimization is the normal_reference
h0 = self._normal_reference()
bw = optimize.fmin(self.loo_likelihood, x0=h0, args=(np.log, ),
maxiter=1e3, maxfun=1e3, disp=0, xtol=1e-3)
bw = self._set_bw_bounds(bw) # bound bw if necessary
return bw
def _cv_ls(self):
"""
Returns the cross-validation least squares bandwidth parameter(s).
Notes
-----
For more details see pp. 16, 27 in Ref. [1] (see module docstring).
Returns the value of the bandwidth that minimizes the integrated mean
square error between the estimated and actual distribution. The
integrated mean square error (IMSE) is given by:
.. math:: \int\left[\hat{f}(x)-f(x)\right]^{2}dx
This is the general formula for the IMSE. The IMSE differs for
conditional (``KDEMultivariateConditional``) and unconditional
(``KDEMultivariate``) kernel density estimation.
"""
h0 = self._normal_reference()
bw = optimize.fmin(self.imse, x0=h0, maxiter=1e3, maxfun=1e3, disp=0,
xtol=1e-3)
bw = self._set_bw_bounds(bw) # bound bw if necessary
return bw
def loo_likelihood(self):
raise NotImplementedError
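def _normal_reference_sketch(data):
    """Minimal stand-alone sketch of the rule of thumb used by
    GenericKDE._normal_reference (illustrative only):
    h = 1.06 * std(X) * n ** (-1 / (4 + q)) for n observations of q variables."""
    data = np.asarray(data)
    nobs, q = data.shape
    return 1.06 * np.std(data, axis=0) * nobs ** (-1. / (4 + q))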
class EstimatorSettings(object):
"""
Object to specify settings for density estimation or regression.
`EstimatorSettings` has several properties related to how bandwidth
estimation for the `KDEMultivariate`, `KDEMultivariateConditional`,
`KernelReg` and `CensoredKernelReg` classes behaves.
Parameters
----------
efficient: bool, optional
If True, the bandwidth estimation is to be performed
efficiently -- by taking smaller sub-samples and estimating
the scaling factor of each subsample. This is useful for large
samples (nobs >> 300) and/or multiple variables (k_vars > 3).
If False (default), all data is used at the same time.
randomize: bool, optional
If True, the bandwidth estimation is to be performed by
taking `n_res` random resamples (with replacement) of size `n_sub` from
the full sample. If set to False (default), the estimation is
performed by slicing the full sample in sub-samples of size `n_sub` so
that all samples are used once.
n_sub: int, optional
Size of the sub-samples. Default is 50.
n_res: int, optional
The number of random re-samples used to estimate the bandwidth.
Only has an effect if ``randomize == True``. Default value is 25.
return_median: bool, optional
If True (default), the estimator uses the median of all scaling factors
for each sub-sample to estimate the bandwidth of the full sample.
If False, the estimator uses the mean.
return_only_bw: bool, optional
If True, the estimator is to use the bandwidth and not the
scaling factor. This is *not* theoretically justified.
Should be used only for experimenting.
n_jobs : int, optional
The number of jobs to use for parallel estimation with
``joblib.Parallel``. Default is -1, meaning that all available CPU
cores are used.
See the `joblib documentation
<http://packages.python.org/joblib/parallel.html>`_ for more details.
Examples
--------
>>> settings = EstimatorSettings(randomize=True, n_jobs=3)
>>> k_dens = KDEMultivariate(data, var_type, defaults=settings)
"""
def __init__(self, efficient=False, randomize=False, n_res=25, n_sub=50,
return_median=True, return_only_bw=False, n_jobs=-1):
self.efficient = efficient
self.randomize = randomize
self.n_res = n_res
self.n_sub = n_sub
self.return_median = return_median
self.return_only_bw = return_only_bw # TODO: remove this?
self.n_jobs = n_jobs
class LeaveOneOut(object):
"""
Generator to give leave-one-out views on X.
Parameters
----------
X : array-like
2-D array.
Examples
--------
>>> X = np.random.normal(0, 1, [10,2])
>>> loo = LeaveOneOut(X)
>>> for x in loo:
... print x
Notes
-----
A little lighter weight than sklearn LOO. We don't need test index.
Also passes views on X, not the index.
"""
def __init__(self, X):
self.X = np.asarray(X)
def __iter__(self):
X = self.X
nobs, k_vars = np.shape(X)
for i in xrange(nobs):
index = np.ones(nobs, dtype=np.bool)
index[i] = False
yield X[index, :]
def _get_type_pos(var_type):
ix_cont = np.array([c == 'c' for c in var_type])
ix_ord = np.array([c == 'o' for c in var_type])
ix_unord = np.array([c == 'u' for c in var_type])
return ix_cont, ix_ord, ix_unord
def _adjust_shape(dat, k_vars):
""" Returns an array of shape (nobs, k_vars) for use with `gpke`."""
dat = np.asarray(dat)
if dat.ndim > 2:
dat = np.squeeze(dat)
if dat.ndim == 1 and k_vars > 1: # one obs many vars
nobs = 1
elif dat.ndim == 1 and k_vars == 1: # one obs one var
nobs = len(dat)
else:
if np.shape(dat)[0] == k_vars and np.shape(dat)[1] != k_vars:
dat = dat.T
nobs = np.shape(dat)[0] # ndim >1 so many obs many vars
dat = np.reshape(dat, (nobs, k_vars))
return dat
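def _adjust_shape_sketch():
    """Minimal illustrative sketch of the shapes produced by _adjust_shape for the
    three input cases it distinguishes (values are illustrative only)."""
    one_var = _adjust_shape(np.arange(5.), 1)         # -> (5, 1): many obs, one variable
    one_obs = _adjust_shape(np.arange(3.), 3)         # -> (1, 3): one obs, many variables
    transposed = _adjust_shape(np.zeros((3, 10)), 3)  # -> (10, 3): input given as (k_vars, nobs)
    return one_var.shape, one_obs.shape, transposed.shape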
def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
"""
Returns the non-normalized Generalized Product Kernel Estimator
Parameters
----------
bw: 1-D ndarray
The user-specified bandwidth parameters.
data: 1D or 2-D ndarray
The training data.
data_predict: 1-D ndarray
The evaluation points at which the kernel estimation is performed.
var_type: str, optional
The variable type (continuous, ordered, unordered).
ckertype: str, optional
The kernel used for the continuous variables.
okertype: str, optional
The kernel used for the ordered discrete variables.
ukertype: str, optional
The kernel used for the unordered discrete variables.
tosum : bool, optional
Whether or not to sum the calculated array of densities. Default is
True.
Returns
-------
dens: array-like
The generalized product kernel density estimator.
Notes
-----
The formula for the multivariate kernel estimator for the pdf is:
    .. math:: f(x) = \frac{1}{n h_{1} \cdots h_{q}} \sum_{i=1}^{n}
                     K\left(\frac{X_{i}-x}{h}\right)
    where
    .. math:: K\left(\frac{X_{i}-x}{h}\right) =
                  k\left(\frac{X_{i1}-x_{1}}{h_{1}}\right)\times
                  k\left(\frac{X_{i2}-x_{2}}{h_{2}}\right)\times\cdots\times
                  k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right)
"""
kertypes = dict(c=ckertype, o=okertype, u=ukertype)
#Kval = []
#for ii, vtype in enumerate(var_type):
# func = kernel_func[kertypes[vtype]]
# Kval.append(func(bw[ii], data[:, ii], data_predict[ii]))
#Kval = np.column_stack(Kval)
Kval = np.empty(data.shape)
for ii, vtype in enumerate(var_type):
func = kernel_func[kertypes[vtype]]
Kval[:, ii] = func(bw[ii], data[:, ii], data_predict[ii])
iscontinuous = np.array([c == 'c' for c in var_type])
dens = Kval.prod(axis=1) / np.prod(bw[iscontinuous])
if tosum:
return dens.sum(axis=0)
else:
return dens
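# Usage sketch (illustrative only; assumes the module-level `kernel_func`
# mapping referenced above is available): kernel density of a 2-variable
# continuous sample at the origin, with user-chosen bandwidths.
#
#     bw = np.array([0.5, 0.5])
#     data = np.random.normal(size=(100, 2))
#     dens0 = gpke(bw, data, data_predict=np.zeros(2), var_type='cc') / len(data)
#     # dividing by the number of observations gives the usual KDE normalisation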
| bsd-3-clause |
trendelkampschroer/PyEMMA | pyemma/plots/plots2d.py | 1 | 4320 |
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
__author__ = 'noe'
import numpy as _np
import matplotlib.pylab as _plt
from scipy.interpolate import griddata as gd
def contour(x, y, z, ncontours=50, colorbar=True, fig=None, ax=None, method='linear', zlim=None, cmap=None):
    """Contour plot of scattered data (x, y, z), gridded onto a regular mesh by interpolation."""
    # check input
if (ax is None):
if fig is None:
ax = _plt.gca()
else:
ax = fig.gca()
# grid data
points = _np.hstack([x[:,None],y[:,None]])
xi, yi = _np.mgrid[x.min():x.max():100j, y.min():y.max():100j]
zi = gd(points, z, (xi, yi), method=method)
    # contour levels
if zlim is None:
zlim = (z.min(), z.max())
eps = (zlim[1] - zlim[0]) / float(ncontours)
    levels = _np.linspace(zlim[0] - eps, zlim[1] + eps, ncontours + 2)
# contour plot
if cmap is None:
cmap=_plt.cm.jet
cf = ax.contourf(xi, yi, zi, ncontours, cmap=cmap, levels=levels)
# color bar if requested
if colorbar:
_plt.colorbar(cf, ax=ax)
return ax
def scatter_contour(x, y, z, ncontours = 50, colorbar=True, fig=None, ax=None, cmap=None, outfile=None):
"""Shows a contour plot on scattered data (x,y,z) and the plots the positions of the points (x,y) on top.
Parameters
----------
x : ndarray(T)
x-coordinates
y : ndarray(T)
y-coordinates
z : ndarray(T)
z-coordinates
ncontours : int, optional, default = 50
number of contour levels
fig : matplotlib Figure object, optional, default = None
the figure to plot into. When set to None the default Figure object will be used
ax : matplotlib Axes object, optional, default = None
the axes to plot to. When set to None the default Axes object will be used.
cmap : matplotlib colormap, optional, default = None
the color map to use. None will use pylab.cm.jet.
outfile : str, optional, default = None
output file to write the figure to. When not given, the plot will be displayed
Returns
-------
ax : Axes object containing the plot
"""
ax = contour(x, y, z, ncontours=ncontours, colorbar=colorbar, fig=fig, ax=ax, cmap=cmap)
# scatter points
ax.scatter(x,y,marker='o',c='b',s=5)
# show or save
if outfile is not None:
_plt.savefig(outfile)
return ax
def plot_free_energy(xall, yall, weights=None, ax=None, nbins=100, offset=0.1, cmap=_plt.cm.spectral, cbar=True, cbar_label=None):
    """Plot a 2D free energy surface, F = -log(histogram), estimated from the
    scattered samples (xall, yall).
    """
# histogram data
z,x,y = _np.histogram2d(xall, yall, bins=nbins, weights=weights)
z += offset
# compute free energies
F = -_np.log(z)
# do a contour plot
extent = [x[0], x[-1], y[0], y[-1]]
if ax is None:
ax = _plt.gca()
    cf = ax.contourf(F.T, 100, extent=extent, cmap=cmap)
    if cbar:
        cbar = _plt.colorbar(cf, ax=ax)
if cbar_label is not None:
cbar.ax.set_ylabel(cbar_label)
return ax
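# Usage sketch (illustrative only): scattered samples from a 2D Gaussian,
# plotted with the helpers above.
#
#     xall, yall = _np.random.randn(2, 10000)
#     plot_free_energy(xall, yall, nbins=64)
#     _plt.show()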
| bsd-2-clause |
elijah513/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
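# Usage sketch (illustrative only; the path below is hypothetical):
#
#     bunch = load_files('/path/to/container_folder', encoding='utf-8')
#     print(bunch.target_names)              # subfolder names used as labels
#     print(len(bunch.data), len(bunch.target))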
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,), dtype=int)
        for i, ir in enumerate(data_file):
            data[i] = np.asarray(ir[:-1], dtype=float)
            target[i] = np.asarray(ir[-1], dtype=int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
                 target=target.astype(int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
            data[i] = np.asarray(d[:-1], dtype=float)
            target[i] = np.asarray(d[-1], dtype=float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/io/test_parquet.py | 1 | 18580 | """ test parquet compat """
import datetime
from distutils.version import LooseVersion
import os
from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas.util import testing as tm
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow # noqa
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet # noqa
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
pytestmark = pytest.mark.filterwarnings(
"ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
# setup engines & skips
@pytest.fixture(
params=[
pytest.param(
"fastparquet",
marks=pytest.mark.skipif(
not _HAVE_FASTPARQUET, reason="fastparquet is not installed"
),
),
pytest.param(
"pyarrow",
marks=pytest.mark.skipif(
not _HAVE_PYARROW, reason="pyarrow is not installed"
),
),
]
)
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return "pyarrow"
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
return "fastparquet"
@pytest.fixture
def df_compat():
return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
}
)
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{
"string": list("abc"),
"string_with_nan": ["a", np.nan, "c"],
"string_with_none": ["a", None, "c"],
"bytes": [b"foo", b"bar", b"baz"],
"unicode": ["foo", "bar", "baz"],
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_nan": [2.0, np.nan, 3.0],
"bool": [True, False, True],
"datetime": pd.date_range("20130101", periods=3),
"datetime_with_nat": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
}
)
def check_round_trip(
df,
engine=None,
path=None,
write_kwargs=None,
read_kwargs=None,
expected=None,
check_names=True,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
    check_names: bool, optional
        Whether to also compare the names of the DataFrames' indexes/columns
        (passed through to ``tm.assert_frame_equal``)
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {"compression": None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs["engine"] = engine
read_kwargs["engine"] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(expected, actual, check_names=check_names)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
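# Usage sketch (illustrative only): a minimal round-trip check against a chosen
# engine, relying on the `df_compat` and `pa` fixtures defined above.
#
#     def test_roundtrip_sketch(df_compat, pa):
#         check_round_trip(df_compat, engine=pa)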
def test_invalid_engine(df_compat):
with pytest.raises(ValueError):
check_round_trip(df_compat, "foo", "bar")
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context("io.parquet.engine", "pyarrow"):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context("io.parquet.engine", "fastparquet"):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context("io.parquet.engine", "auto"):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "pyarrow"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "fastparquet"):
assert isinstance(get_engine("auto"), FastParquetImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "auto"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
if (
LooseVersion(pyarrow.__version__) < "0.15"
and LooseVersion(pyarrow.__version__) >= "0.13"
):
pytest.xfail(
"Reading fastparquet with pyarrow in 0.14 fails: "
"https://issues.apache.org/jira/browse/ARROW-6492"
)
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
def check_error_on_write(self, df, engine, exc):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc):
to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
def test_error(self, engine):
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, engine, ValueError)
def test_columns_dtypes(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# unicode
df.columns = ["foo", "bar"]
check_round_trip(df, engine)
def test_columns_dtypes_invalid(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# numeric
df.columns = [0, 1]
self.check_error_on_write(df, engine, ValueError)
# bytes
df.columns = [b"foo", b"bar"]
self.check_error_on_write(df, engine, ValueError)
# python object
df.columns = [
datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1),
]
self.check_error_on_write(df, engine, ValueError)
@pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
def test_compression(self, engine, compression):
if compression == "snappy":
pytest.importorskip("snappy")
elif compression == "brotli":
pytest.importorskip("brotli")
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine, write_kwargs={"compression": compression})
def test_read_columns(self, engine):
# GH18154
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
expected = pd.DataFrame({"string": list("abc")})
check_round_trip(
df, engine, expected=expected, read_kwargs={"columns": ["string"]}
)
def test_write_index(self, engine):
check_names = engine != "fastparquet"
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine)
indexes = [
[2, 3, 4],
pd.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
]
# non-default index
for index in indexes:
df.index = index
check_round_trip(df, engine, check_names=check_names)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = "foo"
check_round_trip(df, engine)
def test_write_multiindex(self, pa):
# Not supported in fastparquet as of 0.1.3 or older pyarrow version
engine = pa
df = pd.DataFrame({"A": [1, 2, 3]})
index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df.index = index
check_round_trip(df, engine)
def test_write_column_multiindex(self, engine):
# column multi-index
mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
self.check_error_on_write(df, engine, ValueError)
def test_multiindex_with_columns(self, pa):
engine = pa
dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS")
df = pd.DataFrame(np.random.randn(2 * len(dates), 3), columns=list("ABC"))
index1 = pd.MultiIndex.from_product(
[["Level1", "Level2"], dates], names=["level", "date"]
)
index2 = index1.copy(names=None)
for index in [index1, index2]:
df.index = index
check_round_trip(df, engine)
check_round_trip(
df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
)
def test_write_ignoring_index(self, engine):
# ENH 20768
# Ensure index=False omits the index from the written Parquet file.
df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]})
write_kwargs = {"compression": None, "index": False}
# Because we're dropping the index, we expect the loaded dataframe to
# have the default integer index.
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore custom index
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"]
)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore multi-indexes as well.
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = pd.DataFrame(
{"one": [i for i in range(8)], "two": [-i for i in range(8)]}, index=arrays
)
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
class TestParquetPyArrow(Base):
def test_basic(self, pa, df_full):
df = df_full
# additional supported types for pyarrow
df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
df["bool_with_none"] = [True, None, True]
check_round_trip(df, pa)
def test_basic_subset_columns(self, pa, df_full):
# GH18628
df = df_full
# additional supported types for pyarrow
df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
check_round_trip(
df,
pa,
expected=df[["string", "int"]],
read_kwargs={"columns": ["string", "int"]},
)
def test_duplicate_columns(self, pa):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_error_on_write(df, pa, ValueError)
def test_unsupported(self, pa):
# period
df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_error_on_write(df, pa, Exception)
# timedelta
df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
self.check_error_on_write(df, pa, NotImplementedError)
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_error_on_write(df, pa, Exception)
def test_categorical(self, pa):
# supported in >= 0.7.0
df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
# de-serialized as object
expected = df.assign(a=df.a.astype(object))
check_round_trip(df, pa, expected=expected)
def test_s3_roundtrip(self, df_compat, s3_resource, pa):
# GH #19134
check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet")
def test_partition_cols_supported(self, pa, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, partition_cols=partition_cols, compression=None)
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
assert len(dataset.partitions.partition_names) == 2
assert dataset.partitions.partition_names == set(partition_cols)
def test_empty_dataframe(self, pa):
# GH #27339
df = pd.DataFrame()
check_round_trip(df, pa)
class TestParquetFastParquet(Base):
@td.skip_if_no("fastparquet", min_version="0.2.1")
def test_basic(self, fp, df_full):
df = df_full
df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="US/Eastern")
df["timedelta"] = pd.timedelta_range("1 day", periods=3)
check_round_trip(df, fp)
@pytest.mark.skip(reason="not supported")
def test_duplicate_columns(self, fp):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_error_on_write(df, fp, ValueError)
def test_bool_with_none(self, fp):
df = pd.DataFrame({"a": [True, None, False]})
expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16")
check_round_trip(df, fp, expected=expected)
def test_unsupported(self, fp):
# period
df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
self.check_error_on_write(df, fp, ValueError)
# mixed
df = pd.DataFrame({"a": ["a", 1, 2.0]})
self.check_error_on_write(df, fp, ValueError)
def test_categorical(self, fp):
df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
check_round_trip(df, fp)
def test_filter_row_groups(self, fp):
d = {"a": list(range(0, 3))}
df = pd.DataFrame(d)
with tm.ensure_clean() as path:
df.to_parquet(path, fp, compression=None, row_group_offsets=1)
result = read_parquet(path, fp, filters=[("a", "==", 0)])
assert len(result) == 1
def test_s3_roundtrip(self, df_compat, s3_resource, fp):
# GH #19134
check_round_trip(df_compat, fp, path="s3://pandas-test/fastparquet.parquet")
def test_partition_cols_supported(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
partition_cols=partition_cols,
compression=None,
)
assert os.path.exists(path)
import fastparquet # noqa: F811
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_partition_on_supported(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
compression=None,
partition_on=partition_cols,
)
assert os.path.exists(path)
import fastparquet # noqa: F811
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with pytest.raises(ValueError):
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
compression=None,
partition_on=partition_cols,
partition_cols=partition_cols,
)
def test_empty_dataframe(self, fp):
# GH #27339
df = pd.DataFrame()
expected = df.copy()
expected.index.name = "index"
check_round_trip(df, fp, expected=expected)
| apache-2.0 |
antoan2/incubator-mxnet | example/rcnn/rcnn/core/tester.py | 25 | 10193 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cPickle
import os
import time
import mxnet as mx
import numpy as np
from module import MutableModule
from rcnn.logger import logger
from rcnn.config import config
from rcnn.io import image
from rcnn.processing.bbox_transform import bbox_pred, clip_boxes
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
class Predictor(object):
def __init__(self, symbol, data_names, label_names,
context=mx.cpu(), max_data_shapes=None,
provide_data=None, provide_label=None,
arg_params=None, aux_params=None):
self._mod = MutableModule(symbol, data_names, label_names,
context=context, max_data_shapes=max_data_shapes)
self._mod.bind(provide_data, provide_label, for_training=False)
self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
def predict(self, data_batch):
self._mod.forward(data_batch)
return dict(zip(self._mod.output_names, self._mod.get_outputs()))
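# Usage sketch (illustrative only; `sym`, `provide_data`, `provide_label`,
# `arg_params` and `aux_params` are placeholders for objects built elsewhere,
# and the data names are just examples):
#
#     predictor = Predictor(sym, ['data', 'im_info'], [],
#                           provide_data=provide_data, provide_label=provide_label,
#                           arg_params=arg_params, aux_params=aux_params)
#     outputs = predictor.predict(data_batch)   # dict: output name -> NDArray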
def im_proposal(predictor, data_batch, data_names, scale):
data_dict = dict(zip(data_names, data_batch.data))
output = predictor.predict(data_batch)
# drop the batch index
boxes = output['rois_output'].asnumpy()[:, 1:]
scores = output['rois_score'].asnumpy()
# transform to original scale
boxes = boxes / scale
return scores, boxes, data_dict
def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):
"""
Generate detections results using RPN.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: thresh for valid detections
:return: list of detected boxes
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
i = 0
t = time.time()
imdb_boxes = list()
original_boxes = list()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
# assemble proposals
dets = np.hstack((boxes, scores))
original_boxes.append(dets)
# filter proposals
keep = np.where(dets[:, 4:] > thresh)[0]
dets = dets[keep, :]
imdb_boxes.append(dets)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)
logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +
'proposal %d ' % (dets.shape[0]) +
'data %.4fs net %.4fs' % (t1, t2))
i += 1
assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
# save results
rpn_folder = os.path.join(imdb.root_path, 'rpn_data')
if not os.path.exists(rpn_folder):
os.mkdir(rpn_folder)
rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
with open(rpn_file, 'wb') as f:
cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
if thresh > 0:
full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')
with open(full_rpn_file, 'wb') as f:
cPickle.dump(original_boxes, f, cPickle.HIGHEST_PROTOCOL)
logger.info('wrote rpn proposals to %s' % rpn_file)
return imdb_boxes
def im_detect(predictor, data_batch, data_names, scale):
output = predictor.predict(data_batch)
data_dict = dict(zip(data_names, data_batch.data))
if config.TEST.HAS_RPN:
rois = output['rois_output'].asnumpy()[:, 1:]
else:
rois = data_dict['rois'].asnumpy().reshape((-1, 5))[:, 1:]
im_shape = data_dict['data'].shape
# save output
scores = output['cls_prob_reshape_output'].asnumpy()[0]
bbox_deltas = output['bbox_pred_reshape_output'].asnumpy()[0]
# post processing
pred_boxes = bbox_pred(rois, bbox_deltas)
pred_boxes = clip_boxes(pred_boxes, im_shape[-2:])
# we used scaled image & roi to train, so it is necessary to transform them back
pred_boxes = pred_boxes / scale
return scores, pred_boxes, data_dict
def pred_eval(predictor, test_data, imdb, vis=False, thresh=1e-3):
"""
wrapper for calculating offline validation for faster data analysis
    in this example, all thresholds are set by hand
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffle
:param imdb: image database
:param vis: controls visualization
:param thresh: valid detection threshold
:return:
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
nms = py_nms_wrapper(config.TEST.NMS)
# limit detections to max_per_image over all classes
max_per_image = -1
num_images = imdb.num_images
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
i = 0
t = time.time()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
for j in range(1, imdb.num_classes):
indexes = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[indexes, j, np.newaxis]
cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores))
keep = nms(cls_dets)
all_boxes[j][i] = cls_dets[keep, :]
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
if vis:
boxes_this_image = [[]] + [all_boxes[j][i] for j in range(1, imdb.num_classes)]
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, imdb.classes, scale)
t3 = time.time() - t
t = time.time()
logger.info('testing %d/%d data %.4fs net %.4fs post %.4fs' % (i, imdb.num_images, t1, t2, t3))
i += 1
det_file = os.path.join(imdb.cache_path, imdb.name + '_detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, protocol=cPickle.HIGHEST_PROTOCOL)
imdb.evaluate_detections(all_boxes)
def vis_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import matplotlib.pyplot as plt
import random
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
plt.imshow(im)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.random(), random.random(), random.random()) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=color, linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(name, score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
plt.show()
def draw_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import cv2
import random
color_white = (255, 255, 255)
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
# change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
bbox = map(int, bbox)
cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
return im
| apache-2.0 |
DiCarloLab-Delft/PycQED_py3 | pycqed/tests/test_SSRO_analysis.py | 2 | 3841 | import unittest
import numpy as np
import pycqed as pq
import os
# # hack for badly installed matplotlib on maserati pc
# import matplotlib
# matplotlib.use('QT4Agg')
from pycqed.analysis import measurement_analysis as ma
from pycqed.analysis.tools.data_manipulation import rotate_complex
from pycqed.analysis import multiplexed_RO_analysis as mra
class Test_SSRO_discrimination_analysis(unittest.TestCase):
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
ma.a_tools.datadir = self.datadir
def test_discrimination_fidelity(self):
# Test the correct file is loaded
a = ma.SSRO_discrimination_analysis(label='dummy_Butterfly',
plot_2D_histograms=False)
self.assertEqual(
a.folder,
os.path.join(self.datadir, '20161214', '120000_dummy_Butterfly'))
mu_a = a.mu_a
mu_b = a.mu_b
# Test if the fit gives the expected means
self.assertAlmostEqual(mu_a.real, -6719.6, places=1)
self.assertAlmostEqual(mu_a.imag, 20024.2, places=1)
self.assertAlmostEqual(mu_b.real, 1949.4, places=1)
self.assertAlmostEqual(mu_b.imag, 37633.0, places=1)
# Test identifying the rotation vector
self.assertAlmostEqual(a.theta % 180, 63.8, places=1)
self.assertAlmostEqual(a.theta % 180,
np.angle(a.mu_b-a.mu_a,
deg=True), places=1)
diff_v_r = rotate_complex((mu_b-mu_a), -a.theta)
self.assertAlmostEqual(diff_v_r.imag, 0)
self.assertAlmostEqual(a.opt_I_threshold,
np.mean([mu_a.real, mu_b.real]), places=1)
self.assertAlmostEqual(a.F_discr, 0.954, places=3)
self.assertAlmostEqual(a.F_discr_I, 0.5427, places=3)
def test_rotated_discrimination_fidelity(self):
# First is run to determine the theta to rotate with
a = ma.SSRO_discrimination_analysis(
label='dummy_Butterfly',
plot_2D_histograms=False)
a = ma.SSRO_discrimination_analysis(
label='dummy_Butterfly', theta_in=-a.theta,
plot_2D_histograms=True)
self.assertEqual(
a.folder,
os.path.join(self.datadir, '20161214', '120000_dummy_Butterfly'))
mu_a = a.mu_a
mu_b = a.mu_b
self.assertAlmostEqual((mu_b-mu_a).imag/10, 0, places=0)
self.assertAlmostEqual(a.F_discr, a.F_discr,
places=3)
def test_discrimination_fidelity_small_vals(self):
a = ma.SSRO_discrimination_analysis(timestamp='20170716_144742')
self.assertAlmostEqual(a.F_discr, 0.934047, places=3)
self.assertAlmostEqual(a.F_discr_I, 0.8052, places=3)
def test_single_quadrature_discr_fid(self):
a = ma.SSRO_single_quadrature_discriminiation_analysis(
timestamp='20170716_134634')
self.assertAlmostEqual(a.F_discr, 0.79633097)
class Test_multiplexed_SSRO_analysis(unittest.TestCase):
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
ma.a_tools.datadir = self.datadir
def test_two_qubit_ssro(self):
res_dict = mra.two_qubit_ssro_fidelity(label='SSRO_QL_QR')
self.assertAlmostEqual(res_dict['Fa_q0'], 0.6169, places=2)
self.assertAlmostEqual(res_dict['Fa_q1'], 0.8504, places=2)
self.assertAlmostEqual(res_dict['Fd_q0'], 0.6559, places=2)
self.assertAlmostEqual(res_dict['Fd_q1'], 0.8728, places=2)
mu_mat_exp = np.array([[1.04126946, -0.00517882],
[-0.03172471, 1.00574731]])
np.testing.assert_almost_equal(res_dict['mu_matrix'], mu_mat_exp)
| mit |
kinverarity1/mtwaffle | mtwaffle/mt.py | 1 | 18609 | '''General MT functions.'''
import logging
import os.path
import textwrap
from mpl_toolkits.axes_grid1 import ImageGrid
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import numpy.linalg as LA
import scipy
from scipy.interpolate import interp1d
import scipy.optimize
import attrdict
logger = logging.getLogger(__name__)
RAD2DEG = 180 / np.pi
def mrad2deg(arr):
'''Convert milliradians to degrees, and keep it in the first quadrant.'''
arr = arr / 1000 / np.pi * 180
arr2 = np.empty_like(arr)
for i, d in enumerate(arr):
while d < -90:
d += 180
arr2[i] = d
return arr2
def linear_interp(freqs, Zs, newfreqs, extrapolation='remove'):
'''Calculate impedance tensors at new frequencies by linear interpolation.
Args:
- *freqs*: n x 1 ndarray of frequencies
- *Zs*: n x 2 x 2 complex ndarray of impedance tensors
- *newfreqs*: n x 1 ndarray of new frequencies
- *extrapolation*: string, one of:
- 'nan': add np.NaN values for frequencies that require extrapolation;
          this guarantees newfreqs.shape==freqs.shape and newZs.shape==Zs.shape
        - 'remove': alter *newfreqs* such that no extrapolation is done
        - 'error': raise Exception if extrapolation is required
Returns:
- *newfreqs*: m x 1 ndarray of new frequencies
- *newZs*: m x 2 x 2 complex ndarray of impedance tensors
'''
freqs = np.asarray(freqs)
newfreqs = np.asarray(newfreqs)
assert len(freqs) == Zs.shape[0]
# Sort Zs from low to high freq.
indices = np.argsort(freqs)
freqs = freqs[indices]
Zs = Zs[indices]
freq0 = freqs[0]
freq1 = freqs[-1]
if extrapolation == 'nan':
Znan = np.ones((2, 2)) * np.nan
for f in newfreqs:
if f < freq0:
freqs = np.insert(freqs, 0, f, axis=0)
Zs = np.insert(Zs, 0, Znan, axis=0)
if f > freq1:
freqs = np.append(freqs, [f], axis=0)
Zs = np.append(Zs, np.array([Znan]), axis=0)
indices = np.argsort(freqs)
freqs = freqs[indices]
Zs = Zs[indices]
elif extrapolation == 'remove':
newfreqs = np.array([
f for f in newfreqs if f >= freqs[0] and f <= freqs[-1]])
newfreqs.sort()
elif extrapolation == 'error':
for nf in newfreqs:
if nf < freqs[0]:
raise Exception('newfreq %f < (%f-%f)' % (nf, freqs[0], freqs[-1]))
if nf > freqs[-1]:
raise Exception('newfreq %f > (%f-%f)' % (nf, freqs[0], freqs[-1]))
    newZs = np.empty((len(newfreqs), 2, 2), dtype=complex)
for i, j in ((0,0), (0,1), (1,0), (1,1)):
newZs[:,i,j] = (interp1d(freqs, Zs[:,i,j].real, axis=0)(newfreqs) +
interp1d(freqs, Zs[:,i,j].imag, axis=0)(newfreqs) * 1j)
return newfreqs, newZs
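# Usage sketch (illustrative only): resample impedance tensors onto a coarser
# set of frequencies without extrapolating.
#
#     freqs = np.logspace(-3, 3, 25)
#     Zs = np.random.randn(25, 2, 2) + 1j * np.random.randn(25, 2, 2)
#     newfreqs, newZs = linear_interp(freqs, Zs, np.logspace(-2, 2, 10),
#                                     extrapolation='remove')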
def between_freqs(freqs, f0=None, f1=None):
'''Return indices to freqs for items which are between two extremes (f0 and f1).
Args:
- *freqs*: n x 1 ndarray
- *f0, f1*: floats for min and max frequencies
Returns: *indices* to *freqs* array
'''
freqs = np.asarray(freqs)
if f1 is None or f1 > np.max(freqs):
f1 = np.max(freqs)
if f0 is None or f0 < np.min(freqs):
f0 = np.min(freqs)
indices = []
for i, freq in enumerate(freqs):
if freq >= f0 and freq <= f1:
indices.append(i)
return np.asarray(indices)
def ohms2mV_km_nT(zs):
'''Convert imp. tensor(s) from ohms to mV/km/nT.'''
return zs * 796.
def mV_km_nT2ohms(zs):
'''Convert imp. tensor(s) from mV/km/nT to ohms'''
return zs / 796.
def inv_imag_sign(zs):
'''Invert sign of imaginary parts of imp. tensor(s).'''
return zs.real + zs.imag * -1 * 1j
def delete(arrays, indices):
'''Delete *indices* from each ndarray in *arrays*.
See source and ``np.delete`` function.
'''
ret_arrays = []
for array in arrays:
ret_arrays.append(np.delete(array, indices, axis=0))
return ret_arrays
def delete_freq(del_freqs, freqs, arrays, ret_indices=False):
'''Find the indices of *del_freqs* in *freqs* and delete those entries from
each array in *arrays*, and return the new set of frequencies and arrays.
Args:
- *del_freqs*: frequencies to delete from *freqs*
- *freqs*: sequence of frequencies
- *arrays*: sequence of ndarrays
Returns:
- *freqs*: an ndarray of frequencies
- *new_arrays*: a list of the passed *arrays* with the right thing removed.
- (optional) *indices*: indices which were removed.
'''
new_freqs = list(freqs)
for del_freq in del_freqs:
        if del_freq in freqs:
            # NOTE: find_nearest is expected to come from the package's utils
            # helpers, which are not imported at the top of this module.
            i = new_freqs.index(utils.find_nearest(del_freq, freqs))
del new_freqs[i]
arrays = delete(arrays, i)
if ret_indices:
fdels = utils.find_nearest(del_freqs, freqs)
indices = [list(freqs).index(fdel) for fdel in fdels]
return np.array(new_freqs), arrays, indices
else:
return np.array(new_freqs), arrays
def appres(zs, freqs):
'''Convert imp. tensor(s) (mV/km/nT) to apparent resistivity(s) (ohm.m).
Args:
- *freqs*: float or n x 1 ndarray
- *zs*: float, 2 x 2 complex ndarray or n x 2 x 2 complex ndarray with
impedance in units of mV/km/nT
Returns: *res*
- *res*: same shape as *zs*
'''
Zs = np.asarray(zs)
try:
assert Zs.ndim == 3
        res = np.empty_like(Zs, dtype=float)
assert len(freqs) == Zs.shape[0]
for i, f in enumerate(freqs):
res[i, ...] = 0.2 / f * np.abs(Zs[i]) ** 2
return res
except:
return 0.2 / freqs * np.abs(Zs) ** 2
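# Usage sketch (illustrative only): apparent resistivity of a single impedance
# element (mV/km/nT) at 10 Hz.
#
#     appres(100 + 100j, 10.)   # -> 0.2 / 10 * |Z|**2 = 400.0 ohm.m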
def phase(zs):
'''Phase of imp. tensor(s) - calculated in the first quadrant.'''
return np.arctan(zs.imag / zs.real) * RAD2DEG
def phase2(zs):
'''Phase of imp. tensor(s) - calculated with quadrant information preserved.'''
return np.arctan2(zs.imag, zs.real) * RAD2DEG
def phase_abs(zs):
'''Phase of imp. tensor(s) - forced into the first quadrant.'''
return np.arctan(np.abs(zs.imag / zs.real)) * RAD2DEG
def rot(A, theta=0):
'''Rotate 2 x 2 array A by *theta* degrees.'''
    t = float(theta) / RAD2DEG
R = np.array([[np.cos(t), -1 * np.sin(t)], [np.sin(t), np.cos(t)]])
return np.dot(R.T, np.dot(A, R))
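# Usage sketch (illustrative only): rotating a tensor by 30 degrees and back
# recovers the original (up to floating point error).
#
#     Z = np.array([[1., 2.], [3., 4.]])
#     Zr = rot(Z, 30.)
#     np.allclose(rot(Zr, -30.), Z)   # -> True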
def rot_arr(arrs, theta):
'''Rotate a list of 2 x 2 arrays by theta degrees.
Arguments:
arrs (list): list of 2 x 2 arrays.
theta (int): degrees.
'''
return np.array([rot(arr, theta) for arr in arrs])
def lilley_Z1(z):
return (z[0, 0] + z[1, 1]) / 2
def lilley_Z2(z):
return (z[0, 0] - z[1, 1]) / 2
def lilley_Z3(z):
return (z[0, 1] + z[1, 0]) / 2
def lilley_Z4(z):
return (z[0, 1] - z[1, 0]) / 2
def Z3(z):
return (z[0, 1] + z[1, 0]) / 2
def Z4(z):
return (z[0, 0] - z[1, 1]) / 2
def tan4t(z, bit='both'):
Z4cc = Z4(z).real + Z4(z).imag * -1j
num = 2 * (Z3(z) * Z4cc).real
den = np.abs(Z4(z)) ** 2 - np.abs(Z3(z)) ** 2
if bit == 'both':
return num / den
elif bit == 'num':
return num
elif bit == 'den':
return den
def egt(z):
num = tan4t(z, 'num')
den = tan4t(z, 'den')
return np.arctan2(num, den) / 4
def fm9(z):
return np.abs(z[0,1]) ** 2 + np.abs(z[1,0]) ** 2
def ptensors(zs):
'''Calculate phase tensor(s) for imp. tensor(s) (Caldwell 2004).
Arguments:
zs (either 2 x 2 ndarray or [<2x2 ndarray>, <2x2 ndarray>, ...]): impedance tensors
Returns: phase tensors in the same shape as the argument Z.
'''
Z = np.asarray(zs)
if Z.ndim == 2:
return np.dot(LA.inv(Z.real), Z.imag)
elif Z.ndim == 3:
return np.asarray([ptensors(Zi) for Zi in Z])
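# Usage sketch (illustrative only): phase tensor and its beta (skew) angle for
# a single impedance tensor.
#
#     Z = np.array([[1 + 1j, 10 + 5j], [-10 - 5j, -1 - 1j]])
#     P = ptensors(Z)
#     beta = ptens_beta(P)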
def ptens_normskew(zs):
'''Normalised phase tensor skew(s) of Booker (2012).
zs can be either 2 x 2 or n x 2 x 2 for n frequencies.
'''
Z = np.asarray(zs)
if Z.ndim == 2:
P = ptensors(Z)
return np.arctan2(P[0, 1] - P[1, 0], np.trace(P)) * RAD2DEG
elif Z.ndim == 3:
return np.asarray([ptens_normskew(Zi) for Zi in Z])
def ptens_azimuth(zs):
'''Rotation azimuth of phase tensor(s) such that diags are max-ed & Pxx > Pyy.
Find the rotation angle for impedance tensor *Z* such that
1. The sum of squares of the off-diagonals of the phase tensor are minimized
(i.e. coordinate axes parallel to ellipse axes); and
2. ptens[0, 0] > ptens[1, 1]
(i.e. ellipse major axis is parallel to the first coordinate axis)
(mathematical rotation angle, so it's counter-clockwise,
but then the coordinate system is the reverse.)
'''
Z = np.asarray(zs)
if Z.ndim == 2:
def offdiagsum(t):
x = rot(Z, t)
P = ptensors(x)
return P[0, 1] ** 2 + P[1, 0] ** 2
xopt = scipy.optimize.fmin(offdiagsum, 0.1, disp=False)
angle1 = xopt[0]
logger.debug('ptensaz: inital solution=%f' % angle1)
# We want the angle which aligns the 1st coordinate axis with the major
# axis of the ellipse, so need to check the angle 90 degrees away from the
# solution.
if angle1 < 0:
angle1 = 360 + angle1
logger.debug('ptensaz: %f' % angle1)
angle2 = angle1 - 90
if angle2 < 0:
angle2 = 360 + angle2
logger.debug('ptensaz: after removal of negative angles=%f, %f' % (angle1, angle2))
# We want the smaller angle, between 0 and 180 degrees:
if angle1 > 180:
angle1 -= 180
if angle2 > 180:
angle2 -= 180
logger.debug('ptensaz: after adjustment to first 2 quadrants=%f, %f' % (angle1, angle2))
ptens1 = ptensors(rot(Z, angle1))
ptens2 = ptensors(rot(Z, angle2))
if ptens2[0, 0] > ptens1[0, 0]:
return angle2
else:
return angle1
elif Z.ndim == 3:
return np.array([ptens_azimuth(zi) for zi in Z])
def ptens_alpha(ptensors):
'''Phase tensor(s) alpha angle (Caldwell 2004).'''
P = np.asarray(ptensors)
if P.ndim == 2:
return 0.5 * np.arctan2((P[0,1] + P[1,0]), (P[0,0] - P[1,1])) * 180 / np.pi
elif P.ndim == 3:
return np.array([ptens_alpha(pi) for pi in P])
def ptens_beta(ptensors):
'''Phase tensor(s) beta angle (Caldwell 2004).'''
P = np.asarray(ptensors)
if P.ndim == 2:
return 0.5 * np.arctan2((P[0,1] - P[1,0]), (P[0,0] + P[1,1])) * 180 / np.pi
elif P.ndim == 3:
return np.array([ptens_beta(pi) for pi in P])
def ptens_min(ptensors):
'''Minimum angle of phase tensor(s) (Caldwell 2004, A8).'''
P = np.asarray(ptensors)
if P.ndim == 2:
return (np.sqrt(ptens1(P)**2 + ptens3(P)**2)
- np.sqrt(ptens1(P)**2 + ptens3(P)**2 - ptens2(P)**2))
elif P.ndim == 3:
return np.array([ptens_min(pi) for pi in P])
def ptens_max(ptensors):
'''Maximum angle of phase tensor(s) (Caldwell 2004, A9).'''
P = np.asarray(ptensors)
if P.ndim == 2:
return (np.sqrt(ptens1(P)**2 + ptens3(P)**2)
+ np.sqrt(ptens1(P)**2 + ptens3(P)**2 - ptens2(P)**2))
elif P.ndim == 3:
return np.array([ptens_max(pi) for pi in P])
def ptens1(P):
return ptens_tr(P) / 2.
def ptens2(P):
return np.sqrt(ptens_det(P))
def ptens3(P):
return ptens_skew(P) / 2.
def ptens_tr(P):
return P[0, 0] + P[1, 1]
def ptens_skew(ptensors):
'''Skew angle of phase tensor(s) (Caldwell 2004).'''
P = np.asarray(ptensors)
if P.ndim == 2:
return P[0, 1] - P[1, 0]
elif P.ndim == 3:
return np.array([ptens_skew(pi) for pi in ptensors])
def ptens_det(P):
return (P[0, 0] * P[1, 1]) - (P[0, 1] * P[1, 0])
def ptens_theta(ptensors):
    '''Theta angle from phase tensor(s).'''
    return ptens_alpha(ptensors) - ptens_beta(ptensors)
def ptens_ppspl(P):
'''Return difference in degrees between Pmax and Pmin.'''
p1 = np.rad2deg(np.arctan(ptens_max(P)))
p0 = np.rad2deg(np.arctan(ptens_min(P)))
return p1 - p0
def ptens_vectors(ptensors, n_thetas=45):
'''Return n_theta vectors for phase tensor/s around the unit circle.
For each vector v_u on the unit circle (there are n_thetas of these vectors)
calculate P dot v_u and return the family of the resulting vectors, together
with the thetas
Returns: tuple (thetas, vecs)
thetas (ndarray): the angles on the unit circle. Shape is (n_thetas).
vecs (ndarray): the vectors. If ptensors.shape == (2, 2) then vecs.shape
== (n_thetas, 2); if ptensors.shape == (m, 2, 2) then vecs.shape ==
(m, n_thetas, 2).
'''
P = np.asarray(ptensors)
if P.ndim == 2:
thetas = np.linspace(0, 2 * np.pi, n_thetas)
vecs = np.empty((n_thetas, 2))
for i, t in enumerate(thetas):
vunit = np.array([np.cos(t), np.sin(t)])
vecs[i, ...] = np.dot(P, vunit)
return thetas, vecs
elif P.ndim == 3:
vecs_list = []
for pi in P:
            thetas, vecs = ptens_vectors(pi, n_thetas)
vecs_list.append(vecs)
return thetas, np.asarray(vecs_list)
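# Shape sketch for ptens_vectors (synthetic phase tensor, illustrative only):
# a single 2 x 2 input yields thetas of shape (n_thetas,) and vecs of shape
# (n_thetas, 2), as described in the docstring above.
def _ptens_vectors_example(n_thetas=45):
    P_demo = np.array([[1.2, 0.1],
                       [-0.1, 0.8]])
    thetas, vecs = ptens_vectors(P_demo, n_thetas)
    return thetas.shape, vecs.shape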
# def ptens_misfit(thetas, obs_vecs, fwd_vecs):
# '''Return phase tensor misfit vectors and angular misfits.
# Args:
# - *thetas*: n x 1 ndarray of angles
# - *obs_vecs*: n x 2 ndarray from :func:`ptens_vectors`
# - *fwd_vecs*: n x 2 ndarray from :func:`ptens_vectors`
# Returns:
# - *mf_vecs*: n x 2 ndarray of misfit vectors
# - *mf_angles*: n x 1 ndarray of misfit angles between the observed and
# forward resulting vector
# '''
# n = len(thetas)
# mf_vecs = np.empty((n, 2))
# mf_angles = np.empty(n)
# for k, t in enumerate(thetas):
# vd = obs_vecs[k]
# vf = fwd_vecs[k]
def normfreqs(zs, freqs):
'''Normalise imp. tensor(s) magnitude by multiplying by sqrt(period).'''
Z = np.asarray(zs).copy()
factor = np.sqrt(1. / freqs)
if Z.ndim == 3:
for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
Z[:, i, j] = Z[:, i, j].real * factor + Z[:, i, j].imag * factor * 1j
else:
Z = Z.real * factor + Z.imag * factor * 1j
return Z
def bostick(freqs, appres, phases):
    '''Bostick transform of apparent resistivity and phase data.
    Args:
    - *freqs*: n x 1 ndarray of frequencies in Hz
    - *appres*: n x 2 x 2 or n x 1 ndarray of apparent resistivities
    - *phases*: ndarray, same shape as *appres*, units of degrees
    Returns one ndarray stacking (first axis of length 2):
    - *bz*: depths in metres, same shape as *appres*
    - *br*: Bostick resistivities in ohm metres, same shape as *appres*
    '''
freqs = np.asarray(freqs)
appres = np.asarray(appres)
phases = np.asarray(phases)
n = len(freqs)
if appres.shape == (n, 2, 2):
bz = np.empty((n, 2, 2))
for i in (0, 1):
for j in (0, 1):
bz[:, i, j] = 355.4 * np.sqrt(appres[:, i, j] / freqs)
else:
assert appres.shape == freqs.shape
bz = 355.4 * np.sqrt(appres / freqs)
br = appres * (3.1416 / (2 * np.deg2rad(phases)) - 1)
return np.array([bz, br])
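# Minimal 1-D sketch of the Bostick transform above; the frequencies, apparent
# resistivities and phases are invented for illustration, not measured data.
def _bostick_example():
    freqs = np.array([100.0, 10.0, 1.0])      # Hz
    appres = np.array([50.0, 80.0, 120.0])    # ohm.m
    phases = np.array([45.0, 40.0, 35.0])     # degrees
    bz, br = bostick(freqs, appres, phases)
    return bz, br   # depths (m) and Bostick resistivities (ohm.m)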
def z11b(z, b):
return z[0, 0] * (np.cos(b) ** 2) + (z[0, 1] + z[1, 0]) * np.cos(b) * np.sin(b) + z[1, 1] * (np.sin(b) ** 2)
def z12b(z, b):
return z[0, 1] * (np.cos(b) ** 2) + (z[1, 1] - z[0, 0]) * np.cos(b) * np.sin(b) - z[1, 0] * (np.sin(b) ** 2)
def cgamma(Z, out_unit='deg'):
'''Invariant measure of 3D-ness of impedance tensor (Lilley 1998, eq. 37).'''
return catan2(Z[1, 1] + Z[0, 0], Z[0, 1] - Z[1, 0], out_unit)
def cgammas(zs, out_unit='deg'):
'''Invariant measure of 3D-ness of impedance tensors (Lilley 1998, eq. 37).'''
    return np.array([cgamma(Z, out_unit) for Z in zs])
def pos_quads(carr, units='deg'):
'''Move angles from the 3rd and 4th quadrants into the 1st or 2nd quadrants,
using the opposite direction.'''
if units == 'deg':
opp = 180
else:
opp = np.pi
    carr_re = carr.real.copy()
    carr_im = carr.imag.copy()
for i in range(len(carr)):
if carr_re[i] < 0:
carr_re[i] += opp
if carr_im[i] < 0:
carr_im[i] += opp
return carr_re + carr_im * 1j
def catan2(num, den, out_unit='deg'):
'''Complex arctan2 function.
Arguments:
num (float): numerator
den (float): denominator
out_unit (str): either 'deg' or 'rad'
'''
real = np.arctan2(num.real, den.real)
imag = np.arctan2(num.imag, den.imag)
if out_unit == 'deg':
real = real * 180 / np.pi
imag = imag * 180 / np.pi
else:
assert out_unit == 'rad'
return real + imag * 1j
lzdd = lambda z: z[1, 1] - z[0, 0]
lzos = lambda z: z[0, 1] + z[1, 0]
lzds = lambda z: z[1, 1] + z[0, 0]
lzod = lambda z: z[0, 1] - z[1, 0]
def theta_e(z, out_unit='deg'):
'''Electric strike of impedance tensor (Lilley 1998).
Arguments:
out_unit (str): 'deg' or 'rad'
'''
return 0.5 * (catan2(lzdd(z), lzos(z), out_unit) + catan2(lzds(z), lzod(z), out_unit))
def theta_h(z, out_unit='deg'):
'''Magnetic strike of impedance tensor (Lilley 1998).
Arguments:
out_unit (str): 'deg' or 'rad'
'''
return 0.5 * (catan2(lzdd(z), lzos(z), out_unit) - catan2(lzds(z), lzod(z), out_unit))
def theta_es(zs, **kwargs):
'''Electric strike of impedance tensors (Lilley 1998).
See theta_e function for keyword arguments.
'''
    return np.array([theta_e(z, **kwargs) for z in zs])
def theta_hs(zs, **kwargs):
'''Magnetic strike of impedance tensors (Lilley 1998).
See theta_h function for keyword arguments.
'''
    return np.array([theta_h(z, **kwargs) for z in zs])
class L(object):
def __init__(s, T):
T11 = T[0, 0]
T12 = T[0, 1]
T21 = T[1, 0]
T22 = T[1, 1]
s.t1 = (T11 + T22) / 2
s.t2 = (T12 + T21) / 2
s.t3 = (T11 - T22) / 2
s.t4 = (T12 - T21) / 2
s.t0 = np.sqrt(s.t2 ** 2 + s.t3 ** 2)
def t11b(z, b):
return z[0, 0] * (np.cos(b) ** 2) + (z[0, 1] + z[1, 0]) * np.cos(b) * np.sin(b) + z[1, 1] * (np.sin(b) ** 2)
def t12b(z, b):
return z[0, 1] * (np.cos(b) ** 2) + (z[1, 1] - z[0, 0]) * np.cos(b) * np.sin(b) - z[1, 0] * (np.sin(b) ** 2)
callables = {fname: globals()[fname] for fname in dir() if callable(globals()[fname])}
| mit |
WarrenWeckesser/scikits-image | doc/examples/plot_hog.py | 2 | 4450 | """
===============================
Histogram of Oriented Gradients
===============================
The `Histogram of Oriented Gradient
<http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients>`__ (HOG) feature
descriptor [1]_ is popular for object detection.
In the following example, we compute the HOG descriptor and display
a visualisation.
Algorithm overview
------------------
Compute a Histogram of Oriented Gradients (HOG) by
1. (optional) global image normalisation
2. computing the gradient image in x and y
3. computing gradient histograms
4. normalising across blocks
5. flattening into a feature vector
The first stage applies an optional global image normalisation
equalisation that is designed to reduce the influence of illumination
effects. In practice we use gamma (power law) compression, either
computing the square root or the log of each colour channel.
Image texture strength is typically proportional to the local surface
illumination so this compression helps to reduce the effects of local
shadowing and illumination variations.
The second stage computes first order image gradients. These capture
contour, silhouette and some texture information, while providing
further resistance to illumination variations. The locally dominant
colour channel is used, which provides colour invariance to a large
extent. Variant methods may also include second order image derivatives,
which act as primitive bar detectors - a useful feature for capturing,
e.g. bar like structures in bicycles and limbs in humans.
The third stage aims to produce an encoding that is sensitive to
local image content while remaining resistant to small changes in
pose or appearance. The adopted method pools gradient orientation
information locally in the same way as the SIFT [2]_
feature. The image window is divided into small spatial regions,
called "cells". For each cell we accumulate a local 1-D histogram
of gradient or edge orientations over all the pixels in the
cell. This combined cell-level 1-D histogram forms the basic
"orientation histogram" representation. Each orientation histogram
divides the gradient angle range into a fixed number of
predetermined bins. The gradient magnitudes of the pixels in the
cell are used to vote into the orientation histogram.
The fourth stage computes normalisation, which takes local groups of
cells and contrast normalises their overall responses before passing
to next stage. Normalisation introduces better invariance to illumination,
shadowing, and edge contrast. It is performed by accumulating a measure
of local histogram "energy" over local groups of cells that we call
"blocks". The result is used to normalise each cell in the block.
Typically each individual cell is shared between several blocks, but
its normalisations are block dependent and thus different. The cell
thus appears several times in the final output vector with different
normalisations. This may seem redundant but it improves the performance.
We refer to the normalised block descriptors as Histogram of Oriented
Gradient (HOG) descriptors.
The final step collects the HOG descriptors from all blocks of a dense
overlapping grid of blocks covering the detection window into a combined
feature vector for use in the window classifier.
References
----------
.. [1] Dalal, N. and Triggs, B., "Histograms of Oriented Gradients for
Human Detection," IEEE Computer Society Conference on Computer
Vision and Pattern Recognition, 2005, San Diego, CA, USA.
.. [2] David G. Lowe, "Distinctive image features from scale-invariant
keypoints," International Journal of Computer Vision, 60, 2 (2004),
pp. 91-110.
"""
import matplotlib.pyplot as plt
from skimage.feature import hog
from skimage import data, color, exposure
image = color.rgb2gray(data.astronaut())
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualise=True)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)
ax1.axis('off')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Input image')
ax1.set_adjustable('box-forced')
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
ax2.axis('off')
ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax2.set_title('Histogram of Oriented Gradients')
ax2.set_adjustable('box-forced')
plt.show()
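# A self-contained sketch of stage 3 above (the cell-level orientation
# histogram), independent of skimage internals; the 16 x 16 random cell,
# 8 orientation bins and the simple nearest-bin voting are illustrative only.
import numpy as np

def cell_orientation_histogram(cell, orientations=8):
    """Accumulate gradient magnitudes of one cell into orientation bins."""
    gy, gx = np.gradient(cell.astype(float))
    magnitude = np.hypot(gx, gy)
    angle = np.rad2deg(np.arctan2(gy, gx)) % 180            # unsigned gradients
    bin_width = 180.0 / orientations
    bins = np.minimum((angle / bin_width).astype(int), orientations - 1)
    hist = np.zeros(orientations)
    np.add.at(hist, bins.ravel(), magnitude.ravel())        # magnitude-weighted votes
    return hist

demo_cell = np.random.RandomState(0).rand(16, 16)
print(cell_orientation_histogram(demo_cell))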
| bsd-3-clause |
austinban/aima-python | submissions/aartiste/myNN.py | 16 | 6217 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The neural-network model used here is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (100, 50, ),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
'''
Teach a neural net to count to 2 (output 1 when exactly two inputs are 1)
'''
count22 = DataFrame()
count22.data = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
count22.feature_names = ['a', 'b', 'c']
count22.target = [0, 0, 0, 1,
0, 1, 1, 0]
count22.target_names = ['Two']
countMLPC = MLPClassifier(
hidden_layer_sizes = (3,), # (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
# learning_rate = 'constant',
# power_t = 0.5,
max_iter = 10, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
verbose = True # False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
Examples = {
# 'TrumpDefault': {
# 'frame': trumpECHP,
# },
# 'TrumpSGD': {
# 'frame': trumpECHP,
# 'mlpc': mlpc
# },
# 'TrumpScaled': {
# 'frame': trumpScaled,
# },
'Count to 2': {
'frame': count22,
'mlpc': countMLPC
}
} | mit |
brookemosby/titanic | TitanicAttempt/TitanicAttempt.py | 1 | 4116 | import pandas as pd
from sklearn.ensemble import RandomForestClassifier as RFC
def Feature_Engineering(DataFrame,train):
"""
Extracts important features and writes them in usable form
Deletes features of little importance
:param DataFrame: This is the file name of a csv file we wish to convert into a usable DataFrame.
:param train: This is training set corresponding to our csv file. Should be of type pandas.DataFrame
:returns: Returns csv file, after having been modified as a pandas.DataFrame type
"""
DataFrame= pd.read_csv(DataFrame)
titles=DataFrame['Name'].apply(lambda x: x.split(',')[1].split(' ')[1])
title_mapping = {"the":5, "Mr.": 1, "Miss.": 2, "Mrs.": 3, "Master.": 4, "Dr.": 5, "Rev.": 6, "Major.": 7, "Col.": 7, "Mlle.": 2, "Mme.": 3, "Don.": 9, "Lady.": 10, "Countess.": 10, "Jonkheer.": 10, "Sir.": 9, "Capt.": 7, "Ms.": 2, "Dona.": 10}
for k,v in title_mapping.items():
titles[titles == k] = v
DataFrame["Title"] = titles
DataFrame['NameLen']=DataFrame['Name'].apply(lambda x: len(x))
DataFrame['FamSize']=DataFrame['SibSp']+DataFrame['Parch']
DataFrame['Has_Cabin'] = DataFrame["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
cabins=DataFrame['Cabin'].apply(lambda x: str(x)[0])
cabin_mapping={'A':3,'B':5,'C':5,'D':4,'E':4,'F':3,'G':2,'T':1,'n':10}
for k,v in cabin_mapping.items():
cabins[cabins==k]=v
DataFrame['Cabin']=cabins
del DataFrame['Parch']
del DataFrame['SibSp']
del DataFrame['PassengerId']
pclass = pd.get_dummies( DataFrame.Pclass , prefix='Pclass' )
sex = pd.get_dummies(DataFrame.Sex)
embarked = pd.get_dummies(DataFrame.Embarked, prefix='Embarked')
DataFrame=pd.concat([DataFrame,pclass,sex,embarked],axis=1)
del DataFrame['Pclass']
del DataFrame['Name']
del DataFrame['Ticket']
del DataFrame['Sex']
del DataFrame['Embarked']
DataFrame['Fare'].fillna(train['Fare'].median(), inplace = True)
# Mapping Fare
DataFrame.loc[ DataFrame['Fare'] <= 7.91, 'Fare'] = 0
DataFrame.loc[(DataFrame['Fare'] > 7.91) & (DataFrame['Fare'] <= 14.454), 'Fare'] = 1
DataFrame.loc[(DataFrame['Fare'] > 14.454) & (DataFrame['Fare'] <= 31), 'Fare'] = 2
DataFrame.loc[ DataFrame['Fare'] > 31, 'Fare'] = 3
DataFrame['Fare'] = DataFrame['Fare'].astype(int)
DataFrame['Age'].fillna(train['Age'].median(), inplace = True)
return DataFrame
def Create_Random_Forest(train):
"""
Fits Random Forest to training set.
:param train: This is the file name of a csv file we wish to have fitted to a Random Forest, does not need to have features already extracted.
:returns: Returns sklearn.ensemble.Random_Forest_Classifier fitted to training set.
"""
trainDF=pd.read_csv(train)
train=Feature_Engineering(train,trainDF)
RF = RFC(min_samples_split=10, n_estimators= 700, criterion= 'gini', max_depth=None)
RF.fit(train.iloc[:, 1:], train.iloc[:, 0])
return RF
def Produce_Predictions(FileName,train,test):
"""
Produces predictions for testing set, based off of training set.
:param FileName: This is the csv file name we wish to have our predictions exported to.
:param train: This is the file name of a csv file that will be the training set.
:param test: This is the file name of the testing set that predictions will be made for.
:returns: Returns nothing, creates csv file containing predictions for testing set.
"""
TestFileName=test
TrainFileName=train
trainDF=pd.read_csv(train)
train=Feature_Engineering(train,trainDF)
test=Feature_Engineering(test,trainDF)
MLA=Create_Random_Forest(TrainFileName)
predictions = MLA.predict(test)
predictions = pd.DataFrame(predictions, columns=['Survived'])
test = pd.read_csv(TestFileName)
predictions = pd.concat((test.iloc[:, 0], predictions), axis = 1)
predictions.to_csv(FileName, sep=",", index = False)
if __name__ == "__main__":
    # The train/test csv paths below are assumed placeholders for the Kaggle files.
    Produce_Predictions('TestRun.csv', 'train.csv', 'test.csv')  # pragma: no cover
| mit |
evidation-health/bokeh | bokeh/protocol.py | 1 | 3311 | from __future__ import absolute_import
import json
import logging
import datetime as dt
import calendar
import decimal
from .util.serialization import transform_series, transform_array
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
try:
from dateutil.relativedelta import relativedelta
is_dateutil = True
except ImportError:
is_dateutil = False
from .settings import settings
log = logging.getLogger(__name__)
class BokehJSONEncoder(json.JSONEncoder):
def transform_python_types(self, obj):
"""handle special scalars, default to default json encoder
"""
# Pandas Timestamp
if is_pandas and isinstance(obj, pd.tslib.Timestamp):
return obj.value / 10**6.0 #nanosecond to millisecond
elif np.issubdtype(type(obj), np.float):
return float(obj)
elif np.issubdtype(type(obj), np.int):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Datetime
# FIX ME: handle NaT
# datetime is a subclass of date.
elif isinstance(obj, dt.datetime):
return calendar.timegm(obj.timetuple()) * 1000. + obj.microsecond / 1000.
# Date
elif isinstance(obj, dt.date):
return calendar.timegm(obj.timetuple()) * 1000.
# Numpy datetime64
elif isinstance(obj, np.datetime64):
epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
return (epoch_delta / np.timedelta64(1, 'ms'))
# Time
elif isinstance(obj, dt.time):
return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
elif is_dateutil and isinstance(obj, relativedelta):
return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
# Decimal
elif isinstance(obj, decimal.Decimal):
return float(obj)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
#argh! local import!
from .plot_object import PlotObject
from .properties import HasProps
from .colors import Color
## array types
if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj)
elif isinstance(obj, np.ndarray):
return transform_array(obj)
elif isinstance(obj, PlotObject):
return obj.ref
elif isinstance(obj, HasProps):
return obj.changed_properties_with_values()
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
if settings.pretty(False):
kwargs["indent"] = 4
return json.dumps(obj, cls=encoder, allow_nan=False, **kwargs)
deserialize_json = json.loads
serialize_web = serialize_json
deserialize_web = deserialize_json
def status_obj(status):
return {'msgtype': 'status',
'status': status}
def error_obj(error_msg):
return {
'msgtype': 'error',
'error_msg': error_msg}
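# Usage sketch (not part of the original module): serialize a payload that
# mixes plain Python, numpy and datetime values through the encoder above.
def _serialize_json_example():
    payload = {
        'scalar': np.float64(1.5),            # numpy float -> float
        'flag': np.bool_(True),               # numpy bool -> bool
        'when': dt.datetime(2015, 1, 1),      # datetime -> ms since epoch
        'values': np.arange(3),               # ndarray -> transform_array
    }
    return serialize_json(payload)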
| bsd-3-clause |
dhhjx880713/GPy | GPy/models/sparse_gplvm.py | 6 | 1890 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import sys
from .sparse_gp_regression import SparseGPRegression
from ..core import Param
class SparseGPLVM(SparseGPRegression):
"""
Sparse Gaussian Process Latent Variable Model
:param Y: observed data
:type Y: np.ndarray
:param input_dim: latent dimensionality
:type input_dim: int
:param init: initialisation method for the latent space
:type init: 'PCA'|'random'
"""
def __init__(self, Y, input_dim, X=None, kernel=None, init='PCA', num_inducing=10):
if X is None:
from ..util.initialization import initialize_latent
X, fracs = initialize_latent(init, input_dim, Y)
X = Param('latent space', X)
SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
self.link_parameter(self.X, 0)
def parameters_changed(self):
super(SparseGPLVM, self).parameters_changed()
self.X.gradient = self.kern.gradients_X_diag(self.grad_dict['dL_dKdiag'], self.X)
self.X.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'], self.X, self.Z)
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, plot_inducing=True, legend=True,
plot_limits=None,
aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import dim_reduction_plots
return dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, plot_inducing, legend,
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
| bsd-3-clause |
shusenl/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
fenixon/tiponpython | pruebas/quadmesh.py | 14 | 1111 | #!/usr/bin/env python
"""
pcolormesh uses a QuadMesh, a faster generalization of pcolor, but
with some restrictions.
This demo illustrates a bug in quadmesh with masked data.
"""
import numpy as np
from matplotlib.pyplot import figure, show, savefig
from matplotlib import cm, colors
from numpy import ma
n = 12
x = np.linspace(-1.5,1.5,n)
y = np.linspace(-1.5,1.5,n*2)
X,Y = np.meshgrid(x,y);
Qx = np.cos(Y) - np.cos(X)
Qz = np.sin(Y) + np.sin(X)
Qx = (Qx + 1.1)
Z = np.sqrt(X**2 + Y**2)/5;
Z = (Z - Z.min()) / (Z.max() - Z.min())
# The color array can include masked values:
Zm = ma.masked_where(np.fabs(Qz) < 0.5*np.amax(Qz), Z)
fig = figure()
ax = fig.add_subplot(121)
ax.set_axis_bgcolor("#bdb76b")
ax.pcolormesh(Qx,Qz,Z, shading='gouraud')
ax.set_title('Without masked values')
ax = fig.add_subplot(122)
ax.set_axis_bgcolor("#bdb76b")
# You can control the color of the masked region:
#cmap = cm.jet
#cmap.set_bad('r', 1.0)
#ax.pcolormesh(Qx,Qz,Zm, cmap=cmap)
# Or use the default, which is transparent:
col = ax.pcolormesh(Qx,Qz,Zm,shading='gouraud')
ax.set_title('With masked values')
show()
| gpl-3.0 |
jflamant/sphericalEMC | sphericalEMC/shellAlignment.py | 1 | 3181 | # file: shellAlignment.py
#
# This code realigns the differents shells obtained via the spherical EMC
# EMC algorithm. Namely, it computes the cross-correlation on SO(3) between
# two successive shells thanks to their spherical harmonic representation.
#
# See also: shellAlignmentLibrary.py
#
# Copyright (c) J. Flamant, April 2016.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please visit http://www.gnu.org
import numpy as np
import matplotlib.pyplot as plt
import healpy as hp
import os
from shellAlignmentLibrary import *
def L2nside(L):
''' Gives the closest nside corresponding to L, such that L <=2nside+1'''
a = np.log((L-1.0)/2)/np.log(2)
nside = int(2**(np.ceil(a)))
return nside
def cleanifyCoeff(IlmArray, Lmax=7):
coeffmax = hp.Alm.getsize(Lmax-1)
lastit = np.nonzero(IlmArray[0, :])[0].max()
for it in range(lastit + 1):
sizeIlm = np.max(np.nonzero(IlmArray[:, it])[0])
if sizeIlm + 1 == coeffmax:
correctIt = it
IlmOut = IlmArray[:coeffmax, correctIt]
return IlmOut
def getReconstructionsAligned(idShell, Ilm_na, Lgrid, Lmax=7, vmin = 1e-4, nside=128):
Ir = np.zeros((hp.nside2npix(nside), np.size(idShell)))
for id, Ilm in enumerate(Ilm_na):
Ilm = np.ascontiguousarray(Ilm)
if id == 0:
'''align the first shell with theoretically calculated intensity'''
Ith = np.load('first_shell_to_start_alignment.npy')
almth = hp.map2alm(Ith, Lmax-1)
almr = alignShells(almth, Ilm, Lmax-1, Lgrid)
rI = hp.alm2map(almr, nside)
neg = np.where(rI < 0)
rI[neg] = vmin
Ir[:, 0] = rI
else:
''' Align the succesive shells with respect to each preceding shell
'''
almr = alignShells(almr, Ilm, Lmax-1, Lgrid)
rI = hp.alm2map(almr, nside)
neg = np.where(rI < 0)
rI[neg] = vmin
Ir[:, id] = rI
return Ir
# choose the shells and parameters
idShell = np.arange(5, 41)
Lmax = 7
Lgrid = 21 # controls the refinment of the SO3 grid
# load the data and clean
path = os.getcwd()
Ilm_na = np.zeros((np.size(idShell), hp.Alm.getsize(Lmax-1)), dtype=complex)
for id, s in enumerate(idShell):
# load estimated Ilm vectors
IlmArray = np.load(os.path.join(path, 'data', 'Ilm'+str(s)+'.npy'))
Ilm_na[id, :] = cleanifyCoeff(IlmArray, Lmax=Lmax)
# align shells
Ir = getReconstructionsAligned(idShell, Ilm_na, Lgrid, Lmax=Lmax, vmin=1e-4, nside=128)
np.save('IrL'+str(Lmax)+'.npy', Ir)
plt.show()
| gpl-3.0 |
akionakamura/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
zrhans/pythonanywhere | pyscripts/ply_SO2.py | 1 | 3997 | """
DATA,Chuva,Chuva_min,Chuva_max,VVE,VVE_min,VVE_max,DVE,DVE_min,DVE_max,
Temp.,Temp._min,Temp._max,Umidade,Umidade_min,Umidade_max,Rad.,Rad._min,Rad._max,
Pres.Atm.,Pres.Atm._min,Pres.Atm._max,
Temp.Int.,Temp.Int._min,Temp.Int._max,
CH4,CH4_min,CH4_max,HCnM,HCnM_min,HCnM_max,HCT,HCT_min,HCT_max,
SO2,SO2_min,SO2_max,
O3,O3_min,O3_max,
NO,NO_min,NO_max,NO2,NO2_min,NO2_max,NOx,NOx_min,NOx_max,
CO,CO_min,CO_max,
MP10,MP10_min,MP10_max,MPT,MPT_min,MPT_max,
Fin,Fin_min,Fin_max,Vin,Vin_min,Vin_max,Vout,Vout_min,Vout_max
"""
import plotly.plotly as py # Every function in this module will communicate with an external plotly server
import plotly.graph_objs as go
import pandas as pd
DATAFILE = r'/home/zrhans/w3/bns/bns_2016-1.csv'
df = pd.read_csv(DATAFILE, parse_dates=True, sep=',', header=0, index_col='DATA')
r0 = df.SO2
t0 = df.DVE
#print(y)
# Definindo as series dedados
trace1 = go.Scatter(
r=r0,#[6.804985785265978, 3.389596010612268, 5.3814721107464445, 8.059540219420184, 5.318229227868589, 2.9850999356273773, 1.9665870023752283, 6.769265408206589, 4.073401898721205, 6.50437182526841, 7.556369818996649, 4.047456094066775, 7.386662496070009, 5.413624736983931, 7.470716531163242, 7.982110216939738, 4.737814080093381, 4.206453042929911, 5.478604804594065, 4.824520280697772, 5.599600609899737, 6.8667952170824735, 3.0856713662561464, 7.771810943227382, 3.6877944350967193, 5.360356685192225, 5.140446739300986, 6.045445680928888, 6.833920940193708, 3.6207694625408364, 3.9894305834039687, 5.3118244995018, 4.608213480282062, 6.640584716151912, 3.055188854482986, 7.492564163752965, 5.4850781777896715, 3.8977949966209358, 5.976245114026165, 5.447061560910957, 5.37703411681004, 4.690805787731301, 4.711640491184845, 3.629919329394875, 5.957668076372498, 5.357121284391151, 3.849235282821748, 6.250507136319218, 7.122243357145468, 3.399404233835391, 3.5105566722713313, 4.100997603660974, 4.096382100199779, 6.233583074805102, 3.939488526772935, 3.9254450773976983, 6.118132501462698, 3.9404503462852323, 7.583015573261159, 3.513202145338516],
t=t0,#[-30.352944361883697, -25.611459854524096, -12.425227452676078, 13.96138051872652, -4.9509328406707445, -25.692274190905437, 12.46876416157031, -4.913764107032951, -10.967380287631935, 30.814194054910676, 2.4749594311442737, 17.97554375239156, 0.7711305933623585, 6.137488485631386, -14.451963574013497, 28.184534112915948, 12.538680065954864, -8.983230337131154, 5.231285164762417, -64.48900253584051, 11.357486681772649, 3.4540747915125176, 13.924346613092862, -25.364002046782343, -16.81800638602268, -10.260051030559755, -13.212134125591882, 2.5793388653025744, 8.717574965852519, -10.675498719239487, -2.926366012522306, 25.195880754767717, 40.59032932155964, -9.121433630189772, -24.297362381339184, -3.1769445056889345, 10.85049841917252, -31.33205974736701, 4.849567462214266, 15.048276954124187, 3.2951046992599635, -6.197091873129837, -8.77857413578066, 29.549174119407287, -5.1374487928814645, 23.02686048794348, -6.634816578371129, 2.7550149918614695, 21.733250113653973, -24.816994960101756, -7.83054706253201, 28.325796210205855, 12.300977467795988, -21.563157240034112, -19.335516283813288, 26.146443170846787, -1.7060712026841085, 16.071723694996702, 2.053266302846965, -5.097911612332572],
mode='markers',
name='CH4',
marker=dict(
color='rgb(230,171,2)',
size=110,
line=dict(
color='white'
),
opacity=0.7
)
)
layout = go.Layout(
title='BSE01 - Dióxido de Enxôfre',
font=dict(
size=15
),
plot_bgcolor='rgb(223, 223, 223)',
angularaxis=dict(
tickcolor='rgb(253,253,253)'
),
orientation=270,
radialaxis=dict(
ticksuffix='ppm'
),
)
#Gerando multiplos diagramas ld
data = [trace1]
fig = go.Figure(data=data, layout=layout)
# Tracando o objeto
py.plot(
fig,
filename='hans/BSE01/2016/ld_SO2', # name of the file as saved in your plotly account
sharing='public'
)
| apache-2.0 |
Titan-C/scikit-learn | sklearn/linear_model/tests/test_sag.py | 36 | 30671 | # Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import _multinomial_grad_loss_all_samples
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.linear_model.base import make_dataset
from sklearn.linear_model.logistic import _multinomial_loss_grad
from sklearn.utils.fixes import logsumexp
from sklearn.utils.extmath import row_norms
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.datasets import make_blobs, load_iris
from sklearn.base import clone
iris = load_iris()
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True, saga=False):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
gradient_correction = update - gradient_memory[idx]
sum_gradient += gradient_correction
gradient_memory[idx] = update
if saga:
weights -= (gradient_correction *
step_size * (1 - 1. / len(seen)))
if fit_intercept:
gradient_correction = (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1. - 1. / len(seen))
if saga:
intercept -= (step_size * intercept_sum_gradient /
len(seen) * decay) + gradient_correction
else:
intercept -= (step_size * intercept_sum_gradient /
len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True, saga=False):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
gradient_correction = update - (gradient_memory[idx] * entry)
sum_gradient += gradient_correction
if saga:
for j in range(n_features):
weights[j] -= (gradient_correction[j] * step_size *
(1 - 1. / len(seen)) / wscale)
if fit_intercept:
gradient_correction = gradient - gradient_memory[idx]
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1. - 1. / len(seen))
if saga:
intercept -= ((step_size * intercept_sum_gradient /
len(seen) * decay) +
gradient_correction)
else:
intercept -= (step_size * intercept_sum_gradient /
len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1)) +
fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
for solver in ['sag', 'saga']:
if solver == 'sag':
n_iter = 80
else:
# SAGA variance w.r.t. stream order is higher
n_iter = 300
clf = LogisticRegression(solver=solver, fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept,
saga=solver == 'saga')
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept,
saga=solver == 'saga')
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=9)
assert_array_almost_equal(intercept, clf.intercept_, decimal=9)
assert_array_almost_equal(weights2, clf.coef_, decimal=9)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=9)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
# assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
# assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)'''
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = row_norms(X, squared=True).max()
n_samples = X.shape[0]
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for saga in [True, False]:
for fit_intercept in (True, False):
if saga:
L_sqr = (max_squared_sum + alpha + int(fit_intercept))
L_log = (max_squared_sum + 4.0 * alpha +
int(fit_intercept)) / 4.0
mun_sqr = min(2 * n_samples * alpha, L_sqr)
mun_log = min(2 * n_samples * alpha, L_log)
step_size_sqr = 1 / (2 * L_sqr + mun_sqr)
step_size_log = 1 / (2 * L_log + mun_log)
else:
step_size_sqr = 1.0 / (max_squared_sum +
alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha,
"squared",
fit_intercept,
n_samples=n_samples,
is_saga=saga)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept,
n_samples=n_samples,
is_saga=saga)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
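# (Editor's note) A hedged, self-contained sketch of the step-size rule the
# test above exercises; names here are illustrative and this is not the
# library's `get_auto_step_size` implementation.
def _auto_step_size_sketch(max_squared_sum, alpha, loss, fit_intercept,
                           n_samples=None, is_saga=False):
    """Step size implied by the expected values in test_get_auto_step_size."""
    # Per-sample Lipschitz constant of the gradient for each loss.
    if loss == "squared":
        lipschitz = max_squared_sum + alpha + int(fit_intercept)
    elif loss == "log":
        lipschitz = (max_squared_sum + 4.0 * alpha + int(fit_intercept)) / 4.0
    else:
        raise ValueError("this sketch only covers 'squared' and 'log'")
    if is_saga:
        # SAGA uses the smaller step 1 / (2L + min(2 * n * alpha, L)).
        mun = min(2 * n_samples * alpha, lipschitz)
        return 1.0 / (2 * lipschitz + mun)
    # Plain SAG uses the inverse Lipschitz constant.
    return 1.0 / lipschitz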
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
def test_multinomial_loss():
# test if the multinomial loss and gradient computations are consistent
X, y = iris.data, iris.target.astype(np.float64)
n_samples, n_features = X.shape
n_classes = len(np.unique(y))
rng = check_random_state(42)
weights = rng.randn(n_features, n_classes)
intercept = rng.randn(n_classes)
sample_weights = rng.randn(n_samples)
np.abs(sample_weights, sample_weights)
# compute loss and gradient like in multinomial SAG
dataset, _ = make_dataset(X, y, sample_weights, random_state=42)
loss_1, grad_1 = _multinomial_grad_loss_all_samples(dataset, weights,
intercept, n_samples,
n_features, n_classes)
# compute loss and gradient like in multinomial LogisticRegression
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
# comparison
assert_array_almost_equal(grad_1, grad_2)
assert_almost_equal(loss_1, loss_2)
def test_multinomial_loss_ground_truth():
# n_samples, n_features, n_classes = 4, 2, 3
n_classes = 3
X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
y = np.array([0, 1, 2, 0])
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
intercept = np.array([1., 0, -.2])
sample_weights = np.array([0.8, 1, 1, 0.8])
prediction = np.dot(X, weights) + intercept
logsumexp_prediction = logsumexp(prediction, axis=1)
p = prediction - logsumexp_prediction[:, np.newaxis]
loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
grad_1 = np.dot(X.T, diff)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
assert_almost_equal(loss_1, loss_2)
assert_array_almost_equal(grad_1, grad_2)
# ground truth
loss_gt = 11.680360354325961
grad_gt = np.array([[-0.557487, -1.619151, +2.176638],
[-0.903942, +5.258745, -4.354803]])
assert_almost_equal(loss_1, loss_gt)
assert_array_almost_equal(grad_1, grad_gt)
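# (Editor's note) In math form, the hand-computed ground truth above is the
# sample-weighted multinomial negative log-likelihood and its gradient:
#   p_ik  = softmax(x_i @ W + b)_k
#   loss  = -sum_i s_i * sum_k Y_ik * log(p_ik)
#   grad  = X.T @ (s[:, None] * (p - Y))
# with sample weights s, one-hot labels Y and no penalty (the 0.0 passed to
# _multinomial_loss_grad), which is exactly what the assertions compare.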
| bsd-3-clause |
michaelerule/neurotools | models/rbm/rbm_helper.py | 1 | 6454 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import division
from __future__ import nested_scopes
from __future__ import generators
from __future__ import unicode_literals
from __future__ import print_function
from neurotools.system import *
from neurotools.nlab import *
from neurotools.models.rbm.rbm import *
from neurotools.models.rbm.rbm_sample import *
import neurotools.models.rbm.rbm as rb
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import entropy
from numpy.random import *
import glob
import itertools
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning)
from IPython.core.pylabtools import figsize
figsize(14, 7)
try:
from scipy.misc import logsumexp
except:
from scipy.special import logsumexp
def make_filename(RADIUS, BATCH, REG, COV=False, NUM='', prefix='../data'):
# construct a file name template
regstr = '_reg_'
covstr = '_cov_' if COV else '_'
DIR = '%s/cifar_small_%smulti%sbatch%s%s%s_%s'%(prefix,NUM,covstr,BATCH,regstr,REG,RADIUS)
print('reading from',DIR)
FILENAME = DIR+'/rbm_{}_nh{}_T{}.npz'
return DIR, FILENAME
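# (Editor's note) Hedged usage example with made-up arguments:
#   DIR, FILENAME = make_filename(RADIUS=8, BATCH=100, REG=0.001, COV=True)
# gives
#   DIR      == '../data/cifar_small_multi_cov_batch100_reg_0.001_8'
#   FILENAME == DIR + '/rbm_{}_nh{}_T{}.npz'
# so FILENAME.format('param', 64, 1.0) would point at a file such as
# '../data/cifar_small_multi_cov_batch100_reg_0.001_8/rbm_param_nh64_T1.0.npz'.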
def get_trials(DIR):
NHIDDENS = np.array([fn[fn.find('_nh') + 3:fn.rfind('_T')]
for fn in glob.glob(DIR + '/*para*')]).astype(int)
NHIDDENS.sort()
print('hidden units used in simulation: ', NHIDDENS)
TEMPERATURES = ([float(fn[fn.find('_T') + 2:fn.rfind('.npz')])
for fn in glob.glob(DIR + '/*fim*_nh' + str(NHIDDENS[0]) + '_*')])
TEMPERATURES.sort()
print('temperatures used in simulation: ', TEMPERATURES)
return NHIDDENS, TEMPERATURES
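# (Editor's note) Hedged example of the slicing above: for a hypothetical
# file name fn = 'rbm_param_nh64_T1.5.npz',
#   fn[fn.find('_nh') + 3:fn.rfind('_T')]   -> '64'   (hidden units)
#   fn[fn.find('_T') + 2:fn.rfind('.npz')]  -> '1.5'  (temperature)
# NHIDDENS is read from the '*para*' files and TEMPERATURES from the '*fim*'
# files of the smallest hidden-layer size.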
def scattercompare(x,y,xl='',yl='',
tit=None,
nbins=10,
idline=None,
adaptlimits=True,
meanline=True,
shadevariance=False):
'''
    Scatter plot of y against x, with a running mean of the dependent
    variable estimated along the independent variable, and an optional
    identity line.
'''
y = np.array(y)
x = np.array(x)
plt.scatter(x,y,0.1,color=AZURE)
if meanline:
order = np.argsort(x)
m = neurotools.signal.box_filter(y[order],int(np.sqrt(len(x))))
plt.plot(x[order],m,color=BLACK,lw=2.5)
if shadevariance:
mm = neurotools.signal.box_filter((y**2)[order],int(np.sqrt(len(x))))
v = mm - m*m
s = np.sqrt(v)
e = 1.96*s
plt.fill_between(x[order],m-e,m+e,color=(0.1,)*4)
neurotools.graphics.plot.simpleaxis()
plt.xlabel(xl)
plt.ylabel(yl)
if tit is None:
tit = '%s vs. %s'%(yl,xl) if xl!='' and yl!='' else ''
plt.title(tit)
neurotools.graphics.plot.simpleaxis()
xlim(np.min(x),np.max(x))
if np.all(y>=0) and ylim()[0]<=0:
ylim(0,ylim()[1])
# Clever limits
xlim(np.min(x),np.max(x))
yl = ylim()
if adaptlimits:
xmax = round_to_precision(percentile(x,99),3)
xmax = min(xmax,xlim()[1])
xlim(xlim()[0],xmax)
usey = y[x<=xmax]
ymax = round_to_precision(percentile(usey,99),1)
ylim(ylim()[0],ymax)
yl = ylim()
# Identity lines
if idline is True or idline==1:
t = linspace(*(xlim()+(10,)))
plot(t,t,color=RUST,lw=2)
ylim(*yl)
nicey()
fudgey(12)
def barcompare(x,y,xl='',yl='',
tit=None,
nbins=10,
skip=100,
idline=None,
adaptlimits=True,
meanline=True,
shadevariance=False):
'''
    Bar plot of y against x with error bars, estimating the mean of the
    dependent variable in bins of the independent variable, with an optional
    identity line.
'''
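    # (Editor's note) The bin edges below are every `skip`-th value of the
    # sorted independent variable, so each bin holds roughly `skip`
    # observations and the edges follow the empirical quantiles of x.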
y = np.array(y)
x = np.array(x)
bins = array(sorted(x))[::skip]
nbins = len(bins)-1
means,stds,sems = [],[],[]
Deltae = (bins[1:]+bins[:-1])*0.5
for i in range(nbins):
a = bins[i]
b = bins[i+1]
ok = (x>=a) & (x<b)
get = y[ok]
n = sum(ok)
v = var(get)
sem = sqrt(v/n)
means.append(mean(get))
stds.append(sqrt(v))
sems.append(sem*1.96)
mu = array(means)
sigma = array(stds)
dmu = array(sems)
scatter(Deltae,mu,0.1)
plt.errorbar(Deltae, mu, sigma, fmt='.', markersize=4, lw=1, label=u'Observations',zorder=inf)
plot(bins,bins,color=RUST)
ylim(0,20)
xlim(5,20)
nicey()
simpleaxis()
if meanline:
order = np.argsort(x)
m = neurotools.signal.box_filter(y[order],int(np.sqrt(len(x))))
plt.plot(x[order],m,color=BLACK,lw=0.85)
if shadevariance:
mm = neurotools.signal.box_filter((y**2)[order],int(np.sqrt(len(x))))
v = mm - m*m
s = np.sqrt(v)
e = 1.96*s
plt.fill_between(x[order],m-e,m+e,color=(0.1,)*4)
neurotools.graphics.plot.simpleaxis()
plt.xlabel(xl)
plt.ylabel(yl)
if tit is None:
tit = '%s vs. %s'%(yl,xl) if xl!='' and yl!='' else ''
plt.title(tit)
neurotools.graphics.plot.simpleaxis()
xlim(np.min(x),np.max(x))
if np.all(y>=0) and ylim()[0]<=0:
ylim(0,ylim()[1])
# Clever limits
xlim(np.min(x),np.max(x))
yl = ylim()
if adaptlimits:
xmax = round_to_precision(percentile(x,99),3)
xmax = min(xmax,xlim()[1])
xlim(xlim()[0],xmax)
usey = y[x<=xmax]
ymax = round_to_precision(percentile(usey,99),1)
ylim(ylim()[0],ymax)
yl = ylim()
# Identity lines
if idline is True or idline==1:
t = linspace(*(xlim()+(10,)))
plot(t,t,color=RUST,lw=2)
ylim(*yl)
nicey()
fudgey(12)
def zipfplot(Eh):
'''
Zipf's law plot for an energy distribution
'''
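    # (Editor's note, hedged interpretation) With the energies sorted, the
    # spacing between consecutive values estimates the inverse density of
    # states, so -log(diff(Eh)) acts as a crude micro-canonical entropy
    # ('PDF method'); a cumulative-based curve is drawn alongside it for
    # comparison.  Under Zipf's law entropy grows one-for-one with energy,
    # which is why the identity line is added below.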
# PDF approach
Eh = np.array(sorted(Eh))
HEh = -slog(np.diff(Eh))
CDF = np.cumsum(Eh)
HEh_smoothed = neurotools.signal.box_filter(HEh,int(sqrt(len(HEh))))
plot(Eh[1:],HEh_smoothed,color=OCHRE,lw=2,label='PDF method')
plot(Eh,log(CDF),color=AZURE,lw=2,label='CDF method')
xlim(min(Eh),max(Eh))
ylim(min(Eh),max(Eh))
simpleaxis()
t = linspace(*(xlim()+(10,)))
plot(t,t,color=RUST,lw=1.5)
xlabel('Energy')
ylabel('Entropy')
nice_legend()
| gpl-3.0 |
wrobstory/seaborn | seaborn/tests/test_categorical.py | 11 | 75616 | import numpy as np
import pandas as pd
import scipy
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
from .. import categorical as cat
from .. import palettes
class CategoricalFixture(object):
"""Test boxplot (also base class for things like violinplots)."""
rs = np.random.RandomState(30)
n_total = 60
x = rs.randn(n_total / 3, 3)
x_df = pd.DataFrame(x, columns=pd.Series(list("XYZ"), name="big"))
y = pd.Series(rs.randn(n_total), name="y_data")
g = pd.Series(np.repeat(list("abc"), n_total / 3), name="small")
h = pd.Series(np.tile(list("mn"), n_total / 2), name="medium")
u = pd.Series(np.tile(list("jkh"), n_total / 3))
df = pd.DataFrame(dict(y=y, g=g, h=h, u=u))
x_df["W"] = g
class TestCategoricalPlotter(CategoricalFixture):
def test_wide_df_data(self):
p = cat._CategoricalPlotter()
# Test basic wide DataFrame
p.establish_variables(data=self.x_df)
# Check data attribute
for x, y, in zip(p.plot_data, self.x_df[["X", "Y", "Z"]].values.T):
npt.assert_array_equal(x, y)
# Check semantic attributes
nt.assert_equal(p.orient, "v")
nt.assert_is(p.plot_hues, None)
nt.assert_is(p.group_label, "big")
nt.assert_is(p.value_label, None)
# Test wide dataframe with forced horizontal orientation
p.establish_variables(data=self.x_df, orient="horiz")
nt.assert_equal(p.orient, "h")
        # Test exception by trying to hue-group with a wide dataframe
with nt.assert_raises(ValueError):
p.establish_variables(hue="d", data=self.x_df)
def test_1d_input_data(self):
p = cat._CategoricalPlotter()
# Test basic vector data
x_1d_array = self.x.ravel()
p.establish_variables(data=x_1d_array)
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.n_total)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
# Test basic vector data in list form
x_1d_list = x_1d_array.tolist()
p.establish_variables(data=x_1d_list)
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.n_total)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
# Test an object array that looks 1D but isn't
x_notreally_1d = np.array([self.x.ravel(),
self.x.ravel()[:self.n_total / 2]])
p.establish_variables(data=x_notreally_1d)
nt.assert_equal(len(p.plot_data), 2)
nt.assert_equal(len(p.plot_data[0]), self.n_total)
nt.assert_equal(len(p.plot_data[1]), self.n_total / 2)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_2d_input_data(self):
p = cat._CategoricalPlotter()
x = self.x[:, 0]
# Test vector data that looks 2D but doesn't really have columns
p.establish_variables(data=x[:, np.newaxis])
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.x.shape[0])
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
# Test vector data that looks 2D but doesn't really have rows
p.establish_variables(data=x[np.newaxis, :])
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.x.shape[0])
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_3d_input_data(self):
p = cat._CategoricalPlotter()
# Test that passing actually 3D data raises
x = np.zeros((5, 5, 5))
with nt.assert_raises(ValueError):
p.establish_variables(data=x)
def test_list_of_array_input_data(self):
p = cat._CategoricalPlotter()
# Test 2D input in list form
x_list = self.x.T.tolist()
p.establish_variables(data=x_list)
nt.assert_equal(len(p.plot_data), 3)
lengths = [len(v_i) for v_i in p.plot_data]
nt.assert_equal(lengths, [self.n_total / 3] * 3)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_wide_array_input_data(self):
p = cat._CategoricalPlotter()
# Test 2D input in array form
p.establish_variables(data=self.x)
nt.assert_equal(np.shape(p.plot_data), (3, self.n_total / 3))
npt.assert_array_equal(p.plot_data, self.x.T)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_single_long_direct_inputs(self):
p = cat._CategoricalPlotter()
# Test passing a series to the x variable
p.establish_variables(x=self.y)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "h")
nt.assert_equal(p.value_label, "y_data")
nt.assert_is(p.group_label, None)
# Test passing a series to the y variable
p.establish_variables(y=self.y)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "v")
nt.assert_equal(p.value_label, "y_data")
nt.assert_is(p.group_label, None)
# Test passing an array to the y variable
p.establish_variables(y=self.y.values)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "v")
nt.assert_is(p.value_label, None)
nt.assert_is(p.group_label, None)
def test_single_long_indirect_inputs(self):
p = cat._CategoricalPlotter()
# Test referencing a DataFrame series in the x variable
p.establish_variables(x="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "h")
nt.assert_equal(p.value_label, "y")
nt.assert_is(p.group_label, None)
# Test referencing a DataFrame series in the y variable
p.establish_variables(y="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "v")
nt.assert_equal(p.value_label, "y")
nt.assert_is(p.group_label, None)
def test_longform_groupby(self):
p = cat._CategoricalPlotter()
# Test a vertically oriented grouped and nested plot
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(len(p.plot_data), 3)
nt.assert_equal(len(p.plot_hues), 3)
nt.assert_equal(p.orient, "v")
nt.assert_equal(p.value_label, "y")
nt.assert_equal(p.group_label, "g")
nt.assert_equal(p.hue_title, "h")
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test a grouped and nested plot with direct array value data
p.establish_variables("g", self.y.values, "h", self.df)
nt.assert_is(p.value_label, None)
nt.assert_equal(p.group_label, "g")
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test a grouped and nested plot with direct array hue data
p.establish_variables("g", "y", self.h.values, self.df)
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test categorical grouping data
if pandas_has_categoricals:
df = self.df.copy()
df.g = df.g.astype("category")
# Test that horizontal orientation is automatically detected
p.establish_variables("y", "g", "h", data=df)
nt.assert_equal(len(p.plot_data), 3)
nt.assert_equal(len(p.plot_hues), 3)
nt.assert_equal(p.orient, "h")
nt.assert_equal(p.value_label, "y")
nt.assert_equal(p.group_label, "g")
nt.assert_equal(p.hue_title, "h")
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
def test_input_validation(self):
p = cat._CategoricalPlotter()
kws = dict(x="g", y="y", hue="h", units="u", data=self.df)
for input in ["x", "y", "hue", "units"]:
input_kws = kws.copy()
input_kws[input] = "bad_input"
with nt.assert_raises(ValueError):
p.establish_variables(**input_kws)
def test_order(self):
p = cat._CategoricalPlotter()
# Test inferred order from a wide dataframe input
p.establish_variables(data=self.x_df)
nt.assert_equal(p.group_names, ["X", "Y", "Z"])
# Test specified order with a wide dataframe input
p.establish_variables(data=self.x_df, order=["Y", "Z", "X"])
nt.assert_equal(p.group_names, ["Y", "Z", "X"])
for group, vals in zip(["Y", "Z", "X"], p.plot_data):
npt.assert_array_equal(vals, self.x_df[group])
with nt.assert_raises(ValueError):
p.establish_variables(data=self.x, order=[1, 2, 0])
# Test inferred order from a grouped longform input
p.establish_variables("g", "y", data=self.df)
nt.assert_equal(p.group_names, ["a", "b", "c"])
# Test specified order from a grouped longform input
p.establish_variables("g", "y", data=self.df, order=["b", "a", "c"])
nt.assert_equal(p.group_names, ["b", "a", "c"])
for group, vals in zip(["b", "a", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test inferred order from a grouped input with categorical groups
if pandas_has_categoricals:
df = self.df.copy()
df.g = df.g.astype("category")
df.g = df.g.cat.reorder_categories(["c", "b", "a"])
p.establish_variables("g", "y", data=df)
nt.assert_equal(p.group_names, ["c", "b", "a"])
for group, vals in zip(["c", "b", "a"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
df.g = (df.g.cat.add_categories("d")
.cat.reorder_categories(["c", "b", "d", "a"]))
p.establish_variables("g", "y", data=df)
nt.assert_equal(p.group_names, ["c", "b", "d", "a"])
def test_hue_order(self):
p = cat._CategoricalPlotter()
# Test inferred hue order
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.hue_names, ["m", "n"])
# Test specified hue order
p.establish_variables("g", "y", "h", data=self.df,
hue_order=["n", "m"])
nt.assert_equal(p.hue_names, ["n", "m"])
# Test inferred hue order from a categorical hue input
if pandas_has_categoricals:
df = self.df.copy()
df.h = df.h.astype("category")
df.h = df.h.cat.reorder_categories(["n", "m"])
p.establish_variables("g", "y", "h", data=df)
nt.assert_equal(p.hue_names, ["n", "m"])
df.h = (df.h.cat.add_categories("o")
.cat.reorder_categories(["o", "m", "n"]))
p.establish_variables("g", "y", "h", data=df)
nt.assert_equal(p.hue_names, ["o", "m", "n"])
def test_plot_units(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_is(p.plot_units, None)
p.establish_variables("g", "y", "h", data=self.df, units="u")
for group, units in zip(["a", "b", "c"], p.plot_units):
npt.assert_array_equal(units, self.u[self.g == group])
def test_infer_orient(self):
p = cat._CategoricalPlotter()
cats = pd.Series(["a", "b", "c"] * 10)
nums = pd.Series(self.rs.randn(30))
nt.assert_equal(p.infer_orient(cats, nums), "v")
nt.assert_equal(p.infer_orient(nums, cats), "h")
nt.assert_equal(p.infer_orient(nums, None), "h")
nt.assert_equal(p.infer_orient(None, nums), "v")
nt.assert_equal(p.infer_orient(nums, nums, "vert"), "v")
nt.assert_equal(p.infer_orient(nums, nums, "hori"), "h")
with nt.assert_raises(ValueError):
p.infer_orient(cats, cats)
if pandas_has_categoricals:
cats = pd.Series([0, 1, 2] * 10, dtype="category")
nt.assert_equal(p.infer_orient(cats, nums), "v")
nt.assert_equal(p.infer_orient(nums, cats), "h")
with nt.assert_raises(ValueError):
p.infer_orient(cats, cats)
def test_default_palettes(self):
p = cat._CategoricalPlotter()
# Test palette mapping the x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, None, 1)
nt.assert_equal(p.colors, palettes.color_palette("deep", 3))
# Test palette mapping the hue position
p.establish_variables("g", "y", "h", data=self.df)
p.establish_colors(None, None, 1)
nt.assert_equal(p.colors, palettes.color_palette("deep", 2))
def test_default_palette_with_many_levels(self):
with palettes.color_palette(["blue", "red"], 2):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, None, 1)
npt.assert_array_equal(p.colors, palettes.husl_palette(3, l=.7))
def test_specific_color(self):
p = cat._CategoricalPlotter()
# Test the same color for each x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors("blue", None, 1)
blue_rgb = mpl.colors.colorConverter.to_rgb("blue")
nt.assert_equal(p.colors, [blue_rgb] * 3)
# Test a color-based blend for the hue mapping
p.establish_variables("g", "y", "h", data=self.df)
p.establish_colors("#ff0022", None, 1)
rgba_array = np.array(palettes.light_palette("#ff0022", 2))
npt.assert_array_almost_equal(p.colors,
rgba_array[:, :3])
def test_specific_palette(self):
p = cat._CategoricalPlotter()
# Test palette mapping the x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, "dark", 1)
nt.assert_equal(p.colors, palettes.color_palette("dark", 3))
# Test that non-None `color` and `hue` raises an error
p.establish_variables("g", "y", "h", data=self.df)
p.establish_colors(None, "muted", 1)
nt.assert_equal(p.colors, palettes.color_palette("muted", 2))
# Test that specified palette overrides specified color
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", data=self.df)
p.establish_colors("blue", "deep", 1)
nt.assert_equal(p.colors, palettes.color_palette("deep", 3))
def test_dict_as_palette(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", "h", data=self.df)
pal = {"m": (0, 0, 1), "n": (1, 0, 0)}
p.establish_colors(None, pal, 1)
nt.assert_equal(p.colors, [(0, 0, 1), (1, 0, 0)])
def test_palette_desaturation(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", data=self.df)
p.establish_colors((0, 0, 1), None, .5)
nt.assert_equal(p.colors, [(.25, .25, .75)] * 3)
p.establish_colors(None, [(0, 0, 1), (1, 0, 0), "w"], .5)
nt.assert_equal(p.colors, [(.25, .25, .75),
(.75, .25, .25),
(1, 1, 1)])
class TestCategoricalStatPlotter(CategoricalFixture):
    def test_no_bootstrapping(self):
p = cat._CategoricalStatPlotter()
p.establish_variables("g", "y", data=self.df)
p.estimate_statistic(np.mean, None, 100)
npt.assert_array_equal(p.confint, np.array([]))
p.establish_variables("g", "y", "h", data=self.df)
p.estimate_statistic(np.mean, None, 100)
npt.assert_array_equal(p.confint, np.array([[], [], []]))
def test_single_layer_stats(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y)
p.estimate_statistic(np.mean, 95, 10000)
nt.assert_equal(p.statistic.shape, (3,))
nt.assert_equal(p.confint.shape, (3, 2))
npt.assert_array_almost_equal(p.statistic,
y.groupby(g).mean())
for ci, (_, grp_y) in zip(p.confint, y.groupby(g)):
sem = stats.sem(grp_y)
mean = grp_y.mean()
half_ci = stats.norm.ppf(.975) * sem
ci_want = mean - half_ci, mean + half_ci
npt.assert_array_almost_equal(ci_want, ci, 2)
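    # (Editor's note) The reference interval above is the large-sample normal
    # approximation mean +/- z_0.975 * SEM; with 100 observations per group
    # and 10000 bootstrap draws it should agree with the bootstrap CI, which
    # is why the comparison is only made to 2 decimals.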
def test_single_layer_stats_with_units(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 90))
y = pd.Series(np.random.RandomState(0).randn(270))
u = pd.Series(np.repeat(np.tile(list("xyz"), 30), 3))
y[u == "x"] -= 3
y[u == "y"] += 3
p.establish_variables(g, y)
p.estimate_statistic(np.mean, 95, 10000)
stat1, ci1 = p.statistic, p.confint
p.establish_variables(g, y, units=u)
p.estimate_statistic(np.mean, 95, 10000)
stat2, ci2 = p.statistic, p.confint
npt.assert_array_equal(stat1, stat2)
ci1_size = ci1[:, 1] - ci1[:, 0]
ci2_size = ci2[:, 1] - ci2[:, 0]
npt.assert_array_less(ci1_size, ci2_size)
def test_single_layer_stats_with_missing_data(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y, order=list("abdc"))
p.estimate_statistic(np.mean, 95, 10000)
nt.assert_equal(p.statistic.shape, (4,))
nt.assert_equal(p.confint.shape, (4, 2))
mean = y[g == "b"].mean()
sem = stats.sem(y[g == "b"])
half_ci = stats.norm.ppf(.975) * sem
ci = mean - half_ci, mean + half_ci
npt.assert_almost_equal(p.statistic[1], mean)
npt.assert_array_almost_equal(p.confint[1], ci, 2)
npt.assert_equal(p.statistic[2], np.nan)
npt.assert_array_equal(p.confint[2], (np.nan, np.nan))
def test_nested_stats(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
h = pd.Series(np.tile(list("xy"), 150))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y, h)
p.estimate_statistic(np.mean, 95, 50000)
nt.assert_equal(p.statistic.shape, (3, 2))
nt.assert_equal(p.confint.shape, (3, 2, 2))
npt.assert_array_almost_equal(p.statistic,
y.groupby([g, h]).mean().unstack())
for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
sem = stats.sem(hue_y)
mean = hue_y.mean()
half_ci = stats.norm.ppf(.975) * sem
ci_want = mean - half_ci, mean + half_ci
npt.assert_array_almost_equal(ci_want, ci, 2)
def test_nested_stats_with_units(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 90))
h = pd.Series(np.tile(list("xy"), 135))
u = pd.Series(np.repeat(list("ijkijk"), 45))
y = pd.Series(np.random.RandomState(0).randn(270))
y[u == "i"] -= 3
y[u == "k"] += 3
p.establish_variables(g, y, h)
p.estimate_statistic(np.mean, 95, 10000)
stat1, ci1 = p.statistic, p.confint
p.establish_variables(g, y, h, units=u)
p.estimate_statistic(np.mean, 95, 10000)
stat2, ci2 = p.statistic, p.confint
npt.assert_array_equal(stat1, stat2)
ci1_size = ci1[:, 0, 1] - ci1[:, 0, 0]
ci2_size = ci2[:, 0, 1] - ci2[:, 0, 0]
npt.assert_array_less(ci1_size, ci2_size)
def test_nested_stats_with_missing_data(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
h = pd.Series(np.tile(list("xy"), 150))
p.establish_variables(g, y, h,
order=list("abdc"),
hue_order=list("zyx"))
p.estimate_statistic(np.mean, 95, 50000)
nt.assert_equal(p.statistic.shape, (4, 3))
nt.assert_equal(p.confint.shape, (4, 3, 2))
mean = y[(g == "b") & (h == "x")].mean()
sem = stats.sem(y[(g == "b") & (h == "x")])
half_ci = stats.norm.ppf(.975) * sem
ci = mean - half_ci, mean + half_ci
npt.assert_almost_equal(p.statistic[1, 2], mean)
npt.assert_array_almost_equal(p.confint[1, 2], ci, 2)
npt.assert_array_equal(p.statistic[:, 0], [np.nan] * 4)
npt.assert_array_equal(p.statistic[2], [np.nan] * 3)
npt.assert_array_equal(p.confint[:, 0],
np.zeros((4, 2)) * np.nan)
npt.assert_array_equal(p.confint[2],
np.zeros((3, 2)) * np.nan)
def test_estimator_value_label(self):
p = cat._CategoricalStatPlotter()
p.establish_variables("g", "y", data=self.df)
p.estimate_statistic(np.mean, None, 100)
nt.assert_equal(p.value_label, "mean(y)")
p = cat._CategoricalStatPlotter()
p.establish_variables("g", "y", data=self.df)
p.estimate_statistic(np.median, None, 100)
nt.assert_equal(p.value_label, "median(y)")
def test_draw_cis(self):
p = cat._CategoricalStatPlotter()
# Test vertical CIs
p.orient = "v"
f, ax = plt.subplots()
at_group = [0, 1]
confints = [(.5, 1.5), (.25, .8)]
colors = [".2", ".3"]
p.draw_confints(ax, at_group, confints, colors)
lines = ax.lines
for line, at, ci, c in zip(lines, at_group, confints, colors):
x, y = line.get_xydata().T
npt.assert_array_equal(x, [at, at])
npt.assert_array_equal(y, ci)
nt.assert_equal(line.get_color(), c)
plt.close("all")
# Test horizontal CIs
p.orient = "h"
f, ax = plt.subplots()
p.draw_confints(ax, at_group, confints, colors)
lines = ax.lines
for line, at, ci, c in zip(lines, at_group, confints, colors):
x, y = line.get_xydata().T
npt.assert_array_equal(x, ci)
npt.assert_array_equal(y, [at, at])
nt.assert_equal(line.get_color(), c)
plt.close("all")
# Test extra keyword arguments
f, ax = plt.subplots()
p.draw_confints(ax, at_group, confints, colors, lw=4)
line = ax.lines[0]
nt.assert_equal(line.get_linewidth(), 4)
plt.close("all")
class TestBoxPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
order=None, hue_order=None,
orient=None, color=None, palette=None,
saturation=.75, width=.8,
fliersize=5, linewidth=None)
def test_nested_width(self):
p = cat._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.nested_width, .4 * .98)
kws = self.default_kws.copy()
kws["width"] = .6
p = cat._BoxPlotter(**kws)
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.nested_width, .3 * .98)
def test_hue_offsets(self):
p = cat._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", "h", data=self.df)
npt.assert_array_equal(p.hue_offsets, [-.2, .2])
kws = self.default_kws.copy()
kws["width"] = .6
p = cat._BoxPlotter(**kws)
p.establish_variables("g", "y", "h", data=self.df)
npt.assert_array_equal(p.hue_offsets, [-.15, .15])
p = cat._BoxPlotter(**kws)
p.establish_variables("h", "y", "g", data=self.df)
npt.assert_array_almost_equal(p.hue_offsets, [-.2, 0, .2])
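    # (Editor's note, hedged) The expected offsets above are consistent with
    # centering each hue level in an equal slice of the group width:
    #   offsets[j] = -width / 2 + (j + 0.5) * width / n_hues
    # e.g. width=.8 with 2 hues -> [-.2, .2]; width=.6 with 3 hues
    # -> [-.2, 0, .2].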
def test_axes_data(self):
ax = cat.boxplot("g", "y", data=self.df)
nt.assert_equal(len(ax.artists), 3)
plt.close("all")
ax = cat.boxplot("g", "y", "h", data=self.df)
nt.assert_equal(len(ax.artists), 6)
plt.close("all")
def test_box_colors(self):
ax = cat.boxplot("g", "y", data=self.df, saturation=1)
pal = palettes.color_palette("deep", 3)
for patch, color in zip(ax.artists, pal):
nt.assert_equal(patch.get_facecolor()[:3], color)
plt.close("all")
ax = cat.boxplot("g", "y", "h", data=self.df, saturation=1)
pal = palettes.color_palette("deep", 2)
for patch, color in zip(ax.artists, pal * 2):
nt.assert_equal(patch.get_facecolor()[:3], color)
plt.close("all")
def test_draw_missing_boxes(self):
ax = cat.boxplot("g", "y", data=self.df,
order=["a", "b", "c", "d"])
nt.assert_equal(len(ax.artists), 3)
plt.close("all")
def test_missing_data(self):
x = ["a", "a", "b", "b", "c", "c", "d", "d"]
h = ["x", "y", "x", "y", "x", "y", "x", "y"]
y = self.rs.randn(8)
y[-2:] = np.nan
ax = cat.boxplot(x, y)
nt.assert_equal(len(ax.artists), 3)
plt.close("all")
y[-1] = 0
ax = cat.boxplot(x, y, h)
nt.assert_equal(len(ax.artists), 7)
plt.close("all")
def test_boxplots(self):
# Smoke test the high level boxplot options
cat.boxplot("y", data=self.df)
plt.close("all")
cat.boxplot(y="y", data=self.df)
plt.close("all")
cat.boxplot("g", "y", data=self.df)
plt.close("all")
cat.boxplot("y", "g", data=self.df, orient="h")
plt.close("all")
cat.boxplot("g", "y", "h", data=self.df)
plt.close("all")
cat.boxplot("g", "y", "h", order=list("nabc"), data=self.df)
plt.close("all")
cat.boxplot("g", "y", "h", hue_order=list("omn"), data=self.df)
plt.close("all")
cat.boxplot("y", "g", "h", data=self.df, orient="h")
plt.close("all")
def test_axes_annotation(self):
ax = cat.boxplot("g", "y", data=self.df)
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "y")
nt.assert_equal(ax.get_xlim(), (-.5, 2.5))
npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
["a", "b", "c"])
plt.close("all")
ax = cat.boxplot("g", "y", "h", data=self.df)
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "y")
npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
["a", "b", "c"])
npt.assert_array_equal([l.get_text() for l in ax.legend_.get_texts()],
["m", "n"])
plt.close("all")
ax = cat.boxplot("y", "g", data=self.df, orient="h")
nt.assert_equal(ax.get_xlabel(), "y")
nt.assert_equal(ax.get_ylabel(), "g")
nt.assert_equal(ax.get_ylim(), (2.5, -.5))
npt.assert_array_equal(ax.get_yticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_yticklabels()],
["a", "b", "c"])
plt.close("all")
class TestViolinPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
order=None, hue_order=None,
bw="scott", cut=2, scale="area", scale_hue=True,
gridsize=100, width=.8, inner="box", split=False,
orient=None, linewidth=None,
color=None, palette=None, saturation=.75)
def test_split_error(self):
kws = self.default_kws.copy()
kws.update(dict(x="h", y="y", hue="g", data=self.df, split=True))
with nt.assert_raises(ValueError):
cat._ViolinPlotter(**kws)
def test_no_observations(self):
p = cat._ViolinPlotter(**self.default_kws)
x = ["a", "a", "b"]
y = self.rs.randn(3)
y[-1] = np.nan
p.establish_variables(x, y)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[0]), 20)
nt.assert_equal(len(p.support[1]), 0)
nt.assert_equal(len(p.density[0]), 20)
nt.assert_equal(len(p.density[1]), 1)
nt.assert_equal(p.density[1].item(), 1)
p.estimate_densities("scott", 2, "count", True, 20)
nt.assert_equal(p.density[1].item(), 0)
x = ["a"] * 4 + ["b"] * 2
y = self.rs.randn(6)
h = ["m", "n"] * 2 + ["m"] * 2
p.establish_variables(x, y, h)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[1][0]), 20)
nt.assert_equal(len(p.support[1][1]), 0)
nt.assert_equal(len(p.density[1][0]), 20)
nt.assert_equal(len(p.density[1][1]), 1)
nt.assert_equal(p.density[1][1].item(), 1)
p.estimate_densities("scott", 2, "count", False, 20)
nt.assert_equal(p.density[1][1].item(), 0)
def test_single_observation(self):
p = cat._ViolinPlotter(**self.default_kws)
x = ["a", "a", "b"]
y = self.rs.randn(3)
p.establish_variables(x, y)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[0]), 20)
nt.assert_equal(len(p.support[1]), 1)
nt.assert_equal(len(p.density[0]), 20)
nt.assert_equal(len(p.density[1]), 1)
nt.assert_equal(p.density[1].item(), 1)
p.estimate_densities("scott", 2, "count", True, 20)
nt.assert_equal(p.density[1].item(), .5)
x = ["b"] * 4 + ["a"] * 3
y = self.rs.randn(7)
h = (["m", "n"] * 4)[:-1]
p.establish_variables(x, y, h)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[1][0]), 20)
nt.assert_equal(len(p.support[1][1]), 1)
nt.assert_equal(len(p.density[1][0]), 20)
nt.assert_equal(len(p.density[1][1]), 1)
nt.assert_equal(p.density[1][1].item(), 1)
p.estimate_densities("scott", 2, "count", False, 20)
nt.assert_equal(p.density[1][1].item(), .5)
def test_dwidth(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", data=self.df))
p = cat._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .4)
kws.update(dict(width=.4))
p = cat._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .2)
kws.update(dict(hue="h", width=.8))
p = cat._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .2)
kws.update(dict(split=True))
p = cat._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .4)
def test_scale_area(self):
kws = self.default_kws.copy()
kws["scale"] = "area"
p = cat._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
max_before = np.array([d.max() for d in density])
p.scale_area(density, max_before, False)
max_after = np.array([d.max() for d in density])
nt.assert_equal(max_after[0], 1)
before_ratio = max_before[1] / max_before[0]
after_ratio = max_after[1] / max_after[0]
nt.assert_equal(before_ratio, after_ratio)
# Test nested grouping scaling across all densities
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
max_before = np.array([[r.max() for r in row] for row in density])
p.scale_area(density, max_before, False)
max_after = np.array([[r.max() for r in row] for row in density])
nt.assert_equal(max_after[0, 0], 1)
before_ratio = max_before[1, 1] / max_before[0, 0]
after_ratio = max_after[1, 1] / max_after[0, 0]
nt.assert_equal(before_ratio, after_ratio)
# Test nested grouping scaling within hue
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
max_before = np.array([[r.max() for r in row] for row in density])
p.scale_area(density, max_before, True)
max_after = np.array([[r.max() for r in row] for row in density])
nt.assert_equal(max_after[0, 0], 1)
nt.assert_equal(max_after[1, 0], 1)
before_ratio = max_before[1, 1] / max_before[1, 0]
after_ratio = max_after[1, 1] / max_after[1, 0]
nt.assert_equal(before_ratio, after_ratio)
def test_scale_width(self):
kws = self.default_kws.copy()
kws["scale"] = "width"
p = cat._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
p.scale_width(density)
max_after = np.array([d.max() for d in density])
npt.assert_array_equal(max_after, [1, 1])
# Test nested grouping
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
p.scale_width(density)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[1, 1], [1, 1]])
def test_scale_count(self):
kws = self.default_kws.copy()
kws["scale"] = "count"
p = cat._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 20), self.rs.uniform(0, .2, 40)]
counts = np.array([20, 40])
p.scale_count(density, counts, False)
max_after = np.array([d.max() for d in density])
npt.assert_array_equal(max_after, [.5, 1])
# Test nested grouping scaling across all densities
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
[self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
counts = np.array([[5, 40], [100, 50]])
p.scale_count(density, counts, False)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[.05, .4], [1, .5]])
# Test nested grouping scaling within hue
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
[self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
counts = np.array([[5, 40], [100, 50]])
p.scale_count(density, counts, True)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[.125, 1], [1, .5]])
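    # (Editor's note, hedged) The expectations above imply that "count"
    # scaling rescales each violin so its peak equals n_obs / n_max, with
    # n_max taken over all violins when scale_hue=False and within each major
    # group when scale_hue=True (counts [5, 40] vs. group max 40 -> peaks
    # [.125, 1]).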
def test_bad_scale(self):
kws = self.default_kws.copy()
kws["scale"] = "not_a_scale_type"
with nt.assert_raises(ValueError):
cat._ViolinPlotter(**kws)
def test_kde_fit(self):
p = cat._ViolinPlotter(**self.default_kws)
data = self.y
data_std = data.std(ddof=1)
# Bandwidth behavior depends on scipy version
if LooseVersion(scipy.__version__) < "0.11":
# Test ignoring custom bandwidth on old scipy
kde, bw = p.fit_kde(self.y, .2)
nt.assert_is_instance(kde, stats.gaussian_kde)
nt.assert_equal(kde.factor, kde.scotts_factor())
else:
# Test reference rule bandwidth
kde, bw = p.fit_kde(data, "scott")
nt.assert_is_instance(kde, stats.gaussian_kde)
nt.assert_equal(kde.factor, kde.scotts_factor())
nt.assert_equal(bw, kde.scotts_factor() * data_std)
# Test numeric scale factor
kde, bw = p.fit_kde(self.y, .2)
nt.assert_is_instance(kde, stats.gaussian_kde)
nt.assert_equal(kde.factor, .2)
nt.assert_equal(bw, .2 * data_std)
def test_draw_to_density(self):
p = cat._ViolinPlotter(**self.default_kws)
# p.dwidth will be 1 for easier testing
p.width = 2
        # Test vertical plots
support = np.array([.2, .6])
density = np.array([.1, .4])
# Test full vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, False)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.99 * -.4, .99 * .4])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Test left vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, "left")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.99 * -.4, 0])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Test right vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, "right")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [0, .99 * .4])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Switch orientation to test horizontal plots
p.orient = "h"
support = np.array([.2, .5])
density = np.array([.3, .7])
# Test full horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, False)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [.99 * -.7, .99 * .7])
plt.close("all")
# Test left horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, "left")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [.99 * -.7, 0])
plt.close("all")
# Test right horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, "right")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [0, .99 * .7])
plt.close("all")
def test_draw_single_observations(self):
p = cat._ViolinPlotter(**self.default_kws)
p.width = 2
# Test vertical plot
_, ax = plt.subplots()
p.draw_single_observation(ax, 1, 1.5, 1)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [0, 2])
npt.assert_array_equal(y, [1.5, 1.5])
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_single_observation(ax, 2, 2.2, .5)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [2.2, 2.2])
npt.assert_array_equal(y, [1.5, 2.5])
plt.close("all")
def test_draw_box_lines(self):
# Test vertical plot
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
nt.assert_equal(len(ax.lines), 2)
q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
_, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(y, [q25, q75])
_, y = ax.collections[0].get_offsets().T
nt.assert_equal(y, q50)
plt.close("all")
# Test horizontal plot
kws = self.default_kws.copy()
kws.update(dict(x="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
nt.assert_equal(len(ax.lines), 2)
q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
x, _ = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, [q25, q75])
x, _ = ax.collections[0].get_offsets().T
nt.assert_equal(x, q50)
plt.close("all")
def test_draw_quartiles(self):
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_quartiles(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(np.percentile(self.y, [25, 50, 75]), ax.lines):
_, y = line.get_xydata().T
npt.assert_array_equal(y, [val, val])
plt.close("all")
def test_draw_points(self):
p = cat._ViolinPlotter(**self.default_kws)
# Test vertical plot
_, ax = plt.subplots()
p.draw_points(ax, self.y, 0)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, np.zeros_like(self.y))
npt.assert_array_equal(y, self.y)
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_points(ax, self.y, 0)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.y)
npt.assert_array_equal(y, np.zeros_like(self.y))
plt.close("all")
def test_draw_sticks(self):
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
# Test vertical plot
_, ax = plt.subplots()
p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(self.y, ax.lines):
_, y = line.get_xydata().T
npt.assert_array_equal(y, [val, val])
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(self.y, ax.lines):
x, _ = line.get_xydata().T
npt.assert_array_equal(x, [val, val])
plt.close("all")
def test_validate_inner(self):
kws = self.default_kws.copy()
kws.update(dict(inner="bad_inner"))
with nt.assert_raises(ValueError):
cat._ViolinPlotter(**kws)
def test_draw_violinplots(self):
kws = self.default_kws.copy()
# Test single vertical violin
kws.update(dict(y="y", data=self.df, inner=None,
saturation=1, color=(1, 0, 0, 1)))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
npt.assert_array_equal(ax.collections[0].get_facecolors(),
[(1, 0, 0, 1)])
plt.close("all")
# Test single horizontal violin
kws.update(dict(x="y", y=None, color=(0, 1, 0, 1)))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
npt.assert_array_equal(ax.collections[0].get_facecolors(),
[(0, 1, 0, 1)])
plt.close("all")
# Test multiple vertical violins
kws.update(dict(x="g", y="y", color=None,))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
for violin, color in zip(ax.collections, palettes.color_palette()):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
# Test multiple violins with hue nesting
kws.update(dict(hue="h"))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 6)
for violin, color in zip(ax.collections,
palettes.color_palette(n_colors=2) * 3):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
# Test multiple split violins
kws.update(dict(split=True, palette="muted"))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 6)
for violin, color in zip(ax.collections,
palettes.color_palette("muted",
n_colors=2) * 3):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
def test_draw_violinplots_no_observations(self):
kws = self.default_kws.copy()
kws["inner"] = None
# Test single layer of grouping
x = ["a", "a", "b"]
y = self.rs.randn(3)
y[-1] = np.nan
kws.update(x=x, y=y)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), 0)
plt.close("all")
# Test nested hue grouping
x = ["a"] * 4 + ["b"] * 2
y = self.rs.randn(6)
h = ["m", "n"] * 2 + ["m"] * 2
kws.update(x=x, y=y, hue=h)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
nt.assert_equal(len(ax.lines), 0)
plt.close("all")
def test_draw_violinplots_single_observations(self):
kws = self.default_kws.copy()
kws["inner"] = None
# Test single layer of grouping
x = ["a", "a", "b"]
y = self.rs.randn(3)
kws.update(x=x, y=y)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), 1)
plt.close("all")
# Test nested hue grouping
x = ["b"] * 4 + ["a"] * 3
y = self.rs.randn(7)
h = (["m", "n"] * 4)[:-1]
kws.update(x=x, y=y, hue=h)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
nt.assert_equal(len(ax.lines), 1)
plt.close("all")
# Test nested hue grouping with split
kws["split"] = True
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
nt.assert_equal(len(ax.lines), 1)
plt.close("all")
def test_violinplots(self):
# Smoke test the high level violinplot options
cat.violinplot("y", data=self.df)
plt.close("all")
cat.violinplot(y="y", data=self.df)
plt.close("all")
cat.violinplot("g", "y", data=self.df)
plt.close("all")
cat.violinplot("y", "g", data=self.df, orient="h")
plt.close("all")
cat.violinplot("g", "y", "h", data=self.df)
plt.close("all")
cat.violinplot("g", "y", "h", order=list("nabc"), data=self.df)
plt.close("all")
cat.violinplot("g", "y", "h", hue_order=list("omn"), data=self.df)
plt.close("all")
cat.violinplot("y", "g", "h", data=self.df, orient="h")
plt.close("all")
for inner in ["box", "quart", "point", "stick", None]:
cat.violinplot("g", "y", data=self.df, inner=inner)
plt.close("all")
cat.violinplot("g", "y", "h", data=self.df, inner=inner)
plt.close("all")
cat.violinplot("g", "y", "h", data=self.df,
inner=inner, split=True)
plt.close("all")
class TestStripPlotter(CategoricalFixture):
def test_stripplot_vertical(self):
pal = palettes.color_palette()
ax = cat.stripplot("g", "y", data=self.df)
for i, (_, vals) in enumerate(self.y.groupby(self.g)):
x, y = ax.collections[i].get_offsets().T
npt.assert_array_equal(x, np.ones(len(x)) * i)
npt.assert_array_equal(y, vals)
npt.assert_equal(ax.collections[i].get_facecolors()[0, :3], pal[i])
plt.close("all")
@skipif(not pandas_has_categoricals)
    def test_stripplot_horizontal(self):
df = self.df.copy()
df.g = df.g.astype("category")
ax = cat.stripplot("y", "g", data=df)
for i, (_, vals) in enumerate(self.y.groupby(self.g)):
x, y = ax.collections[i].get_offsets().T
npt.assert_array_equal(x, vals)
npt.assert_array_equal(y, np.ones(len(x)) * i)
plt.close("all")
def test_stripplot_jitter(self):
pal = palettes.color_palette()
ax = cat.stripplot("g", "y", data=self.df, jitter=True)
for i, (_, vals) in enumerate(self.y.groupby(self.g)):
x, y = ax.collections[i].get_offsets().T
npt.assert_array_less(np.ones(len(x)) * i - .1, x)
npt.assert_array_less(x, np.ones(len(x)) * i + .1)
npt.assert_array_equal(y, vals)
npt.assert_equal(ax.collections[i].get_facecolors()[0, :3], pal[i])
plt.close("all")
def test_split_nested_stripplot_vertical(self):
pal = palettes.color_palette()
ax = cat.stripplot("g", "y", "h", data=self.df)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, np.ones(len(x)) * i + [-.2, .2][j])
npt.assert_array_equal(y, vals)
fc = ax.collections[i * 2 + j].get_facecolors()[0, :3]
npt.assert_equal(fc, pal[j])
plt.close("all")
@skipif(not pandas_has_categoricals)
def test_split_nested_stripplot_horizontal(self):
df = self.df.copy()
df.g = df.g.astype("category")
ax = cat.stripplot("y", "g", "h", data=df)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, vals)
npt.assert_array_equal(y, np.ones(len(x)) * i + [-.2, .2][j])
plt.close("all")
def test_unsplit_nested_stripplot_vertical(self):
pal = palettes.color_palette()
        # Test an unsplit nested vertical strip plot
ax = cat.stripplot("g", "y", "h", data=self.df, split=False)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, np.ones(len(x)) * i)
npt.assert_array_equal(y, vals)
fc = ax.collections[i * 2 + j].get_facecolors()[0, :3]
npt.assert_equal(fc, pal[j])
plt.close("all")
@skipif(not pandas_has_categoricals)
def test_unsplit_nested_stripplot_horizontal(self):
df = self.df.copy()
df.g = df.g.astype("category")
ax = cat.stripplot("y", "g", "h", data=df, split=False)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, vals)
npt.assert_array_equal(y, np.ones(len(x)) * i)
plt.close("all")
class TestBarPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
estimator=np.mean, ci=95, n_boot=100, units=None,
order=None, hue_order=None,
orient=None, color=None, palette=None,
saturation=.75, errcolor=".26")
def test_nested_width(self):
kws = self.default_kws.copy()
p = cat._BarPlotter(**kws)
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.nested_width, .8 / 2)
p = cat._BarPlotter(**kws)
p.establish_variables("h", "y", "g", data=self.df)
nt.assert_equal(p.nested_width, .8 / 3)
def test_draw_vertical_bars(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
nt.assert_equal(len(ax.patches), len(p.plot_data))
nt.assert_equal(len(ax.lines), len(p.plot_data))
for bar, color in zip(ax.patches, p.colors):
nt.assert_equal(bar.get_facecolor()[:-1], color)
positions = np.arange(len(p.plot_data)) - p.width / 2
for bar, pos, stat in zip(ax.patches, positions, p.statistic):
nt.assert_equal(bar.get_x(), pos)
nt.assert_equal(bar.get_y(), min(0, stat))
nt.assert_equal(bar.get_height(), abs(stat))
nt.assert_equal(bar.get_width(), p.width)
plt.close("all")
def test_draw_horizontal_bars(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", orient="h", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
nt.assert_equal(len(ax.patches), len(p.plot_data))
nt.assert_equal(len(ax.lines), len(p.plot_data))
for bar, color in zip(ax.patches, p.colors):
nt.assert_equal(bar.get_facecolor()[:-1], color)
positions = np.arange(len(p.plot_data)) - p.width / 2
for bar, pos, stat in zip(ax.patches, positions, p.statistic):
nt.assert_equal(bar.get_x(), min(0, stat))
nt.assert_equal(bar.get_y(), pos)
nt.assert_equal(bar.get_height(), p.width)
nt.assert_equal(bar.get_width(), abs(stat))
plt.close("all")
def test_draw_nested_vertical_bars(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", hue="h", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
n_groups, n_hues = len(p.plot_data), len(p.hue_names)
nt.assert_equal(len(ax.patches), n_groups * n_hues)
nt.assert_equal(len(ax.lines), n_groups * n_hues)
for bar in ax.patches[:n_groups]:
nt.assert_equal(bar.get_facecolor()[:-1], p.colors[0])
for bar in ax.patches[n_groups:]:
nt.assert_equal(bar.get_facecolor()[:-1], p.colors[1])
for bar, stat in zip(ax.patches, p.statistic.T.flat):
nt.assert_almost_equal(bar.get_y(), min(0, stat))
nt.assert_almost_equal(bar.get_height(), abs(stat))
positions = np.arange(len(p.plot_data))
for bar, pos in zip(ax.patches[:n_groups], positions):
nt.assert_almost_equal(bar.get_x(), pos - p.width / 2)
nt.assert_almost_equal(bar.get_width(), p.nested_width)
plt.close("all")
def test_draw_nested_horizontal_bars(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
n_groups, n_hues = len(p.plot_data), len(p.hue_names)
nt.assert_equal(len(ax.patches), n_groups * n_hues)
nt.assert_equal(len(ax.lines), n_groups * n_hues)
for bar in ax.patches[:n_groups]:
nt.assert_equal(bar.get_facecolor()[:-1], p.colors[0])
for bar in ax.patches[n_groups:]:
nt.assert_equal(bar.get_facecolor()[:-1], p.colors[1])
positions = np.arange(len(p.plot_data))
for bar, pos in zip(ax.patches[:n_groups], positions):
nt.assert_almost_equal(bar.get_y(), pos - p.width / 2)
nt.assert_almost_equal(bar.get_height(), p.nested_width)
for bar, stat in zip(ax.patches, p.statistic.T.flat):
nt.assert_almost_equal(bar.get_x(), min(0, stat))
nt.assert_almost_equal(bar.get_width(), abs(stat))
plt.close("all")
def test_draw_missing_bars(self):
kws = self.default_kws.copy()
order = list("abcd")
kws.update(x="g", y="y", order=order, data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
nt.assert_equal(len(ax.patches), len(order))
nt.assert_equal(len(ax.lines), len(order))
plt.close("all")
hue_order = list("mno")
kws.update(x="g", y="y", hue="h", hue_order=hue_order, data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
nt.assert_equal(len(ax.patches), len(p.plot_data) * len(hue_order))
nt.assert_equal(len(ax.lines), len(p.plot_data) * len(hue_order))
plt.close("all")
def test_barplot_colors(self):
# Test unnested palette colors
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df,
saturation=1, palette="muted")
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
palette = palettes.color_palette("muted", len(self.g.unique()))
for patch, pal_color in zip(ax.patches, palette):
nt.assert_equal(patch.get_facecolor()[:-1], pal_color)
plt.close("all")
# Test single color
color = (.2, .2, .3, 1)
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df,
saturation=1, color=color)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
for patch in ax.patches:
nt.assert_equal(patch.get_facecolor(), color)
plt.close("all")
# Test nested palette colors
kws = self.default_kws.copy()
kws.update(x="g", y="y", hue="h", data=self.df,
saturation=1, palette="Set2")
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
palette = palettes.color_palette("Set2", len(self.h.unique()))
for patch in ax.patches[:len(self.g.unique())]:
nt.assert_equal(patch.get_facecolor()[:-1], palette[0])
for patch in ax.patches[len(self.g.unique()):]:
nt.assert_equal(patch.get_facecolor()[:-1], palette[1])
plt.close("all")
def test_simple_barplots(self):
ax = cat.barplot("g", "y", data=self.df)
nt.assert_equal(len(ax.patches), len(self.g.unique()))
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "mean(y)")
plt.close("all")
ax = cat.barplot("y", "g", orient="h", data=self.df)
nt.assert_equal(len(ax.patches), len(self.g.unique()))
nt.assert_equal(ax.get_xlabel(), "mean(y)")
nt.assert_equal(ax.get_ylabel(), "g")
plt.close("all")
ax = cat.barplot("g", "y", "h", data=self.df)
nt.assert_equal(len(ax.patches),
len(self.g.unique()) * len(self.h.unique()))
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "mean(y)")
plt.close("all")
ax = cat.barplot("y", "g", "h", orient="h", data=self.df)
nt.assert_equal(len(ax.patches),
len(self.g.unique()) * len(self.h.unique()))
nt.assert_equal(ax.get_xlabel(), "mean(y)")
nt.assert_equal(ax.get_ylabel(), "g")
plt.close("all")
class TestPointPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
estimator=np.mean, ci=95, n_boot=100, units=None,
order=None, hue_order=None,
markers="o", linestyles="-", dodge=0,
join=True, scale=1,
orient=None, color=None, palette=None)
    def test_different_default_colors(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", data=self.df))
p = cat._PointPlotter(**kws)
color = palettes.color_palette()[0]
npt.assert_array_equal(p.colors, [color, color, color])
def test_hue_offsets(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", hue="h", data=self.df))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [0, 0])
kws.update(dict(dodge=.5))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [-.25, .25])
kws.update(dict(x="h", hue="g", dodge=0))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [0, 0, 0])
kws.update(dict(dodge=.3))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [-.15, 0, .15])
def test_draw_vertical_points(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), len(p.plot_data) + 1)
points = ax.collections[0]
nt.assert_equal(len(points.get_offsets()), len(p.plot_data))
x, y = points.get_offsets().T
npt.assert_array_equal(x, np.arange(len(p.plot_data)))
npt.assert_array_equal(y, p.statistic)
for got_color, want_color in zip(points.get_facecolors(),
p.colors):
npt.assert_array_equal(got_color[:-1], want_color)
plt.close("all")
def test_draw_horizontal_points(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", orient="h", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), len(p.plot_data) + 1)
points = ax.collections[0]
nt.assert_equal(len(points.get_offsets()), len(p.plot_data))
x, y = points.get_offsets().T
npt.assert_array_equal(x, p.statistic)
npt.assert_array_equal(y, np.arange(len(p.plot_data)))
for got_color, want_color in zip(points.get_facecolors(),
p.colors):
npt.assert_array_equal(got_color[:-1], want_color)
plt.close("all")
def test_draw_vertical_nested_points(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", hue="h", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
nt.assert_equal(len(ax.collections), 2)
nt.assert_equal(len(ax.lines),
len(p.plot_data) * len(p.hue_names) + len(p.hue_names))
for points, stats, color in zip(ax.collections,
p.statistic.T,
p.colors):
nt.assert_equal(len(points.get_offsets()), len(p.plot_data))
x, y = points.get_offsets().T
npt.assert_array_equal(x, np.arange(len(p.plot_data)))
npt.assert_array_equal(y, stats)
for got_color in points.get_facecolors():
npt.assert_array_equal(got_color[:-1], color)
plt.close("all")
def test_draw_horizontal_nested_points(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
nt.assert_equal(len(ax.collections), 2)
nt.assert_equal(len(ax.lines),
len(p.plot_data) * len(p.hue_names) + len(p.hue_names))
for points, stats, color in zip(ax.collections,
p.statistic.T,
p.colors):
nt.assert_equal(len(points.get_offsets()), len(p.plot_data))
x, y = points.get_offsets().T
npt.assert_array_equal(x, stats)
npt.assert_array_equal(y, np.arange(len(p.plot_data)))
for got_color in points.get_facecolors():
npt.assert_array_equal(got_color[:-1], color)
plt.close("all")
def test_pointplot_colors(self):
# Test a single-color unnested plot
color = (.2, .2, .3, 1)
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df, color=color)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
for line in ax.lines:
nt.assert_equal(line.get_color(), color[:-1])
for got_color in ax.collections[0].get_facecolors():
npt.assert_array_equal(got_color, color)
plt.close("all")
# Test a multi-color unnested plot
palette = palettes.color_palette("Set1", 3)
kws.update(x="g", y="y", data=self.df, palette="Set1")
p = cat._PointPlotter(**kws)
nt.assert_true(not p.join)
f, ax = plt.subplots()
p.draw_points(ax)
for line, pal_color in zip(ax.lines, palette):
npt.assert_array_equal(line.get_color(), pal_color)
for point_color, pal_color in zip(ax.collections[0].get_facecolors(),
palette):
npt.assert_array_equal(point_color[:-1], pal_color)
plt.close("all")
# Test a multi-colored nested plot
palette = palettes.color_palette("dark", 2)
kws.update(x="g", y="y", hue="h", data=self.df, palette="dark")
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
for line in ax.lines[:(len(p.plot_data) + 1)]:
nt.assert_equal(line.get_color(), palette[0])
for line in ax.lines[(len(p.plot_data) + 1):]:
nt.assert_equal(line.get_color(), palette[1])
for i, pal_color in enumerate(palette):
for point_color in ax.collections[i].get_facecolors():
npt.assert_array_equal(point_color[:-1], pal_color)
plt.close("all")
def test_simple_pointplots(self):
ax = cat.pointplot("g", "y", data=self.df)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), len(self.g.unique()) + 1)
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "mean(y)")
plt.close("all")
ax = cat.pointplot("y", "g", orient="h", data=self.df)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), len(self.g.unique()) + 1)
nt.assert_equal(ax.get_xlabel(), "mean(y)")
nt.assert_equal(ax.get_ylabel(), "g")
plt.close("all")
ax = cat.pointplot("g", "y", "h", data=self.df)
nt.assert_equal(len(ax.collections), len(self.h.unique()))
nt.assert_equal(len(ax.lines),
(len(self.g.unique())
* len(self.h.unique())
+ len(self.h.unique())))
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "mean(y)")
plt.close("all")
ax = cat.pointplot("y", "g", "h", orient="h", data=self.df)
nt.assert_equal(len(ax.collections), len(self.h.unique()))
nt.assert_equal(len(ax.lines),
(len(self.g.unique())
* len(self.h.unique())
+ len(self.h.unique())))
nt.assert_equal(ax.get_xlabel(), "mean(y)")
nt.assert_equal(ax.get_ylabel(), "g")
plt.close("all")
class TestCountPlot(CategoricalFixture):
def test_plot_elements(self):
ax = cat.countplot("g", data=self.df)
nt.assert_equal(len(ax.patches), self.g.unique().size)
for p in ax.patches:
nt.assert_equal(p.get_y(), 0)
nt.assert_equal(p.get_height(),
self.g.size / self.g.unique().size)
plt.close("all")
ax = cat.countplot(y="g", data=self.df)
nt.assert_equal(len(ax.patches), self.g.unique().size)
for p in ax.patches:
nt.assert_equal(p.get_x(), 0)
nt.assert_equal(p.get_width(),
self.g.size / self.g.unique().size)
plt.close("all")
ax = cat.countplot("g", hue="h", data=self.df)
nt.assert_equal(len(ax.patches),
self.g.unique().size * self.h.unique().size)
plt.close("all")
ax = cat.countplot(y="g", hue="h", data=self.df)
nt.assert_equal(len(ax.patches),
self.g.unique().size * self.h.unique().size)
plt.close("all")
def test_input_error(self):
with nt.assert_raises(TypeError):
cat.countplot()
with nt.assert_raises(TypeError):
cat.countplot(x="g", y="h", data=self.df)
class TestFactorPlot(CategoricalFixture):
def test_facet_organization(self):
g = cat.factorplot("g", "y", data=self.df)
nt.assert_equal(g.axes.shape, (1, 1))
g = cat.factorplot("g", "y", col="h", data=self.df)
nt.assert_equal(g.axes.shape, (1, 2))
g = cat.factorplot("g", "y", row="h", data=self.df)
nt.assert_equal(g.axes.shape, (2, 1))
g = cat.factorplot("g", "y", col="u", row="h", data=self.df)
nt.assert_equal(g.axes.shape, (2, 3))
plt.close("all")
def test_plot_elements(self):
g = cat.factorplot("g", "y", data=self.df)
nt.assert_equal(len(g.ax.collections), 1)
want_lines = self.g.unique().size + 1
nt.assert_equal(len(g.ax.lines), want_lines)
g = cat.factorplot("g", "y", "h", data=self.df)
want_collections = self.h.unique().size
nt.assert_equal(len(g.ax.collections), want_collections)
want_lines = (self.g.unique().size + 1) * self.h.unique().size
nt.assert_equal(len(g.ax.lines), want_lines)
g = cat.factorplot("g", "y", data=self.df, kind="bar")
want_elements = self.g.unique().size
nt.assert_equal(len(g.ax.patches), want_elements)
nt.assert_equal(len(g.ax.lines), want_elements)
g = cat.factorplot("g", "y", "h", data=self.df, kind="bar")
want_elements = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.patches), want_elements)
nt.assert_equal(len(g.ax.lines), want_elements)
g = cat.factorplot("g", data=self.df, kind="count")
want_elements = self.g.unique().size
nt.assert_equal(len(g.ax.patches), want_elements)
nt.assert_equal(len(g.ax.lines), 0)
g = cat.factorplot("g", hue="h", data=self.df, kind="count")
want_elements = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.patches), want_elements)
nt.assert_equal(len(g.ax.lines), 0)
g = cat.factorplot("g", "y", data=self.df, kind="box")
want_artists = self.g.unique().size
nt.assert_equal(len(g.ax.artists), want_artists)
g = cat.factorplot("g", "y", "h", data=self.df, kind="box")
want_artists = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.artists), want_artists)
g = cat.factorplot("g", "y", data=self.df,
kind="violin", inner=None)
want_elements = self.g.unique().size
nt.assert_equal(len(g.ax.collections), want_elements)
g = cat.factorplot("g", "y", "h", data=self.df,
kind="violin", inner=None)
want_elements = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.collections), want_elements)
g = cat.factorplot("g", "y", data=self.df, kind="strip")
want_elements = self.g.unique().size
nt.assert_equal(len(g.ax.collections), want_elements)
g = cat.factorplot("g", "y", "h", data=self.df, kind="strip")
want_elements = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.collections), want_elements)
plt.close("all")
def test_bad_plot_kind_error(self):
with nt.assert_raises(ValueError):
cat.factorplot("g", "y", data=self.df, kind="not_a_kind")
def test_count_x_and_y(self):
with nt.assert_raises(ValueError):
cat.factorplot("g", "y", data=self.df, kind="count")
def test_plot_colors(self):
ax = cat.barplot("g", "y", data=self.df)
g = cat.factorplot("g", "y", data=self.df, kind="bar")
for p1, p2 in zip(ax.patches, g.ax.patches):
nt.assert_equal(p1.get_facecolor(), p2.get_facecolor())
plt.close("all")
ax = cat.barplot("g", "y", data=self.df, color="purple")
g = cat.factorplot("g", "y", data=self.df,
kind="bar", color="purple")
for p1, p2 in zip(ax.patches, g.ax.patches):
nt.assert_equal(p1.get_facecolor(), p2.get_facecolor())
plt.close("all")
ax = cat.barplot("g", "y", data=self.df, palette="Set2")
g = cat.factorplot("g", "y", data=self.df,
kind="bar", palette="Set2")
for p1, p2 in zip(ax.patches, g.ax.patches):
nt.assert_equal(p1.get_facecolor(), p2.get_facecolor())
plt.close("all")
ax = cat.pointplot("g", "y", data=self.df)
g = cat.factorplot("g", "y", data=self.df)
for l1, l2 in zip(ax.lines, g.ax.lines):
nt.assert_equal(l1.get_color(), l2.get_color())
plt.close("all")
ax = cat.pointplot("g", "y", data=self.df, color="purple")
g = cat.factorplot("g", "y", data=self.df, color="purple")
for l1, l2 in zip(ax.lines, g.ax.lines):
nt.assert_equal(l1.get_color(), l2.get_color())
plt.close("all")
ax = cat.pointplot("g", "y", data=self.df, palette="Set2")
g = cat.factorplot("g", "y", data=self.df, palette="Set2")
for l1, l2 in zip(ax.lines, g.ax.lines):
nt.assert_equal(l1.get_color(), l2.get_color())
plt.close("all")
| bsd-3-clause |
EVEprosper/ProsperAPI | tests/test_split_utils.py | 1 | 17020 | """test_split_utils.py: tests for split_utils.py"""
from os import path
from math import floor
from datetime import datetime, timedelta
from tinydb import TinyDB, Query
import pandas as pd
import pytest
import publicAPI.split_utils as split_utils
import publicAPI.config as api_utils
import publicAPI.crest_utils as crest_utils
import publicAPI.forecast_utils as forecast_utils
import publicAPI.exceptions as exceptions
import helpers
HERE = path.abspath(path.dirname(__file__))
ROOT = path.dirname(HERE)
SPLIT_FILE = path.join(ROOT, 'publicAPI', 'split_info.json')
SPLIT_CACHE = path.join(ROOT, 'publicAPI', 'cache', 'travis_splitcache.json')
DAYS_SINCE_SPLIT = 10
TEST_DATE = datetime.utcnow() - timedelta(days=DAYS_SINCE_SPLIT)
FUTURE_DATE = datetime.utcnow() + timedelta(days=DAYS_SINCE_SPLIT)
DEMO_SPLIT = {
'type_id':35,
'type_name':'Tritanium',
'original_id':34,
'new_id':35,
'split_date':TEST_DATE.strftime('%Y-%m-%d'),
'bool_mult_div':'False',
'split_rate': 10
}
DEMO_UNSPLIT = {
'type_id':34,
'type_name':'Pyerite',
'original_id':34,
'new_id':35,
'split_date':FUTURE_DATE.strftime('%Y-%m-%d'),
'bool_mult_div':'True',
'split_rate': 10
}
DEMO_NOSPLIT = {
'type_id':35,
'type_name':'Tritanium',
'original_id':35,
'new_id':35,
'split_date':TEST_DATE.strftime('%Y-%m-%d'),
'bool_mult_div':'False',
'split_rate': 10
}
ROOT_CONFIG = helpers.get_config(
path.join(ROOT, 'scripts', 'app.cfg')
)
TEST_CONFIG = helpers.get_config(
path.join(HERE, 'test_config.cfg')
)
def test_splitinfo_happypath():
"""test SplitInfo behavior"""
split_obj = split_utils.SplitInfo(DEMO_SPLIT)
## Validate data inside obj ##
assert split_obj.type_id == DEMO_SPLIT['type_id']
assert split_obj.type_name == DEMO_SPLIT['type_name']
assert split_obj.original_id == DEMO_SPLIT['original_id']
assert split_obj.new_id == DEMO_SPLIT['new_id']
assert split_obj.split_date == datetime.strptime(DEMO_SPLIT['split_date'], '%Y-%m-%d')
assert split_obj.date_str == DEMO_SPLIT['split_date']
assert split_obj.bool_mult_div == False
assert split_obj.split_rate == DEMO_SPLIT['split_rate']
assert split_obj.current_typeid() == DEMO_SPLIT['new_id']
## Validate magicmethod behavior ##
assert int(split_obj) == DEMO_SPLIT['new_id']
assert bool(split_obj) #should be True
assert str(split_obj) == DEMO_SPLIT['type_name']
test_price = 3.5
test_volume = 1e6
expected_price = test_price / DEMO_SPLIT['split_rate']
expected_volume = test_volume * DEMO_SPLIT['split_rate']
assert test_price * split_obj == expected_price
assert split_obj * test_price == expected_price
assert test_volume / split_obj == expected_volume
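# The operator checks above encode the key property of a forward split: with a
# 10:1 rate the adjusted price is divided by 10 while the adjusted volume is
# multiplied by 10, so the total traded value is unchanged.  A minimal
# illustrative check of that invariant (not part of split_utils):
def _split_preserves_value(price, volume, split_rate, tol=1e-6):
    adjusted_price = price / split_rate
    adjusted_volume = volume * split_rate
    return abs(adjusted_price * adjusted_volume - price * volume) < tol
assert _split_preserves_value(3.5, 1e6, DEMO_SPLIT['split_rate'])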
def test_splitinfo_reverse():
"""validate SplitInfo with "True" bool_mult_div"""
split_obj = split_utils.SplitInfo(DEMO_UNSPLIT)
## Validate data inside obj ##
assert split_obj.bool_mult_div == True
assert split_obj.current_typeid() == DEMO_UNSPLIT['original_id']
test_price = 3.5
test_volume = 1e6
expected_price = test_price * DEMO_SPLIT['split_rate']
expected_volume = test_volume / DEMO_SPLIT['split_rate']
assert test_price * split_obj == expected_price
assert split_obj * test_price == expected_price
assert test_volume / split_obj == expected_volume
def test_splitinfo_throws():
"""make sure bad behavior is caught"""
short_profile = dict(DEMO_SPLIT)
short_profile.pop('split_rate', None)
with pytest.raises(exceptions.InvalidSplitConfig):
split_obj = split_utils.SplitInfo(short_profile)
bad_split = dict(DEMO_SPLIT)
bad_split['split_rate'] = 'bacon'
with pytest.raises(exceptions.InvalidSplitConfig):
split_obj = split_utils.SplitInfo(bad_split)
bad_date = dict(DEMO_SPLIT)
bad_date['split_date'] = 'Tomorrow'
with pytest.raises(exceptions.InvalidSplitConfig):
split_obj = split_utils.SplitInfo(bad_date)
bad_bool = dict(DEMO_SPLIT)
bad_bool['bool_mult_div'] = 'bacon'
with pytest.raises(exceptions.InvalidSplitConfig):
split_obj = split_utils.SplitInfo(bad_bool)
def test_load_data():
"""push data into global scope for testing"""
api_utils.SPLIT_INFO = split_utils.read_split_info()
demosplit_obj = split_utils.SplitInfo(DEMO_SPLIT)
revrsplit_obj = split_utils.SplitInfo(DEMO_UNSPLIT)
api_utils.SPLIT_INFO[demosplit_obj.type_id] = demosplit_obj
api_utils.SPLIT_INFO[revrsplit_obj.type_id] = revrsplit_obj
def test_datetime_helper():
"""validate datetime helper"""
short_string = '2017-04-01'
long_string = '2017-04-01T12:14:10'
bad_string = '2017-04-01T12:14:10-07:00'
short_datetime = split_utils.datetime_helper(short_string)
long_datetime = split_utils.datetime_helper(long_string)
with pytest.raises(ValueError):
bad_datetime = split_utils.datetime_helper(bad_string)
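# For context, a minimal sketch of the parsing contract the test above pins
# down for split_utils.datetime_helper: accept date-only and second-resolution
# ISO strings, and let anything else (such as a string with a UTC-offset
# suffix) raise ValueError.  This is an illustration of the expected
# behaviour, not the actual implementation in publicAPI.split_utils.
def _datetime_helper_sketch(date_str):
    try:
        return datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S')
    except ValueError:
        return datetime.strptime(date_str, '%Y-%m-%d')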
def test_split_history_throws():
"""make sure fetch_split_history throws expected errors"""
with pytest.raises(exceptions.NoSplitConfigFound):
split_obj = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
int(TEST_CONFIG.get('TEST', 'alt_id')) + 1,
api_utils.SwitchCCPSource.EMD
)
SPLIT_CACHE_FILE = path.join(
ROOT, 'publicAPI', 'cache', TEST_CONFIG.get('TEST', 'splitcache_file')
)
def test_fetch_cache_data():
"""fetch data from cache and make sure shape is correct"""
cache_data = split_utils.fetch_split_cache_data(
TEST_CONFIG.get('TEST', 'region_id'),
TEST_CONFIG.get('TEST', 'type_id'),
#split_cache_file=SPLIT_CACHE_FILE
)
missing_keys = set(cache_data.columns.values) - set(split_utils.KEEP_COLUMNS)
assert missing_keys == set()
def test_fetch_cache_fail():
"""make sure bad-path is covered"""
with pytest.raises(exceptions.NoSplitDataFound):
cache_data = split_utils.fetch_split_cache_data(
TEST_CONFIG.get('TEST', 'region_id'),
int(TEST_CONFIG.get('TEST', 'bad_typeid')),
#split_cache_file=SPLIT_CACHE_FILE
)
def test_execute_split_forward():
"""check if execute_split works as expected"""
split_obj = split_utils.SplitInfo(DEMO_SPLIT)
cache_data = split_utils.fetch_split_cache_data(
TEST_CONFIG.get('TEST', 'region_id'),
TEST_CONFIG.get('TEST', 'type_id'),
#split_cache_file=SPLIT_CACHE_FILE
)
split_data = split_utils.execute_split(
        cache_data.copy(),  # copy so the cached DataFrame is not mutated in place
split_obj
)
price_mod = split_obj.split_rate
if not split_obj.bool_mult_div:
price_mod = 1/price_mod
for col_name in split_utils.PRICE_KEYS:
price_diff = abs(split_data[col_name] - (cache_data[col_name] * price_mod))
assert price_diff.max() < float(TEST_CONFIG.get('TEST', 'float_limit'))
        # float comparison is imprecise, so just require the difference to be trivially small
vol_mod = 1/price_mod
for col_name in split_utils.VOLUME_KEYS:
vol_diff = abs(split_data[col_name] - (cache_data[col_name] * vol_mod))
assert vol_diff.max() < float(TEST_CONFIG.get('TEST', 'float_limit'))
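# The scaling above reduces to a single pair of factors: when bool_mult_div is
# False (a forward split) prices shrink by the split rate and volumes grow by
# the same factor, and a reverse split flips both.  A small illustrative
# helper capturing that relationship (not part of split_utils):
def _expected_modifiers(split_obj):
    price_mod = split_obj.split_rate
    if not split_obj.bool_mult_div:
        price_mod = 1 / price_mod
    return price_mod, 1 / price_mod
# e.g. a 10:1 forward split gives (0.1, 10.0) for (price_mod, vol_mod).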
def test_execute_split_backwards():
"""check if execute_split works as expected"""
split_obj = split_utils.SplitInfo(DEMO_UNSPLIT)
cache_data = split_utils.fetch_split_cache_data(
TEST_CONFIG.get('TEST', 'region_id'),
TEST_CONFIG.get('TEST', 'type_id'),
#split_cache_file=SPLIT_CACHE_FILE
)
split_data = split_utils.execute_split(
        cache_data.copy(),  # copy so the cached DataFrame is not mutated in place
split_obj
)
price_mod = split_obj.split_rate
if not split_obj.bool_mult_div:
price_mod = 1/price_mod
for col_name in split_utils.PRICE_KEYS:
price_diff = abs(split_data[col_name] - (cache_data[col_name] * price_mod))
assert price_diff.max() < float(TEST_CONFIG.get('TEST', 'float_limit'))
vol_mod = 1/price_mod
for col_name in split_utils.VOLUME_KEYS:
vol_diff = abs(split_data[col_name] - (cache_data[col_name] * vol_mod))
assert vol_diff.max() < float(TEST_CONFIG.get('TEST', 'float_limit'))
@pytest.mark.incremental
class TestNoSplit:
"""validate behavior if there's no split to perform"""
test_type_id = DEMO_UNSPLIT['type_id']
def test_future_split_esi(self):
"""validate on ESI"""
test_data_esi = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
api_utils.SwitchCCPSource.ESI,
config=ROOT_CONFIG
)
assert test_data_esi.equals(
crest_utils.fetch_market_history(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
config=ROOT_CONFIG
)
)
def test_future_split_emd(self):
"""valdiate with EMD source"""
test_data_emd = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
fetch_source=api_utils.SwitchCCPSource.EMD,
data_range=TEST_CONFIG.get('TEST', 'history_count'),
config=ROOT_CONFIG
)
emd_data_raw = forecast_utils.fetch_market_history_emd(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
data_range=TEST_CONFIG.get('TEST', 'history_count'),
config=ROOT_CONFIG
)
assert test_data_emd.equals(forecast_utils.parse_emd_data(emd_data_raw['result']))
def test_short_split(self):
"""make sure escaped if split was too far back"""
short_days = floor(DAYS_SINCE_SPLIT/2)
test_data_emd = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
DEMO_SPLIT['type_id'],
data_range=short_days,
config=ROOT_CONFIG
)
emd_data_raw = forecast_utils.fetch_market_history_emd(
TEST_CONFIG.get('TEST', 'region_id'),
DEMO_SPLIT['type_id'],
data_range=short_days,
config=ROOT_CONFIG
)
assert test_data_emd.equals(
forecast_utils.parse_emd_data(emd_data_raw['result']))
def days_since_date(date_str):
"""return number of days since date requested
Args:
date_str (str)
Returns
int: number of days since date
"""
demo_date = split_utils.datetime_helper(date_str)
delta = datetime.utcnow() - demo_date
return delta.days
def prep_raw_data(
data,
min_date
):
"""clean up data for testing
Args:
data (:obj:`pandas.DataFrame`): dataframe to clean (COPY)
min_date (str): datetime to filter to
Returns:
pandas.DataFrame: clean_data
"""
clean_data = data[data.date >= min_date]
clean_data = clean_data[split_utils.KEEP_COLUMNS]
clean_data.sort_values(
by='date',
ascending=False,
inplace=True
)
return clean_data
def validate_plain_data(
raw_data,
split_data,
float_limit=float(TEST_CONFIG.get('TEST', 'float_limit'))
):
"""validate data that did not split
Args:
raw_data (:obj:`pandas.DataFrame`): raw data (A group)
split_data (:obj:`pandas.DataFrame`): split data (B group)
float_limit (float): maximum deviation for equality test
Returns:
(None): asserts internally
"""
for column in split_data.columns.values:
print(split_data[column])
print(raw_data[column])
if column == 'date':
assert split_data[column].equals(raw_data[column])
elif column == 'index':
continue
else:
diff = abs(pd.to_numeric(split_data[column]) - pd.to_numeric(raw_data[column]))
assert diff.max() < float_limit
def validate_split_data(
raw_data,
split_data,
split_obj,
float_limit=float(TEST_CONFIG.get('TEST', 'float_limit'))
):
"""validate data that did not split
Args:
raw_data (:obj:`pandas.DataFrame`): raw data (A group)
split_data (:obj:`pandas.DataFrame`): split data (B group)
split_obj (:obj:`split_utils.SplitInfo`): split information
float_limit (float): maximum deviation for equality test
Raises:
AssertionError: asserts expected shapes
"""
for column in split_data.columns.values:
#print(split_data[column])
#print(raw_data[column])
if column == 'date':
assert split_data[column].equals(raw_data[column])
elif column == 'index':
continue
elif column in split_utils.PRICE_KEYS:
diff = abs(
pd.to_numeric(split_data[column]) - pd.to_numeric(raw_data[column]) * split_obj
)
assert diff.max() < float_limit
else:
diff = abs(
pd.to_numeric(split_data[column]) - pd.to_numeric(raw_data[column]) / split_obj
)
assert diff.max() < float_limit
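# Note: the column checks above lean on SplitInfo's operator overloads
# exercised in test_splitinfo_happypath: multiplying a raw price column by the
# split object applies the price adjustment, while dividing a raw volume
# column by it applies the inverse volume adjustment, so one float_limit
# tolerance covers both kinds of columns.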
class TestSplit:
"""test end-to-end behavior on fetch_split_history"""
test_type_id = DEMO_SPLIT['type_id']
test_original_id = DEMO_SPLIT['original_id']
def test_forward_happypath_esi(self):
"""test a forward-split: ESI"""
split_obj = split_utils.SplitInfo(DEMO_SPLIT)
raw_esi_data1 = crest_utils.fetch_market_history(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
config=ROOT_CONFIG
)
raw_esi_data2 = crest_utils.fetch_market_history(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_original_id,
config=ROOT_CONFIG
)
split_data = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
DEMO_SPLIT['type_id'],
fetch_source=api_utils.SwitchCCPSource.ESI,
config=ROOT_CONFIG
)
#split_data.to_csv('split_data_esi.csv', index=False)
## Doctor data for testing ##
min_split_date = split_data.date.min()
raw_esi_data1 = prep_raw_data(
raw_esi_data1.copy(),
min_split_date
)
raw_esi_data2 = prep_raw_data(
raw_esi_data2.copy(),
min_split_date
)
pre_split_data = split_data[split_data.date <= split_obj.date_str].reset_index()
pre_raw_data = raw_esi_data2[raw_esi_data2.date <= split_obj.date_str].reset_index()
post_split_data = split_data[split_data.date > split_obj.date_str].reset_index()
post_raw_data = raw_esi_data1[raw_esi_data1.date > split_obj.date_str].reset_index()
## Validate pre/post Split values ##
validate_plain_data(
post_raw_data,
post_split_data
)
validate_split_data(
pre_raw_data,
pre_split_data,
split_obj
)
def test_forward_happypath_emd(self):
"""test a forward-split: emd"""
split_obj = split_utils.SplitInfo(DEMO_SPLIT)
raw_emd_data = forecast_utils.fetch_market_history_emd(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_type_id,
data_range=TEST_CONFIG.get('TEST', 'history_count'),
config=ROOT_CONFIG
)
raw_emd_data1 = forecast_utils.parse_emd_data(raw_emd_data['result'])
raw_emd_data = forecast_utils.fetch_market_history_emd(
TEST_CONFIG.get('TEST', 'region_id'),
self.test_original_id,
data_range=TEST_CONFIG.get('TEST', 'history_count'),
config=ROOT_CONFIG
)
raw_emd_data2 = forecast_utils.parse_emd_data(raw_emd_data['result'])
split_data = split_utils.fetch_split_history(
TEST_CONFIG.get('TEST', 'region_id'),
DEMO_SPLIT['type_id'],
api_utils.SwitchCCPSource.EMD,
config=ROOT_CONFIG
)
## Doctor data for testing ##
min_split_date = split_data.date.min()
raw_emd_data1 = prep_raw_data(
raw_emd_data1.copy(),
min_split_date
)
raw_emd_data2 = prep_raw_data(
raw_emd_data2.copy(),
min_split_date
)
pre_split_data = split_data[split_data.date <= split_obj.date_str].reset_index()
pre_raw_data = raw_emd_data2[raw_emd_data2.date <= split_obj.date_str].reset_index()
post_split_data = split_data[split_data.date > split_obj.date_str].reset_index()
post_raw_data = raw_emd_data1[raw_emd_data1.date > split_obj.date_str].reset_index()
## Validate pre/post Split values ##
validate_plain_data(
post_raw_data,
post_split_data
)
validate_split_data(
pre_raw_data,
pre_split_data,
split_obj
)
| mit |
mattilyra/scikit-learn | sklearn/ensemble/tests/test_forest.py | 21 | 41768 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
    # XXX: Remove this test in 0.19 once transform support is removed from
    # estimators.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
    assert_greater(X_new.shape[1], 0)
    assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
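# For reference, the nested loops in mdi_importance above compute the
# population version of the Mean Decrease of Impurity from Louppe et al.
# (2013), namely
#     Imp(X_m) = sum_{k=0..p-1} 1 / (binomial(k, p) * (p - k))
#                * sum_{B subset of V \ {X_m}, |B| = k} I(X_m ; Y | B)
# (with binomial as defined above, i.e. p choose k), where the conditional
# mutual information I(X_m ; Y | B) is expanded as the P(B=b)-weighted entropy
# drop H(Y | B=b) - sum_children (n_child / n_b) * H(Y_child) over every joint
# value b of the conditioning set B.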
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
    # Check picklability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
    # make sure that it is linearly separable,
    # even after being projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
    # one leaf active per data point per tree in the forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
    # Test if leaves contain at least min_samples_leaf training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
        node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output problems.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
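    # A forest grown in two warm-started increments (5 trees, then 5 more)
    # should match a 10-tree forest fit in a single call.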
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
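    # decision_path stacks the per-tree node indicators column-wise;
    # n_nodes_ptr holds the column offset of each estimator's block.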
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
| bsd-3-clause |
wheeler-microfluidics/droplet-planning | rename.py | 1 | 2568 | import sys
import pandas as pd
from path_helpers import path
def main(root, old_name, new_name):
names = pd.Series([old_name, new_name], index=['old', 'new'])
underscore_names = names.map(lambda v: v.replace('-', '_'))
camel_names = names.str.split('-').map(lambda x: ''.join([y.title()
for y in x]))
# Replace all occurrences of provided original name with new name, and all
# occurrences where dashes (i.e., '-') are replaced with underscores.
#
# Dashes are used in Python package names, but underscores are used in
# Python module names.
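    # Illustration (new name chosen only for this example): renaming
    # 'droplet-planning' to 'my-new-project' also rewrites
    # 'droplet_planning' -> 'my_new_project' and the CamelCase form
    # 'DropletPlanning' -> 'MyNewProject'.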
for p in path(root).walkfiles():
data = p.bytes()
if '.git' not in p and (names.old in data or
underscore_names.old in data or
camel_names.old in data):
p.write_bytes(data.replace(names.old, names.new)
.replace(underscore_names.old, underscore_names.new)
.replace(camel_names.old, camel_names.new))
def rename_path(p):
if '.git' in p:
return
if underscore_names.old in p.name:
p.rename(p.parent.joinpath(p.name.replace(underscore_names.old,
underscore_names.new)))
if camel_names.old in p.name:
p.rename(p.parent.joinpath(p.name.replace(camel_names.old,
camel_names.new)))
# Rename all files/directories containing original name with new name, and
# all occurrences where dashes (i.e., '-') are replaced with underscores.
#
# Process list of paths in *reverse order* to avoid renaming parent
# directories before children.
for p in sorted(list(path(root).walkdirs()))[-1::-1]:
rename_path(p)
for p in path(root).walkfiles():
rename_path(p)
def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='Rename template project with'
'hyphen-separated <new name> (path names and in '
'files).')
parser.add_argument('new_name', help='New project name (e.g., '
' `my-new-project`)')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
main('.', 'droplet-planning', args.new_name)
| gpl-2.0 |
ottogroup/palladium | palladium/R.py | 1 | 4433 | """Support for building models using the R programming language.
"""
from palladium.interfaces import DatasetLoader
from palladium.interfaces import Model
import numpy as np
from pandas import Categorical
from pandas import DataFrame
from pandas import Series
from rpy2 import robjects
from rpy2.robjects import pandas2ri
from rpy2.robjects.pandas2ri import py2ri
from rpy2.robjects.numpy2ri import numpy2ri
from sklearn.base import TransformerMixin
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.preprocessing import LabelEncoder
pandas2ri.activate()
class ObjectMixin:
r = robjects.r
def __init__(self, scriptname, funcname, **kwargs):
self.scriptname = scriptname
self.funcname = funcname
self.r.source(scriptname)
self.rfunc = self.r[funcname]
self.kwargs = kwargs
class DatasetLoader(DatasetLoader, ObjectMixin):
"""A :class:`~palladium.interfaces.DatasetLoader` that calls an R
function to load the data.
"""
def __call__(self):
X, y = self.rfunc(**self.kwargs)
return X, y
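# Usage sketch (hypothetical script and function names, not part of this
# package): given an R file 'dataset.R' defining a function `load_data` that
# returns a list of X and y, the loader could be used as
#
#     loader = DatasetLoader(scriptname='dataset.R', funcname='load_data')
#     X, y = loader()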
class AbstractModel(Model, ObjectMixin):
def __init__(self, encode_labels=False, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
self.encode_labels = encode_labels
@staticmethod
def _from_python(obj):
if isinstance(obj, DataFrame):
obj = py2ri(obj)
elif isinstance(obj, Series):
obj = numpy2ri(obj.values)
elif isinstance(obj, np.ndarray):
obj = numpy2ri(obj)
return obj
def fit(self, X, y=None):
if self.encode_labels:
self.enc_ = LabelEncoder()
y = self.enc_.fit_transform(y)
        self.rmodel_ = self.rfunc(
            self._from_python(X),
            self._from_python(y),
            **self.kwargs)
        return self
class ClassificationModel(AbstractModel):
"""A :class:`~palladium.interfaces.Model` for classification problems
that uses an R model for training and prediction.
"""
def predict_proba(self, X):
X = self._from_python(X)
return np.asarray(self.r['predict'](self.rmodel_, X, type='prob'))
def predict(self, X):
X = X.astype(float) if hasattr(X, 'astype') else X
y_pred = np.argmax(self.predict_proba(X), axis=1)
if self.encode_labels:
y_pred = self.enc_.inverse_transform(y_pred)
return y_pred
def score(self, X, y):
return accuracy_score(self.predict(X), np.asarray(y))
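# Usage sketch (hypothetical names; the R training function and its arguments
# are assumptions, not part of this module): with 'model.R' defining an R
# function `train(x, y, ntree)` that returns a fitted model usable with R's
# predict(..., type='prob'), a classifier could be configured as
#
#     clf = ClassificationModel(scriptname='model.R', funcname='train',
#                               encode_labels=True, ntree=200)
#     clf.fit(X_train, y_train)
#     y_pred = clf.predict(X_test)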
class RegressionModel(AbstractModel):
"""A :class:`~palladium.interfaces.Model` for regression problems
that uses an R model for training and prediction.
"""
def predict(self, X):
X = self._from_python(X)
return np.asarray(self.r['predict'](self.rmodel_, X))
def score(self, X, y):
return r2_score(self.predict(X), np.asarray(y))
class Rpy2Transform(TransformerMixin):
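    # Records pandas categorical levels and column names at fit time so that
    # data seen at prediction time can be coerced back into an R data.frame
    # with the same factor levels and column order.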
def fit(self, X, y):
if isinstance(X, np.ndarray):
pass
elif isinstance(X, DataFrame):
self.index2levels_ = {}
for index, column in enumerate(X.columns):
if hasattr(X[column].dtype, 'categories'):
self.index2levels_[index] = tuple(
X[column].dtype.categories)
self.colnames_ = list(X.columns)
else:
self.index2levels_ = {}
for index in range(len(X.colnames)):
if hasattr(X[index], 'levels'):
self.index2levels_[index] = tuple(X[index].levels)
self.colnames_ = X.colnames
return self
def transform(self, X):
if isinstance(X, (np.ndarray, list)) and hasattr(self, 'index2levels_'):
X = DataFrame(X, columns=self.colnames_)
if isinstance(X, DataFrame) and hasattr(self, 'index2levels_'):
for index, levels in self.index2levels_.items():
colname = X.columns[index]
X[colname] = Categorical(
X[colname],
categories=levels,
)
X = py2ri(X)
if hasattr(self, 'colnames_'):
# Deal with an rpy2 issue whereas colnames appear to get
# mangled when calling py2ri. Also, apply colnames if
# predict data was missing them:
X.colnames = self.colnames_
return X
| apache-2.0 |
mmessick/Tax-Calculator | taxcalc/tests/test_records.py | 3 | 1742 | import os
import sys
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(CUR_PATH, "../../"))
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import pytest
import tempfile
from numba import jit, vectorize, guvectorize
from taxcalc import *
from taxcalc.utils import expand_array
tax_dta_path = os.path.join(CUR_PATH, "../../tax_all1991_puf.gz")
def test_create_records():
r = Records(tax_dta_path)
assert r
def test_create_records_from_file():
r = Records.from_file(tax_dta_path)
assert r
def test_imputation():
e17500 = np.array([20., 4.4, 5.])
e00100 = np.array([40., 8.1, 90.1])
e18400 = np.array([25., 34., 10.])
e18425 = np.array([42., 20.3, 49.])
e62100 = np.array([75., 12.4, 84.])
e00700 = np.array([43.3, 34.1, 3.4])
e04470 = np.array([21.2, 12., 13.1])
e21040 = np.array([45.9, 3., 45.])
e18500 = np.array([33.1, 18.2, 39.])
e20800 = np.array([0.9, 32., 52.1])
cmbtp_itemizer = np.array([68.4, -31.0025, -84.7])
"""
Test case values:
x = max(0., e17500 - max(0., e00100) * 0.075) = [17., 3.7925, 0]
medical_adjustment = min(x, 0.025 * max(0.,e00100)) = [-1.,-.2025,0]
state_adjustment = max(0, max(e18400, e18425)) = [42., 34., 49.]
_cmbtp_itemizer = (e62100 - medical_adjustment + e00700 + e04470 + e21040
- z - e00100 - e18500 - e20800)
= [68.4, -31.0025 ,-84.7]
"""
test_itemizer = records.imputation(e17500, e00100, e18400, e18425,
e62100, e00700, e04470,
e21040, e18500, e20800)
assert(np.allclose(cmbtp_itemizer, test_itemizer))
| mit |
asoliveira/NumShip | source/Navio.py | 2 | 50397 | # -*- coding: utf-8 -*-
#
#This file is part of a program called NumShip
#Copyright (C) 2011,2012 Alex Sandro Oliveira
#NumShip is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Navio (Ship) Module
===================
Ship model and time-domain manoeuvring simulations (turning circle and
zig-zag) built on top of the hull (Casco), rudder (Leme) and propeller
(Prop) modules.
"""
#imports
import os
import scipy as sp
from scipy import linalg
from scipy import stats
#Módulos criados
from Casco import *
from Leme import *
from Prop import *
class inte(object):
"""
    Class that performs the time integration (explicit one-step integrators)
:version:0.0
:author: Alex S. Oliveira
"""
def __init__(self):
"""
"""
pass
def rk4(self, function, x, t0, dt, par = None):
"""
        Single step of the classical fourth-order Runge-Kutta integrator
"""
k1 = function(x, t0, par)
k2 = function(x + 1./2*dt*k1, t0 + 1./2*dt, par)
k3 = function(x + 1./2*dt*k2, t0 + 1./2*dt, par)
k4 = function(x + dt*k3, t0 + dt, par)
xt = x + 1./6*(k1+ 2.*k2+ 2.*k3+ k4)*dt
return xt
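        # Example of a single step (no external dependencies assumed): for
        # dx/dt = -x with x(0) = 1.0 and dt = 0.1,
        #     inte().rk4(lambda x, t, par: -x, 1.0, 0.0, 0.1)
        # returns ~0.904837, close to the exact value exp(-0.1).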
def euler(self, f, x, t0, dt, par= None ):
"""
"""
return x + f(x, t0, par)*dt
class navio:
"""Classe de navios"""
def __init__ (self, dicvar, nome = 'navioteste', tipo = 'MARAD'):
""""Construtor do navio
:param dicvar: Dicionário de derivadas hidrodinâmicas e parâmetros
necessários para a construção do navio;
:param nome: Nome do navio. Não possui relevância(default = 'navioteste');
:param tipo: Tipo de modelo matemático adotado para a construção do navio
(default = 'MARAD');
:type dicvar: dict
:type nome: str
:type tipo: str
"""
self.nome = nome
self.vel = sp.zeros((6, 1))
self.acel = sp.zeros((6, 1))
self.pos = sp.zeros((6, 1))
self.dic = dicvar
self.tipo = tipo
self.integrador = inte()
self.uc = sp.array(self.dic['unom'])
if tipo == 'TP':
self.leme = lemeTris(dicvar)
self.casco = cascoTris(dicvar)
self.prop = prop()
elif tipo == 'MARAD':
self.leme = lemeMarAd(dicvar)
self.casco = cascoMarAd(dicvar)
self.prop = propMarAd(dicvar)
def MostraVel (self):
"""Retorna a Velocidade da embarcação"""
return self.vel
def MostraAcel (self):
"""Retorna a aceleração da embarcação"""
return self.acel
def MostraLeme (self):
"""Retorna o leme em rad da embarcação"""
return self.leme.MostraLeme()
def MostraLemeCom (self):
"""Retorna o leme em rad da embarcação"""
return self.leme.MostraLemeCom()
def MostraPos (self):
"""Retorna a posição da embarcação"""
return self.pos
def MostraRotCom (self):
"""Retorna a rotação comandada"""
return self.prop.MostraRotCom()
def MostraRot (self):
"""Retorna a rotação"""
return self.prop.MostraRot()
def MostraVelCom (self):
"""Retorna a velocidade comandada"""
return self.uc
def MudaVelCom (self, uc):
"""Muda a velocidade comandada"""
self.uc = uc.copy()
self.prop.MudaVelCom(uc)
pass
def MudaLemeCom (self, leme):
"""Muda o leme comandado da embarcação
:param leme:
"""
temp = leme.copy()
self.leme.MudaLemeCom(temp)
pass
def MudaVel (self, velocidade):
"""Muda a velocidade da embarcação
:param velocidade: velocidade (m/s)
"""
temp = velocidade.copy()
self.vel = temp
self.casco.MudaVel(temp)
self.leme.MudaVel(temp)
self.prop.MudaVel(temp)
pass
def MudaPos (self, posicao):
"""Muda a posição da embarcação
:param posição: -- posição (m)
"""
temp = posicao.copy()
self.pos = temp
self.casco.MudaPos(temp)
self.leme.MudaPos(temp)
self.prop.MudaPos(temp)
pass
def MudaRotCom (self, Rot):
"""Muda a rotação Comandada da embarcação"""
self.prop.MudaRotCom(Rot)
pass
def CalcFx (self):
"""Calcula a força em Surge"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
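        # m*(v*r + xg*r**2 - zg*p*r) is the rigid-body centripetal/Coriolis
        # contribution to surge arising from the sway, yaw and roll rates.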
cori = m*(v*r + xg*(r**2) - zg*p*r)
if self.tipo == 'MARAD':
saida = (self.casco.Fx() + self.prop.Fx() +
self.leme.Fx(self.MostraRot(),
self.MostraVelCom() / self.MostraVel()[0]) + cori)
elif self.tipo == 'TP':
saida = self.casco.Fx() + self.leme.Fx() + self.prop.Fx() + cori
return saida
def CalcFy (self):
"""Calcula a força em Sway"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = -m*u*r
if self.tipo == 'MARAD':
saida = (self.casco.Fy() + self.leme.Fy(self.MostraRot()) +
self.prop.Fy() + cori)
elif self.tipo == 'TP':
saida = self.casco.Fy() + self.leme.Fy() + self.prop.Fy() + cori
return saida
def CalcK (self):
""" Calcula o momento de Roll"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = m*zg*u*r
if self.tipo == 'MARAD':
saida = (self.casco.K() + self.leme.K(self.MostraRot()) +
self.prop.K() + cori)
elif self.tipo == 'TP':
saida = self.casco.K() + self.leme.K() + self.prop.K() + cori
return saida
def CalcN (self):
"""Calcula o momento de Yaw"""
m = self.dic['m']*(self.dic['rho']*(self.dic['lpp']**3)/2)
u = self.MostraVel()[0]
v = self.MostraVel()[1]
p = self.MostraVel()[3]
r = self.MostraVel()[5]
xg = self.dic['xg']
zg = self.dic['zg']
cori = -m*xg*u*r
if self.tipo == 'MARAD':
saida = (self.casco.N() + self.leme.N(self.MostraRot()) +
self.prop.N() + cori)
elif self.tipo == 'TP':
saida = self.casco.N() + self.leme.N() + self.prop.N() + cori
return saida
def VetF (self, p=None):
r"""Vetor de forças.
Retorna o vetor de forças atuantes na embarcação:
.. math::
f (\beta, r, \delta_r) = \left[\begin{array}{c c c c}
\beta_{x} X(\beta) &r_{x}X(r) &\delta_{R x}X(\delta_R)
&X_{res} \\
\beta_{y} Y(\beta) &r_{y}Y(r) &\delta_{R y}Y(\delta_R)
&Y_{res} \\
\beta_{n} N(\beta) &r_{n}N(r) &\delta_{R n}N(\delta_R)
&N_{res}
\end{array}\right]
:param p: Tupla, onde:
* p[0] (int) -- Termo que determina quantos graus de liberdade
possui o modelo matemático;
* p[1] (tupla) -- Pesos Com os fatores multiplicadores das
forças. Tomando :math:`p\left[1\right] =
\left[p_a \ p_b \ p_c \ p_d\right]`,então,
.. math::
&p_a &= \left[ \beta_x \ \beta_y \ \beta_n \right],\\
&p_b &= \left[ r_x \ r_y \ r_n \right],\\
&p_c &= \left[ \delta_{Rx} \ \delta_{Ry} \
\delta_{Rn} \right]
&p_d &= \left[ res_{Rx} \ res_{Ry} \
res_{Rn} \right]
Cada fator é um int ou float.
Este termo é opcional onde o valor default é uma
simulação com um modelo de quatro-4- graus de liberdade
e as forças sem nenhum termo multiplicativo.
:type p: tuple
:return: Uma matriz com as forças que atuam na embarcação.
:rtype: numpy.ndarray
:Example:
>>> import scipy as sp
>>> import matplotlib.pyplot as plt
>>> import Es
>>> import Navio
>>>
>>> entrada = ('NavioTeste', '../dados/bemformatado.dat',
... 'inputtab.dat')
>>> en = Es.es(entrada)
>>> nav = Navio.navio(en.lerarqder())
>>> nav.MudaVelCom(nav.uc)
>>> nav.MudaVel(sp.array([nav.uc, 0, 0, 0, 0, 0]))
>>> nav.VetF().reshape(4,1)
array([[ 0. ],
[ 78315.9850437 ],
[ 0. ],
[-14403055.51605981]])
>>>
>>> pa = (1, 1, 1)
>>> pb = (1, 1, 1)
>>> pc = (1, 1, 1)
>>> pd = (1, 1, 1)
>>> p = (pa, pb, pc, pd)
>>> gl = 3
>>>
>>> print nav.VetF((gl, p)).reshape(3,1)
[[ 0. ]
[ 78315.9850437 ]
[-14403055.51605981]]
"""
if p == None:
GrausDeLib = 4
peso = None
elif len(p) == 1:
GrausDeLib = p[0]
peso = None
elif len(p) == 2:
GrausDeLib = p[0]
peso = p[1]
if peso == None:
if GrausDeLib == 4:
saida = sp.array([self.CalcFx(), self.CalcFy(),
self.CalcK(), self.CalcN()])
elif GrausDeLib == 3:
saida = sp.array([self.CalcFx(), self.CalcFy(), self.CalcN()])
else:
#Arquivando as variáveis do navio, pois será feito modificações
#posteriores para o cálculo das forças de uma maneira modular
lemearq = self.MostraLeme()
velarq = self.MostraVel()
uc = self.MostraVelCom()
#Configurando o navio para o cálculo das forças na condição em que a
#embarcação está em equilíbrio somente com velocidade longitudinal u
self.leme.MudaLemeDir(sp.array(0.))
self.MudaVelCom(velarq[0]) #condição eta=1
# setando a velocidade V = [u, 0, 0, 0, 0, 0]
veltemp = sp.zeros((6,))
veltemp[0] = velarq[0]
self.MudaVel(veltemp)
fu = self.VetF((GrausDeLib, ))
#Configurando o navio para o cálculo das forças na condição em que a
#embarcação está com velocidade V = [u, v, 0, 0, 0, 0]
veltemp = sp.zeros((6,))
veltemp[0] = velarq[0]
veltemp[1] = velarq[1]
self.MudaVel(veltemp)
# leme = 0 e eta = 1
fbeta = self.VetF((GrausDeLib, )) - fu
fbeta2 = fbeta.copy()
it = 0
for arg in peso[0]:
fbeta[it] = arg* fbeta[it]
it +=1
#Configurando o navio para o cálculo das forças na condição em que a
#embarcação está com velocidade V = [u, 0, 0, 0, 0, psi]
veltemp = sp.zeros((6,))
veltemp[5] = velarq[5]
veltemp[0] = velarq[0]
self.MudaVel(veltemp)
# leme = 0 e eta = 1
fr = self.VetF((GrausDeLib, )) - fu
fr2 = fr.copy()
it = 0
for arg in peso[1]:
fr[it] = arg* fr[it]
it +=1
#Configurando o navio para o cálculo das forças na condição em que a
#embarcação está com velocidade V = [u, 0, 0, 0, 0, 0] e leme igual ao
#leme inicial, e eta = 1.
self.leme.MudaLemeDir(lemearq)
veltemp = sp.zeros((6,))
veltemp[0] = velarq[0]
self.MudaVel(veltemp)
fleme = self.VetF((GrausDeLib, )) - fu
fleme2 = fleme.copy()
it = 0
for arg in peso[2]:
fleme[it] = arg* fleme[it]
it +=1
#Configurando o navio para o cálculo das forças na condição em que a
#embarcação está com velocidade V = [u, 0, 0, 0, 0, 0],
#leme = 0, e eta = 1. Agora será feito da força devido as interações e
#ao resíduo
self.MudaVel(velarq)
self.MudaVelCom(uc)
fbetarl = self.VetF((GrausDeLib, )) - (fbeta2 + fr2 + fleme2)
it = 0
for arg in peso[3]:
fbetarl[it] = arg* fbetarl[it]
it +=1
del it
saida = fbeta + fr + fleme + fbetarl
return saida
def H (self, GrausDeLib=4):
"""Matriz de massa menos matriz de massa adicional.
:param GrausDeLib: Graus de liberdade.
:type GrausDeLib: int
"""
H = None
H = self.casco.M(GrausDeLib) - self.casco.Ma(GrausDeLib)
return sp.mat(H)
def MatRot (self, p=None):
"""Retorna a matriz de rotação de do referencial solidário para o
inercial
"""
if p== None:
roll= self.MostraPos()[3]
pitch = self.MostraPos()[4]
yaw = self.MostraPos()[5]
else:
roll= p[0]
pitch = p[1]
yaw = p[2]
Rot = sp.array([[sp.cos(yaw) * sp.cos(pitch),
-sp.sin(yaw) * sp.cos(roll) +
sp.cos(yaw) * sp.sin(pitch) * sp.sin(roll),
sp.sin(yaw) * sp.sin(roll) + sp.cos(yaw) * sp.cos(roll)
* sp.sin(pitch)], [sp.sin(yaw) * sp.cos(pitch),
sp.cos(yaw) * sp.cos(roll) + sp.sin(roll) *
sp.sin(pitch) * sp.sin(yaw), -sp.cos(yaw) *
sp.sin(roll) + sp.sin(yaw) * sp.cos(roll) *
sp.sin(pitch)],
[-sp.sin(pitch), sp.cos(pitch) * sp.sin(roll),
sp.cos(pitch) * sp.cos(roll)]])
Rot.shape = (3, 3)
Rot= sp.matrix(Rot)
return Rot
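        # This is the usual ZYX (yaw-pitch-roll) Euler-angle matrix: it maps
        # body-fixed velocities to the inertial frame and reduces to the
        # identity when roll, pitch and yaw are all zero.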
def f2 (self, VetF, H):
r"""Calcula o valor de f(x) na equação :math:`\dot x = f(x)` onde x são é
o vetor de velocidades no sistema solidário.
:param VetF:
:param H:
"""
GrausDeLib = len(VetF)
if GrausDeLib == 4:
a= sp.zeros((6, 6))
a[5, 5] = 1.
a[4, 4] = 1.
a[:4, :4]= H
b= sp.zeros((6, 1))
b [4, 0] = self.vel[3]
b [5, 0] = self.vel[5]*sp.cos(self.MostraPos()[3])
b[:4, :]= VetF
elif GrausDeLib == 3:
a= sp.zeros((4, 4))
a[3, 3] = 1.
a[:3, :3]= H
b= sp.zeros((4, 1))
b[:3, :]= VetF
b[3, 0] = self.MostraVel()[5]
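        # Solve the assembled linear system a*xdot = b: the upper block of
        # 'a' is H (rigid-body minus added mass) and the extra identity rows
        # carry the kinematic relations appended to 'b'.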
saida = linalg.solve(a, b )
return saida
def f (self, velocidade=None, t=None, p=(4,)):
"""O p é uma tupla com o valor dos graus de liberdade"""
GrausDeLib = p[0]
if velocidade != None:
velarq = self.MostraVel()
posarq = self.MostraPos()
veltemp = sp.zeros((6, 1))
postemp = sp.zeros((6, 1))
if GrausDeLib == 3:
veltemp[:2] = velocidade[:2]
veltemp[5] = velocidade[2]
postemp[5] = velocidade[3]
elif GrausDeLib==4:
veltemp[:2] = velocidade[:2]
veltemp[3] = velocidade[2]
veltemp[5] = velocidade[3]
postemp[3] = velocidade[4]
postemp[5] = velocidade[5]
self.MudaVel(veltemp)
self.MudaPos(postemp)
if GrausDeLib == 4:
a = sp.zeros((6, 6))
a[5, 5] = 1.
a[4, 4] = 1.
a[:4, :4]= self.H(GrausDeLib)
b = sp.zeros((6, 1))
b [4, 0] = self.vel[3]
b [5, 0] = self.vel[5] * sp.cos(self.MostraPos()[3])
b[:4, :]= self.VetF(p)
elif GrausDeLib == 3:
a= sp.zeros((4, 4))
a[3, 3] = 1.
a[:3, :3] = self.H(GrausDeLib)
b= sp.zeros((4, 1))
b[:3, :]= self.VetF(p)
b[3, 0] = self.MostraVel()[5]
saida = linalg.solve(a, b)
if velocidade != None:
self.MudaVel(velarq)
self.MudaPos(posarq)
return saida
def fvein (self, x, t, p):
"""
:param x: sp.array(u, v , w)
:param t:
:param p: ((roll, pitch, yaw), (u, v, r))
"""
return sp.array(self.MatRot(p[0])*p[1])
def getCurvaGiro (self, peso=None, met='euler', t0=0., dt=0.5, t=100.,
GrausDeLib=3, tipo='port', leme=sp.array(20.),
rotcom=None, velcom= None, vel=None, eta='vel',
posine=None,
errotf=sp.array(0.05), errotd=sp.array(0.05),
errosr=sp.array(0.001), saida='txt', arqs='saida'):
r"""Simula manobras de Curva de Giro.
:param GrausDeLib: Graus de liberdade de modelo matemático;
:param met: Método de integração. (default = euler);
:param t0: Tempo inicial;
:param dt: Passo no tempo;
:param t: Tempo final;
:param leme: Ângulo do leme em graus;
:param proa: Ângulo de ataque em graus para iniciar a mudança de leme.
Utilizada na curva de Zig/Zag;
:param osa: Ajuste do erro no ângulo de overshoot para iniciar a mudança
de leme na curva de ZigZag;
:param ospath:
:param errosr:
:param errotf: Valor mínima da diferença :math:`\pi / 2 - \psi` para
considerar o valor `transferência` e o do `avanço`;
:param errotd: Valor mínima da diferença :math:`\pi - \psi` para
considerar o valor do `diâmetro tático`;
:param saida: Tipo de arquivo de saída;
:param arqs: Nome do arquivo de saída;
:param rotcom: Comando de rotação do propulsor[opcional];
:param velcom: Comando de velocidade da embarcação[opcional];
:param vel: Velocidade da embarcação[opcional];
:return: Uma tupla (velohis, poshis, acelhis, fhis, veloinerhis, lemehis,
prophis, etahis, dados, betahis)
Em cada elemento da tupla a primeira coluna é o passo de tempo e
as demais são as variáveis:
* velohis -- histórico de velocidades;
* poshis -- histórico de posições;
* acelhis --- histórico de acelerações;
* fhis -- histórico de forças;
* veloinerhis -- histórico de velocidades no sistema inercial;
* lemehis -- histórico do comando de leme.
Ou simplesmente cria arquivos `txt` no diretório indicado na
entrada com todos este valores
:type GrausDeLib: int
:type met: str
:type t0: float;
:type dt: float;
:type t: float;
:type leme: numpy.ndarray;
:type proa: numpy.ndarray
:type osa: numpy.ndarray
:type ospath: numpy.ndarray
:type erro: numpy.ndarray
:type errotf: numpy.ndarray
:type errotd: numpy.ndarray
:type arqs: str
:type rotcom: numpy.ndarray
:type velcom: numpy.ndarray
:type vel: numpy.ndarray
:rtype: tuple, file
"""
if rotcom == None:
rotcom = self.dic['rotnom']
if velcom == None:
velcom = self.dic['unom']
if vel == None:
vel = sp.zeros((6,1))
vel[0] = self.dic['unom']
if posine == None:
posine = sp.zeros((6,1))
self.MudaPos(posine)
self.MudaVel(vel)
self.MudaRotCom(rotcom)
self.MudaVelCom(velcom)
#log é o parâmetro que indica quando a simulação armazenou os dados do
#relatório.
#o valor de r adimensional.
log = False
#Indica quando foi armazenado o raio da curva de equilíbrio
equi = False
if tipo == 'port':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
elif tipo == 'starboard':
self.MudaLemeCom(sp.array(-leme*sp.pi/180))
if saida == 'mem':
#Número de linhas das colunas a serem criadas
nlin = len(sp.arange(t0, t, dt))
lemehis = sp.zeros((nlin, 2)) #histórico do leme
velohis = sp.zeros((nlin, 7)) #histórico da velocidade
veloinerhis = sp.zeros((nlin, 4))#histórico da velocidade no
poshis = sp.zeros((nlin, 7)) #histórico da posição no sistema
#inercial
fhis = sp.zeros((nlin, 5)) #histórico de forças
acelhis = sp.zeros((nlin, 7)) #histórico de acelerações
prophis = sp.zeros((nlin, 2)) #histórico Máquina
etahis = sp.zeros((nlin, 2)) #histórico eta
betahis = sp.zeros((nlin, 2)) #histórico beta
del nlin #não preciso mais
elif saida == 'txt':
if os.path.exists(arqs):
os.rename(arqs, arqs + '2')
os.makedirs(arqs)
os.chdir(arqs)
lemehis = open('leme.dat', 'w')#historico do leme
lemehis.write('#Navio ' + self.nome + '\n' +
'#Manobra de Curva de Giro\n#\n')
lemehis.write('#Valor do leme em rad\n')
lemehis.write('#temp'.center(5) + ' ' + 'leme'.rjust(8) + ' ' +
'\n')
velohis = open('velo.dat', 'w') #histórico da velocidade
velohis.write('#Navio ' + self.nome + '\n' +
'#Manobra de Curva de Giro\n#\n')
velohis.write('#Velocidade Sistema Solidário \n#\n')
velohis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
'dot roll'.rjust(11) + ' ' + 'dot pitch'.rjust(11) +
' ' + 'dot yaw'.rjust(11) + ' ' + '\n')
veloinerhis = open('veloiner.dat', 'w')#histórico da velocidade no
#sistema inercial. Verificar depois a necessidade
veloinerhis.write('#Navio ' + self.nome + '\n' +
'#Manobra de Curva de Giro\n#\n')
veloinerhis.write('#Velocidade Inercial\n#\n')
veloinerhis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
'v'.rjust(11) + ' ' + 'r'.rjust(11) + '\n')
poshis = open('pos.dat', 'w')#histórico da posição no sistema
#inercial
poshis.write('#Navio ' + self.nome + '\n' +
'#Manobra de Curva de Giro\n#\n')
poshis.write('#Posição e Orientação\n#\n')
poshis.write('#temp'.center(5) + ' ' + 'x'.rjust(11) + ' ' +
'y'.rjust(11) + ' ' + 'z'.rjust(11) + ' ' +
'roll'.rjust(11) + ' ' + 'pitch'.rjust(11) + ' ' +
'yaw'.rjust(11) + ' ' + '\n')
fhis = open('forcas.dat', 'w') #histórico de forças
fhis.write('#Navio ' + self.nome + '\n' +
'#Manobra de Curva de Giro\n#\n')
fhis.write('#Forças e Momentos\n#\n')
fhis.write('#temp'.center(5) + ' ' + 'X'.rjust(11) + ' ' +
'Y'.rjust(11) + ' ' + 'K'.rjust(11) + ' ' +
'N'.rjust(11) + ' ' + '\n')
acelhis = open('acel.dat', 'w') #histórico de acelerações
acelhis.write('#Navio ' + self.nome + '\n' +
'#Manobra de Curva de Giro\n#\n')
acelhis.write('#Aceleração\n#\n')
acelhis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
'ddotroll'.rjust(11) + ' ' + 'ddotpitch'.rjust(11)
+ ' ' + 'ddotyaw'.rjust(11) + ' ' + '\n')
prophis = open('propulsor.dat', 'w') #histórico Máquina
prophis.write('#Navio ' + self.nome + '\n' +
'#Manobra de Curva de Giro\n#\n')
prophis.write('#Rotações do propulsor\n#\n')
prophis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + '\n')
etahis = open('eta.dat', 'w') #histórico eta
etahis.write('#Navio ' + self.nome + '\n' +
'#Manobra de Curva de Giro\n#\n')
etahis.write('#eta \n#\n')
etahis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + ' ' + '\n')
betahis = open('beta.dat', 'w') #histórico eta
betahis.write('#Navio ' + self.nome + '\n' +
'#Manobra de Curva de Giro\n#\n')
betahis.write('#Beta \n#\n')
betahis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + ' ' + '\n')
os.chdir('../..')
dados = []
dic = {}
posini = self.MostraPos().copy()
cont =0 #Contador
#iteração
for tp in sp.arange(t0, t, dt):
if cont == 0:
V1 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
elif cont == 1:
V2 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
elif cont == 2:
V3 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
elif cont == 3:
V4 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
else:
V1 = V2
V2 = V3
V3 = V4
V4 = sp.sqrt(self.MostraVel()[0]**2 +
self.MostraVel()[1]**2)
if (log) and (not equi):
#Calcula o desvio padrão das últimas 4 velocidades se for abaixo
#de 'errosr' armazema o raio de equilíbrio da curva como v/r
if stats.tstd((V1, V2, V3, V4)) < errosr:
dic['steadytr'] = (sp.sqrt(self.MostraVel()[0] ** 2 +
self.MostraVel()[1] ** 2) /
self.MostraVel()[5])
dados.append(dic.copy())
equi = True
if not log:
if (abs(abs(self.MostraPos()[5] - posini[5]) -
(sp.pi/2)) <= errotf):
errotf = (abs(abs(self.MostraPos()[5] - posini[5]) - (sp.pi/2)))
dic['transfer'] = abs(self.MostraPos()[1] - posini[1])
dic['advance'] = abs(self.MostraPos()[0] - posini[0])
if (abs(abs(self.MostraPos()[5] - posini[5]) - sp.pi) <= errotd):
errotd = abs(abs(self.MostraPos()[5] - posini[5]) - sp.pi)
dic['taticalDiameter'] = abs(self.MostraPos()[1] - posini[1])
if abs(self.MostraPos()[5] - posini[5]) > sp.pi:
log = True
if peso == None:
par = (GrausDeLib, )
else:
par = (GrausDeLib, peso)
ft = self.VetF(par)
MatRot = self.MatRot()
VelIn = sp.array(MatRot*self.MostraVel()[0:3])
posine = self.MostraPos()[0:3]
vel = self.MostraVel()
#Guardando os parâmetros
#Velocidade Inercial
if saida == 'txt':
veloinerhis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in VelIn:
veloinerhis.write('%.5e'.rjust(11)%(arg) + ' ')
veloinerhis.write('\n')
elif saida == 'mem':
d = sp.hstack(VelIn)
veloinerhis[cont, 1:] = d #
veloinerhis[cont, 0] = tp #
#histórico Leme
if saida == 'txt':
lemehis.write('%.2f'.rjust(5)%(tp) + ' ')
lemehis.write('%.2f'.rjust(5)%(self.MostraLeme()) + '\n')
elif saida == 'mem':
lemehis[cont, 0] = tp
lemehis[cont, 1] = self.MostraLeme()
#histórico da posição
if saida == 'txt':
poshis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraPos():
poshis.write('%.5e'.rjust(11)%(arg) + ' ')
poshis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraPos())
poshis[cont, :] = sp.hstack((tp, temp))
del temp
#histórico da Velocidade
if saida == 'txt':
velohis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in vel:
velohis.write('%.5e'.rjust(11)%(arg) + ' ')
velohis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraVel())
velohis[cont, :] = sp.hstack((tp, temp))
del temp
#histórico das Forças
if saida == 'txt':
temp = sp.zeros((4, 1))
if GrausDeLib == 4:
temp = ft
elif GrausDeLib == 3:
temp[:2] = ft[:2]
temp[3] = ft[2]
fhis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in temp:
fhis.write('%.5e'.rjust(11)%(arg) + ' ')
fhis.write('\n')
elif saida == 'mem':
temp = sp.hstack(sp.array(ft))
if GrausDeLib == 4:
fhis[cont, :] = sp.hstack((tp, temp))
elif GrausDeLib == 3:
fhis[cont, :3] = sp.hstack((tp, temp[:2]))
fhis[cont, 4] = temp[2]
del temp
#histórico Propulsor
if saida == 'txt':
prophis.write('%.2f'.rjust(5)%(tp) + ' ')
prophis.write('%.2f'.rjust(5)%self.MostraRot() + '\n')
elif saida == 'mem':
prophis[cont, :] = sp.hstack((tp, self.MostraRot()))
#histórico eta
if saida == 'txt':
etahis.write('%.2f'.rjust(5)%(tp) + ' ')
if eta == 'rot':
etahis.write('%.2f'.rjust(5) % (self.MostraRotCom() /
self.MostraRot()) + '\n')
elif eta == 'vel':
etahis.write('%.2f'.rjust(5) % (self.MostraVelCom() /
self.MostraVel()[0]) + '\n')
elif saida == 'mem':
if eta == 'rot':
etahis[cont, :] = sp.hstack((tp, self.MostraRotCom() /
self.MostraRot()))
elif eta == 'vel':
etahis[cont, :] = sp.hstack((tp, self.MostraVelCom() /
self.MostraVel()[0]))
#histórico Beta
if saida == 'txt':
betahis.write('%.2f'.rjust(5) % (tp) + ' ')
betahis.write(('%.2f'.rjust(5) % sp.arctan(-self.MostraVel()[1] /
self.MostraVel()[0])) + '\n')
elif saida == 'mem':
betahis[cont, :] = sp.hstack((tp, sp.arctan(-self.MostraVel()[1] /
self.MostraVel()[0])))
#histórico das Acelerações
Acel = self.f2(ft, self.H(GrausDeLib))
vetor = sp.zeros((6, 1))
if GrausDeLib == 4:
vetor[:2] = Acel[:2]
vetor[3] = Acel[2]
vetor [5] = Acel[3]
elif GrausDeLib == 3:
vetor[:2] = Acel[:2]
vetor [5] = Acel[2]
if saida == 'txt':
acelhis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in vetor:
acelhis.write('%.5e'.rjust(11)%(arg[0]) + ' ')
acelhis.write('\n')
elif saida == 'mem':
acelhis[cont, :] = sp.hstack((tp, sp.hstack(vetor)))
#Criação do vetor de graus de liberdade
if GrausDeLib == 4:
vt = sp.zeros((6, 1))
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[3]
vt [3] = self.MostraVel()[5]
vt [4] = self.MostraPos()[3]
vt [5] = self.MostraPos()[5]
elif GrausDeLib == 3:
vt = sp.zeros((4, 1))
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[5]
vt [3] = self.MostraPos()[5]
#integração da aceleração solidária
if met == 'euler':
vt = self.integrador.euler(self.f, vt, tp, dt, par)
elif met =='rk4':
vt = self.integrador.rk4(self.f, vt, tp, dt, par)
if GrausDeLib == 4:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[3] = vt[2]
v[5] = vt[3]
elif GrausDeLib == 3:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[5] = vt[2]
self.MudaVel(v)
del v
#integração da velocidade inercial
x = sp.zeros((6, 1))
if met == 'euler':
x[:3] = self.integrador.euler(self.fvein ,
self.MostraPos()[:3], tp, dt ,
(self.MostraPos()[3:] ,
self.MostraVel()[:3]))
elif met == 'rk4':
x[:3] = self.integrador.rk4(self.fvein, self.MostraPos()[:3],
tp, dt, (self.MostraPos()[3:],
self.MostraVel()[:3]))
if GrausDeLib == 4:
x[3] = vt[4]
x[5] = vt[5]
elif GrausDeLib == 3:
x[5] = vt[3]
self.MudaPos(x)
del x
cont += 1
self.prop.MudaRot(tp)
self.leme.MudaLeme(tp)
if saida == 'txt':
arq = (velohis, poshis, acelhis, fhis, veloinerhis, lemehis, prophis,
etahis)
for arg in arq:
arg.close()
return dados
elif saida == 'mem':
return (velohis, poshis, acelhis, fhis, veloinerhis, lemehis, prophis,
etahis, betahis, dados)
def getCurvaZigZag (self, peso=None, met='euler', t0=0., dt=0.5, t=100.,
GrausDeLib=3, tipo='port', leme=sp.array(20.),
rotcom=None, velcom=None, vel=None, proa=None,
eta='vel', posine=None, osa=sp.array(0.0),
ospath=sp.array(0.0), erro=sp.array(0.005),
saida='txt', arqs='./saida/zz'):
r"""Simula manobras de Zig Zag.
:param GrausDeLib: Graus de liberdade de modelo matemático;
:param met: Método de integração. (default = euler);
:param t0: Tempo inicial;
:param dt: Passo no tempo;
:param t: Tempo final;
:param leme: Ângulo do leme em graus;
:param proa: Ângulo de ataque em graus para iniciar a mudança de leme.
Utilizada na curva de Zig/Zag;
:param osa: Ajuste do erro no ângulo de overshoot para iniciar a mudança
de leme na curva de ZigZag;
:param ospath:
:param errosr:
:param errotf:
:param errotd:
:param saida: Tipo de arquivo de saída;
:param arqs: Nome do arquivo de saída;
:param rotcom: Comando de rotação do propulsor[opcional];
:param velcom: Comando de velocidade da embarcação[opcional];
:param vel: velocidade da embarcação[opcional];
:return: (velohis, poshis, acelhis, fhis, veloinerhis, lemehis,
prophis, etahis). Caso o valor do parâmetro *saida* seja 'txt'
retorna estes valores como arquivos de texto no diretório
indicado pelo parâmetro *arqs*. Mesmo nesse caso a função
retorna uma lista que contém dicionários com parâmetros com
overshoot da proa *'osa'* e overshoot lateral linear *'ospath'*.
Em cada elemento da tupla a primeira coluna é o passo de tempo e
as demais são as variáveis:
* velohis -- histórico de velocidades;
* poshis -- histórico de posições;
* acelhis --- histórico de acelerações;
* fhis -- histórico de forças;
* veloinerhis -- histórico de velocidades no sistema inercial;
* lemehis -- histórico do comando de leme.
:type GrausDeLib: int
:type met: str
:type t0: float;
:type dt: float;
:type t: float;
:type leme: numpy.ndarray;
:type proa: numpy.ndarray
:type osa: numpy.ndarray
:type ospath: numpy.ndarray
:type erro: numpy.ndarray
:type errotf: numpy.ndarray
:type errotd: numpy.ndarray
:type arqs: str
:type rotcom: numpy.ndarray
:type velcom: numpy.ndarray
:type vel: numpy.ndarray
:rtype: tuple
"""
if rotcom == None:
rotcom = self.dic['rotnom']
if velcom == None:
velcom = self.dic['unom']
if vel == None:
vel = sp.zeros((6,1))
vel[0] = self.dic['unom']
if posine == None:
posine = sp.zeros((6,1))
if proa == None:
proa = sp.array(20.)
self.MudaPos( posine)
self.MudaVel(vel)
self.MudaRotCom(rotcom)
self.MudaVelCom(velcom)
#A variável exe é utilizada mais tarde como parâmetro para contar o
#número de execuções do leme. Este valo sinalizará o memento em que
#devemos inverter o valor do leme comandado
if tipo == 'port':
self.MudaLemeCom(sp.array(leme*sp.pi/180))
exe = 0
elif tipo == 'starboard':
self.MudaLemeCom(sp.array(-leme*sp.pi/180))
exe = 1
#Criando espaço na memória para armazenar os parâmetros da curva
if saida == 'mem':
nlin = len(sp.arange(t0, t, dt))
lemehis = sp.zeros((nlin, 2)) #histórico do leme
velohis = sp.zeros((nlin, 7)) #histórico da velocidade
veloinerhis = sp.zeros((nlin, 4))#histórico da velocidade no
#sistema inercial Verificar depois a necessidade
poshis = sp.zeros([nlin, 7]) #histórico da posição no sistema inercial
fhis = sp.zeros((nlin, 5)) #histórico de forças
acelhis = sp.zeros((nlin, 7)) #histórico de acelerações
prophis = sp.zeros((nlin, 2)) #histórico Máquina
etahis = sp.zeros((nlin, 2)) #histórico eta
del nlin #não preciso mais
elif saida == 'txt':
if os.path.exists(arqs):
os.rename(arqs, arqs + '2')
os.makedirs(arqs)
os.chdir(arqs)
lemehis = open('leme.dat', 'w')#histórico do leme
lemehis.write('#Navio ' + self.nome + '\n' + '#Manobra de Curva \
de Zig-Zag\n#\n')
lemehis.write('#Valor do leme em rad\n')
lemehis.write('#temp'.center(5) + ' ' + 'leme'.rjust(8) + ' ' + '\n')
velohis = open('velo.dat', 'w') #histórico da velocidade
velohis.write('#Navio ' + self.nome + '\n' + '#Manobra de Curva \
de Zig-Zag\n#\n')
velohis.write('#velocidade no Sistema Solidário \n#\n')
velohis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' + 'dot \
roll'.rjust(11) + ' ' + ' dot pitch'.rjust(11) +
' ' + 'dot yaw'.rjust(11) + ' ' + '\n')
#histórico da velocidade no sistema inercial Verificar depois a
#necessidade.
veloinerhis = open('veloiner.dat', 'w')
veloinerhis.write('#Navio ' + self.nome + '\n' + '#Manobra de \
Curva de Zig-Zag\n#\n')
veloinerhis.write('#velocidade Inercial\n#\n')
veloinerhis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
'v'.rjust(11) + ' ' + 'r'.rjust(11) + '\n')
#histórico da posição no sistema inercial
poshis = open('pos.dat', 'w')
poshis.write('#Navio ' + self.nome + '\n' + '#Manobra de Curva \
de Zig-Zag\n#\n')
poshis.write('#Posição e Orientação\n#\n')
poshis.write('#temp'.center(5) + ' ' + 'x'.rjust(11) + ' ' +
'y'.rjust(11) + ' ' + 'z'.rjust(11) + ' ' +
'roll'.rjust(11) + ' ' + 'pitch'.rjust(11) + ' ' +
'yaw'.rjust(11) + ' ' + '\n')
#histórico de forças
fhis = open('forcas.dat', 'w')
fhis.write('#Navio ' + self.nome + '\n' + '#Manobra de Curva de \
Zig-Zag\n#\n')
fhis.write('#Forças e Momentos\n#\n')
fhis.write('#temp'.center(5) + ' ' + 'X'.rjust(11) + ' ' +
'Y'.rjust(11) + ' ' + 'K'.rjust(11) + ' ' +
'N'.rjust(11) + ' ' + '\n')
#histórico de acelerações
acelhis = open('acel.dat', 'w')
acelhis.write('#Navio ' + self.nome + '\n' + '#Manobra de Curva \
de Zig-Zag\n#\n')
acelhis.write('#Aceleração\n#\n')
acelhis.write('#temp'.center(5) + ' ' + 'u'.rjust(11) + ' ' +
'v'.rjust(11) + ' ' + 'w'.rjust(11) + ' ' +
'ddotroll'.rjust(11) + ' ' + ' ddotpitch'.rjust(11)
+ ' ' + 'ddotyaw'.rjust(11) + ' ' + '\n')
#histórico Máquina
prophis = open('propulsor.dat', 'w')
prophis.write('#Navio ' + self.nome + '\n' + '#Manobra de Curva \
de Zig-Zag\n#\n')
prophis.write('#Rotações do propulsor\n#\n')
prophis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + '\n')
#histórico eta
etahis = open('eta.dat', 'w')
etahis.write('#Navio ' + self.nome + '\n' + '#Manobra de Curva \
de Zig-Zag\n#\n')
etahis.write('#eta \n#\n')
etahis.write('#temp'.center(5) + ' ' + 'rot'.rjust(8) + ' ' + '\n')
#Voltando ao diretório de trabalho
os.chdir('../..')
dados = []
dic = {}
posini = self.MostraPos().copy()
cont = 0 #Contador
if peso == None:
par = (GrausDeLib, )
else:
par = (GrausDeLib, peso)
#Início da iteração
for tp in sp.arange(t0, t, dt):
#Verificando o momento em que será realizada a mudança do leme.
#Quando guinando para bombordo o valor de exe%2 = True e para boreste o
#contrario.
if (((exe%2 == 0) and self.MostraPos()[5] <= -(proa * sp.pi / 180)) or
(exe%2 != 0 and self.MostraPos()[5] >= (proa * sp.pi / 180))):
self.MudaLemeCom(self.MostraLeme() * (-1))
if ((exe != 0 and tipo == 'port') or
(exe != 1 and tipo == 'starboard')):
dic['reach'] = erro
dic['ospath'] = ospath
dic['osangle'] = osa
dados.append(dic.copy())
osa = sp.array(0.0)
ospath = sp.array(0)
erro = sp.array(0.05)
logospath = False
logosa = False
if tipo == 'port':
dic['exeNumber'] = exe
elif tipo =='starboard':
dic['exeNumber'] = exe - 1
dic['time'] = tp - sp.array(dt)
dic['path'] = self.MostraPos()[1]
dic['proa'] = self.MostraPos()[5]
exe += 1
#Atualizando os parâmetros.
#Este if pergunta se está é a primeira execução
if ((exe != 0 and tipo == 'port') or
(exe != 1 and tipo == 'starboard')):
#Já atingi o maior valor de overshoot da distancia e armazenei este
#valor?
if ((logospath == False) and
(abs(self.MostraPos()[1] - dic['path']) >= ospath)):
ospath = abs(self.MostraPos()[1] - dic['path'])
else:
logospath = True
#Já atingi o maior valor de overshoot do proamento e armazenei este
#valor?
if ((logosa == False) and (abs(self.MostraPos()[5] -
dic['proa']) >= osa)):
osa = abs(self.MostraPos()[5] - dic['proa'])
else:
logosa = True
if abs(abs(self.MostraPos()[5]) - abs(posini[5])) < erro:
erro = abs(self.MostraPos()[5] - posini[5])
MatRot = self.MatRot()
velin = MatRot * sp.matrix(self.vel[0:3])
posine = self.MostraPos()[0:3]
#Cálculo de forças
ft = self.VetF(par)
#Guardando os parâmetros.
#velocidade Inercial
if saida == 'txt':
veloinerhis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in velin:
veloinerhis.write('%.5e'.rjust(11)%(arg) + ' ')
veloinerhis.write('\n')
elif saida == 'mem':
d = sp.hstack(velin)
veloinerhis[cont, 1:] = d
veloinerhis[cont, 0] = tp
#Guardando o histórico do Leme
if saida == 'txt':
lemehis.write('%.2f'.rjust(5)%(tp) + ' ')
lemehis.write('%.2f'.rjust(5)%(self.MostraLeme()) + '\n')
elif saida == 'mem':
lemehis[cont, 0] = tp
lemehis[cont, 1] = self.MostraLeme()
#histórico da posição
if saida == 'txt':
poshis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraPos():
poshis.write('%.5e'.rjust(11)%(arg) + ' ')
poshis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraPos())
poshis[cont, :] = sp.hstack((tp, temp))
del temp
#histórico da velocidade
if saida == 'txt':
velohis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in self.MostraVel():
velohis.write('%.5e'.rjust(11)%(arg) + ' ')
velohis.write('\n')
elif saida == 'mem':
temp = sp.hstack(self.MostraVel())
velohis[cont, :] = sp.hstack((tp, temp))
del temp
#histórico das Forças
if saida == 'txt':
temp = sp.zeros((4, 1))
if GrausDeLib == 4:
temp = ft
elif GrausDeLib == 3:
temp[:2] = ft[:2]
temp[3] = ft[2]
fhis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in temp:
fhis.write('%.5e'.rjust(11)%(arg) + ' ')
fhis.write('\n')
elif saida == 'mem':
temp = sp.hstack(sp.array(ft))
if GrausDeLib == 4:
fhis[cont, :] = sp.hstack((tp, temp))
elif GrausDeLib == 3:
fhis[cont, :3] = sp.hstack((tp, temp[:2]))
fhis[cont, 4] = temp[2]
#histórico Propulsor
if saida == 'txt':
prophis.write('%.2f'.rjust(5)%(tp) + ' ')
prophis.write('%.2f'.rjust(5)%self.MostraRot() + '\n')
elif saida == 'mem':
prophis[cont, :] = sp.hstack((tp, self.MostraRot()))
#histórico eta
if saida == 'txt':
etahis.write('%.2f'.rjust(5)%(tp) + ' ')
if eta == 'rot':
etahis.write('%.2f'.rjust(5) % (self.MostraRotCom() /
self.MostraRot()) + '\n')
elif eta == 'vel':
etahis.write('%.2f'.rjust(5) % (self.MostraVelCom() /
self.MostraVel()[0]) + '\n')
elif saida == 'mem':
if eta== 'rot':
etahis[cont, :] = sp.hstack((tp, self.MostraRotCom() /
self.MostraRot()))
elif eta == 'vel':
etahis[cont, :] = sp.hstack((tp, self.MostraVelCom() /
self.MostraVel()[0]))
#histórico das Acelerações
Acel = self.f2(ft, self.H(GrausDeLib))
vetor = sp.zeros((6, 1))
if GrausDeLib == 4:
vetor[:2] = Acel[:2]
vetor[3] = Acel[2]
vetor [5] = Acel[3]
elif GrausDeLib == 3:
vetor[:2] = Acel[:2]
vetor [5] = Acel[2]
if saida == 'txt':
acelhis.write('%.2f'.rjust(5)%(tp) + ' ')
for arg in vetor:
acelhis.write('%.5e'.rjust(11)%(arg[0]) + ' ')
acelhis.write('\n')
elif saida == 'mem':
acelhis[cont, :] = sp.hstack((tp, sp.hstack(vetor)))
del vetor
#Criação de vetor de graus de liberdade
if GrausDeLib == 4:
vt = sp.zeros([6, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[3]
vt [3] = self.MostraVel()[5]
vt [4] = self.MostraPos()[3]
vt [5] = self.MostraPos()[5]
elif GrausDeLib == 3:
vt = sp.zeros([4, 1])
vt [0] = self.MostraVel()[0]
vt [1] = self.MostraVel()[1]
vt [2] = self.MostraVel()[5]
vt [3] = self.MostraPos()[5]
#Integração da Aceleração solidária
if met == 'euler':
vt = self.integrador.euler(self.f, vt, tp, dt ,par )
elif met =='rk4':
vt = self.integrador.rk4(self.f, vt, tp, dt, par)
#Preparando a saída da integração
if GrausDeLib == 4:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[3] = vt[2]
v[5] = vt[3]
elif GrausDeLib ==3:
v = sp.zeros((6, 1))
v[0] = vt[0]
v[1] = vt[1]
v[5] = vt[2]
self.MudaVel(v)
del v
#Integração da velocidade inercial
x = sp.zeros((6, 1))
if met == 'euler':
x[:3] = self.integrador.euler(self.fvein, self.MostraPos()[:3],
tp, dt, (self.MostraPos()[3:],
self.MostraVel()[:3]))
elif met == 'rk4':
x[:3] = self.integrador.rk4(self.fvein, self.MostraPos()[:3],
tp, dt, (self.MostraPos()[3:],
self.MostraVel()[:3]))
#Preparando a saída da integração
if GrausDeLib == 4:
x[3] = vt[4]
x[5] = vt[5]
elif GrausDeLib == 3:
x[5] = vt[3]
#Preparando os parâmetros para o próximo passo de integração
self.MudaPos(x)
cont += 1
del x
self.prop.MudaRot(tp)
self.leme.MudaLeme(tp)
if saida == 'txt':
arq = (velohis, poshis, acelhis, fhis, veloinerhis, lemehis,
prophis, etahis)
for arg in arq:
arg.close()
return dados
elif saida == 'mem':
return (velohis, poshis, acelhis, fhis, veloinerhis, lemehis,
prophis, etahis, dados)
def simulaTestb(self, p, intervalo=sp.array(5.), V=None):
r"""Gera uma tabela de forças variando com o ângulo :math:`\beta`
:param p: Mesmo parâmetro utilizado para chamar a função :ref: `VetF`;
:param intervalo: Intervalo de variação do ângulo :math:`\beta`;
:param V: Velocidade da embarcação.
:return: Retorna uma matriz com o valor das forças variando de acordo com a
velocidade.
:type intervalo: numpy.ndarray
:Example:
>>> import scipy as sp
>>> import matplotlib.pyplot as plt
>>> import Es
>>> import Navio
>>> entrada = ('NavioTeste', '../dados/bemformatado.dat',
... 'inputtab.dat')
>>> en = Es.es(entrada)
>>> nav = Navio.navio(en.lerarqder())
>>> pa = (1, 1, 1)
>>> pb = (1, 1, 1)
>>> pc = (1, 1, 1)
>>> pd = (1, 1, 1)
>>> p = (pa, pb, pc, pd)
>>> print nav.simulaTestb(p)[:2,]
[[ 0. 0. 0.02576571 0. -0.01349632]
[ 0.08726646 0.00765429 0.46717178 0. 0.20968975]]
"""
        if V is None:
V = self.dic['unom']
Velocidade = sp.zeros((6, 1))
saida = sp.zeros([len( sp.arange(0., sp.pi, intervalo * sp.pi / 180)),
5])
contlinha = 0
for beta in sp.arange(0., sp.pi/2, intervalo * sp.pi / 180):
Velocidade[0] = sp.array(V) * sp.cos(beta)
Velocidade[1] = -sp.array(V) * sp.sin(beta)
            self.MudaVelCom(Velocidade[0])  # condition that forces \eta = 1
self.MudaVel(Velocidade)
v = sp.sqrt(Velocidade[0] ** 2 + Velocidade[1] ** 2)
rho = self.dic['rho']
lpp = self.dic['lpp']
vetF = self.VetF((4, p))
saida[contlinha, :] = sp.hstack([beta, vetF[0] * (2 /
(rho * (lpp * (v ** 2)))), vetF[1] *
(2 / (rho * (lpp* (v ** 2)))), vetF[2] *
(2 / (rho * ((lpp * v) ** 2))),
vetF[3] * (2 / (rho * ((lpp * v) **
2)))])
contlinha += 1
return saida
if __name__ == "__main__":
import doctest
doctest.testmod() | gpl-3.0 |
rhshah/iAnnotateSV | iAnnotateSV/AnnotateForCosmic.py | 1 | 2689 | '''
Created on 12/23/2015
@Ronak Shah
'''
import pandas as pd
import logging
import coloredlogs
# Gives elements at particular index in list
getVar = lambda searchList, ind: [searchList[i] for i in ind]
coloredlogs.install(level='DEBUG')
def AnnotateFromCosmicCensusFile (filename, verbose, count, sv):
if(verbose):
logging.info("iAnnotateSV::AnnotateForCosmic: Checking Entry %d in Cosmic", count)
    # Initialize list to store cosmic annotations
list_ccData = []
sv_gene1 = str(sv.loc['gene1'])
sv_gene2 = str(sv.loc['gene2'])
with open(filename, 'r') as filecontent:
header = filecontent.readline()
for line in filecontent:
data = line.rstrip('\n').split('\t')
if(str(data[0]) == sv_gene1):
slicedData = getVar(data,[4,7,9,12,13])
slicedProcessedData = []
for sData in slicedData:
if(sData):
sData = "site1:" + sData
slicedProcessedData.append(sData)
else:
slicedProcessedData.append(" ")
joinedData = '\t'.join(slicedProcessedData)
list_ccData.append(joinedData)
if(str(data[0]) == sv_gene2):
slicedData = getVar(data,[4,7,9,12,13])
slicedProcessedData = []
for sData in slicedData:
if(sData):
sData = "site2:" + sData
slicedProcessedData.append(sData)
else:
slicedProcessedData.append(" ")
joinedData = '\t'.join(slicedProcessedData)
list_ccData.append(joinedData)
return list_ccData
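# --- Hedged usage sketch (added for illustration; not part of iAnnotateSV) ---
# Minimal example of how AnnotateFromCosmicCensusFile might be called for a
# single SV record. The census file name and the gene symbols are assumptions;
# the function above only needs a tab-delimited file whose first column is a
# gene symbol and a pandas Series exposing 'gene1' and 'gene2'.
def _example_census_annotation():
    sv = pd.Series({'gene1': 'TMPRSS2', 'gene2': 'ERG'})
    return AnnotateFromCosmicCensusFile('cancer_gene_census.tsv', True, 1, sv)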
def AnnotateFromCosmicFusionCountsFile (filename, verbose, count, sv):
if(verbose):
logging.info("iAnnotateSV::AnnotateForCosmic: Checking Entry %d in Cosmic Counts data", count)
    # Initialize list to store cosmic annotations
sv_gene1 = str(sv.loc['gene1'])
sv_gene2 = str(sv.loc['gene2'])
sv_combo1 = sv_gene1 + "-" + sv_gene2
sv_combo2 = sv_gene2 + "-" + sv_gene1
    countDF = pd.read_csv(filename, sep='\t', header=0, keep_default_na=True)
for index,row in countDF.iterrows():
gene1 = str(row.loc["Gene1"])
gene2 = str(row.loc["Gene2"])
combo1 = gene1 + "-" + gene2
combo2 = gene2 + "-" + gene1
counts = None
if(sv_combo1 == combo1 or sv_combo1 == combo2 or sv_combo2 == combo1 or sv_combo2 == combo2 ):
counts = int(row.loc['Counts'])
break
else:
continue
return counts | apache-2.0 |
abhishekkrthakur/automl_gpu | helena.py | 1 | 1736 | # coding: utf-8
"""
AutoML GPU Track
__author__ : Abhishek Thakur
"""
import cPickle
import numpy as np
import sys
sys.setrecursionlimit(100000)
from sklearn import preprocessing
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
dataset = "helena"
train_data = cPickle.load(open(dataset + '_train.pkl', 'rb'))
test_data = cPickle.load(open(dataset + '_test.pkl', 'rb'))
valid_data = cPickle.load(open(dataset + '_valid.pkl', 'rb'))
labels = cPickle.load(open(dataset + '_labels.pkl', 'rb'))
scaler = preprocessing.StandardScaler()
train_data = scaler.fit_transform(train_data)
valid_data = scaler.transform(valid_data)
test_data = scaler.transform(test_data)
test_preds = []
valid_preds = []
NUM_ROUND = 4
for i in range(NUM_ROUND):
print "=============", i
dims = train_data.shape[1]
model = Sequential()
model.add(Dense(400, input_shape=(dims,)))
model.add(BatchNormalization())
model.add(PReLU())
model.add(Dropout(0.2))
model.add(Dense(labels.shape[1]))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
model.fit(train_data, labels, nb_epoch=200, batch_size=128)
tp = model.predict(test_data)
yp = model.predict(valid_data)
if i == 0:
test_preds = tp
valid_preds = yp
else:
test_preds += tp
valid_preds += yp
test_preds = test_preds * 1./NUM_ROUND
valid_preds = valid_preds * 1./NUM_ROUND
np.savetxt('res/' + dataset + '_test_001.predict', test_preds, '%1.10f')
np.savetxt('res/' + dataset + '_valid_001.predict', valid_preds, '%1.10f')
| mit |
mayblue9/bokeh | examples/glyphs/anscombe.py | 39 | 2945 | from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle, Line
from bokeh.models import (
ColumnDataSource, Grid, GridPlot, LinearAxis, Plot, Range1d
)
from bokeh.resources import INLINE
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
circles_source = ColumnDataSource(
data = dict(
xi = quartet['Ix'],
yi = quartet['Iy'],
xii = quartet['IIx'],
yii = quartet['IIy'],
xiii = quartet['IIIx'],
yiii = quartet['IIIy'],
xiv = quartet['IVx'],
yiv = quartet['IVy'],
)
)
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
plot = Plot(
x_range=xdr, y_range=ydr,
title=title, plot_width=400, plot_height=400,
border_fill='white', background_fill='#e9e0db'
)
xaxis = LinearAxis(axis_line_color=None)
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_line_color=None)
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
line = Line(x='x', y='y', line_color="#666699", line_width=2)
plot.add_glyph(lines_source, line)
circle = Circle(
x=xname, y=yname, size=12,
fill_color="#cc6633", line_color="#cc6633", fill_alpha=0.5
)
plot.add_glyph(circles_source, circle)
return plot
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = GridPlot(children=[[I, II], [III, IV]])
doc = Document()
doc.add(grid)
if __name__ == "__main__":
filename = "anscombe.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
hazelnusse/pydy | examples/bicycle/bicycle_kinematic_limits.py | 1 | 9341 | from sympy import sin, cos, symbols, Matrix
from pydy import ReferenceFrame, Vector, dot
import numpy as np
import time
from scipy.optimize import fsolve
def cythonit(pyxfile, html=False):
import os
os.system('cython ' + pyxfile + '.pyx -a')
if html:
os.system('firefox ' + pyxfile + '.html')
def compileit(cfile, outputfile):
import os
os.system('gcc -shared -pthread -fPIC -fwrapv -O2 -Wall\
-fno-strict-aliasing -I/usr/include/python2.6 -o ' + outputfile + ' ' +\
cfile)
def generate_kf_module():
# Lean, Pitch, Steer
l, p, s = symbols('l p s')
# Frame geometric parameters
rr, rrt, rf, rft, lr, lf, ls = symbols('rr rrt rf rft lr lf ls')
A = ReferenceFrame('A')
B = A.rotate('B', 1, l)
D = B.rotate('D', 2, p)
E = D.rotate('E', 3, s)
# Vector in the plane of the front wheel, pointed towards the ground
g = Vector(A[3] - dot(E[2], A[3])*E[2]).normalized
# Holonomic constraint
f0 = dot(A[3], -rrt*A[3] - rr*B[3] + lr*D[1] + ls*D[3] + lf*E[1] + rf*g + rft*A[3])
f1 = f0.diff(p)
# Vector valued function
F = Matrix([f0, f1])
X = Matrix([l, p])
J = F.jacobian(X)
# Generate string representations of each function
f0_s = str(f0).replace('sin', 'np.sin').replace('cos', 'np.cos')
f1_s = str(f1).replace('sin', 'np.sin').replace('cos', 'np.cos')
J00_s = str(J[0,0]).replace('sin', 'np.sin').replace('cos', 'np.cos')
J01_s = str(J[0,1]).replace('sin', 'np.sin').replace('cos', 'np.cos')
J10_s = str(J[1,0]).replace('sin', 'np.sin').replace('cos', 'np.cos')
J11_s = str(J[1,1]).replace('sin', 'np.sin').replace('cos', 'np.cos')
# Decide on what type of float to use here
dtype = "float64"
fh = open('kinematic_feasibility.pyx', 'w')
f_s = "from __future__ import division\n"
f_s += "import numpy as np\n"
f_s += "cimport numpy as np\n"
f_s += "DTYPE = np." + dtype + "\n"
f_s += "ctypedef np." + dtype + "_t DTYPE_t\n"
f_s += "cimport cython\n"
f_s += "@cython.boundscheck(False)\n"
f_s += "def f(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=1] params):\n"
f_s += ''' """Computes the holonomic constraint and its partial derivative
with respect to pitch.
x: Numpy array of lean and steer, length 2
params: Numpy array of parameters, length 8
in the following order:
rr: Rear wheel radius.
rrt: Rear wheel tire radius.
rf: Front wheel radius.
rft: Front wheel tire radius.
lr: Rear wheel center perpendicular distance from steer axis.
lf: Front wheel center perpendicular distance from steer axis.
ls: Steer axis offset.
s: Steer angle. (treated as a parameter)
Returns a numpy array of the value of the holonomic constraint
in the first entry, and the partial derivative of the holonomic
constraint with respect to pitch in the second entry. The
zeros of this function occur on the boundary of the
kinematically feasibly region in the lean/steer plane.
"""'''
f_s += " # Generated " + time.asctime() + "\n"
f_s += " cdef np.float64_t l = x[0]\n"
f_s += " cdef np.float64_t p = x[1]\n"
f_s += " cdef np.float64_t rr = params[0]\n"
f_s += " cdef np.float64_t rrt = params[1]\n"
f_s += " cdef np.float64_t rf = params[2]\n"
f_s += " cdef np.float64_t rft = params[3]\n"
f_s += " cdef np.float64_t lr = params[4]\n"
f_s += " cdef np.float64_t lf = params[5]\n"
f_s += " cdef np.float64_t ls = params[6]\n"
f_s += " cdef np.float64_t s = params[7]\n"
f_s += " cdef np.ndarray[DTYPE_t, ndim=1] F = np.zeros([2], dtype=DTYPE)\n"
f_s += " F[0] = " + f0_s + "\n"
f_s += " F[1] = " + f1_s + "\n"
f_s += " return F\n\n\n"
f_s += "@cython.boundscheck(False)\n"
f_s += "def df(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=1] params):\n"
f_s += ''' """Evaluates holonomic constraint and its partial derivative
with respect to pitch, with the steer angle treated as parameter.
x: Numpy array of lean and steer, length 2
params: Numpy array of parameters, length 8
in the following order:
rr: Rear wheel radius.
rrt: Rear wheel tire radius.
rf: Front wheel radius.
rft: Front wheel tire radius.
lr: Rear wheel center perpendicular distance from steer axis.
lf: Front wheel center perpendicular distance from steer axis.
ls: Steer axis offset.
s: Steer angle
"""'''
f_s += " # Generated " + time.asctime() + "\n"
f_s += " cdef np.float64_t l = x[0]\n"
f_s += " cdef np.float64_t p = x[1]\n"
f_s += " cdef np.float64_t rr = params[0]\n"
f_s += " cdef np.float64_t rrt = params[1]\n"
f_s += " cdef np.float64_t rf = params[2]\n"
f_s += " cdef np.float64_t rft = params[3]\n"
f_s += " cdef np.float64_t lr = params[4]\n"
f_s += " cdef np.float64_t lf = params[5]\n"
f_s += " cdef np.float64_t ls = params[6]\n"
f_s += " cdef np.float64_t s = params[7]\n"
f_s += " cdef np.ndarray[DTYPE_t, ndim=2] dF = np.zeros([2,2], dtype=DTYPE)\n"
f_s += " dF[0, 0] = " + J00_s + "\n"
f_s += " dF[0, 1] = " + J01_s + "\n"
f_s += " dF[1, 0] = " + J10_s + "\n"
f_s += " dF[1, 1] = " + J11_s + "\n"
f_s += " return dF\n"
f_s += "@cython.boundscheck(False)\n"
f_s += "def convert_geometric(np.ndarray[DTYPE_t, ndim=1] x):\n"
f_s += " cdef np.ndarray[DTYPE_t, ndim=1] lengths = np.zeros([3], dtype=DTYPE)\n"
f_s += " cdef np.float64_t w = x[0]\n"
f_s += " cdef np.float64_t c = x[1]\n"
f_s += " cdef np.float64_t lmbda = x[2]\n"
f_s += " cdef np.float64_t rr = x[3]\n"
f_s += " cdef np.float64_t rrt = x[4]\n"
f_s += " cdef np.float64_t rf = x[5]\n"
f_s += " cdef np.float64_t rft = x[6]\n"
f_s += " lengths[0] = (w+c)*np.cos(lmbda)-(rr+rrt)*np.sin(lmbda)\n"
f_s += " lengths[1] = (rf+rft)*np.sin(lmbda)-c*np.cos(lmbda)\n"
f_s += " lengths[2] = w*np.sin(lmbda) + (rr+rrt-rf-rft)*np.cos(lmbda)\n"
f_s += " return lengths\n"
fh.write(f_s)
fh.close()
cythonit('kinematic_feasibility')
compileit('kinematic_feasibility.c', 'kinematic_feasibility.so')
# Uncomment if you want to regenerate the cython generated kinematic
# feasibility module
#generate_kf_module()
import kinematic_feasibility as kf
w = 1.02
c = 0.08
lmbda = np.pi/10.
rr = 0.3
rrt = 0.0
rf = 0.35
rft = 0.0
benchmark_parameters = np.array([w, c, lmbda, rr, rrt, rf, rft],\
dtype=np.float64)
lr,lf,ls = kf.convert_geometric(benchmark_parameters)
x = np.array([0.0, lmbda], dtype=np.float64)
params = np.array([rr, rrt, rf, rft, lr, lf, ls, 0.0], dtype=np.float64)
step = 0.01
steer_range_12 = np.arange(0.01, np.pi, step)
steer_range_34 = np.arange(-0.01, -np.pi, -step)
# First quadrant
# Initial guess on lean and pitch
x0 = [np.pi/2. + 0.01, np.pi/2 - lmbda]
# List for storing the lean and pitch
lp_range1 = []
for steer in steer_range_12:
params[-1] = steer
lp_range1.append(fsolve(kf.f, x0, args=params))
x0 = lp_range1[-1]
lp1 = np.array(lp_range1)
# Second quadrant
# Initial guess on lean and pitch
x0 = [-np.pi/2. + 0.01, np.pi/2 - lmbda]
# List for storing the lean and pitch
lp_range2 = []
for steer in steer_range_12:
params[-1] = steer
lp_range2.append(fsolve(kf.f, x0, args=params))
x0 = lp_range2[-1]
lp2 = np.array(lp_range2)
# Third quadrant
# Initial guess on lean and pitch
x0 = [-np.pi/2. - 0.01, np.pi/2 - lmbda]
# List for storing the lean and pitch
lp_range3 = []
for steer in steer_range_34:
params[-1] = steer
lp_range3.append(fsolve(kf.f, x0, args=params))
x0 = lp_range3[-1]
lp3 = np.array(lp_range3)
# Fourth quadrant
# Initial guess on lean and pitch
x0 = [np.pi/2. + 0.01, np.pi/2 - lmbda]
# List for storing the lean and pitch
lp_range4 = []
for steer in steer_range_34:
params[-1] = steer
lp_range4.append(fsolve(kf.f, x0, args=params))
x0 = lp_range4[-1]
lp4 = np.array(lp_range4)
import matplotlib.pyplot as plt
grey = (0.5, 0.5, 0.5)
fig = plt.figure()
plt.plot(lp1[:,0], steer_range_12,color=grey)
plt.fill_betweenx(steer_range_12, lp1[:,0], np.pi/2.,color=grey)
plt.plot(lp2[:,0], steer_range_12,color=grey)
plt.fill_betweenx(steer_range_12, -np.pi/2., lp2[:,0],color=grey)
plt.plot(lp3[:,0], steer_range_34,color=grey)
plt.fill_betweenx(steer_range_34, -np.pi/2., lp3[:,0],color=grey)
plt.plot(lp4[:,0], steer_range_34,color=grey)
plt.fill_betweenx(steer_range_34, lp4[:,0], np.pi/2.,color=grey)
plt.axis([-np.pi/2, np.pi/2., -np.pi, np.pi])
plt.xlabel('Lean')
plt.ylabel('Steer')
plt.title('Kinematically feasible region')
plt.grid()
plt.show()
| bsd-3-clause |
tectronics/princess-alist | home/xiaogaozi/src/Python/collatz_conjecture.py | 1 | 1258 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Collatz Conjecture. <http://code.google.com/p/princess-alist/>
# Copyright (C) 2009 xiaogaozi <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
def CollatzConjecture(n):
"""Collatz Conjecture"""
t = 0
while n != 1:
t += 1
if n % 2 == 0:
n /= 2
else:
n = n * 3 + 1
return t
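# Hedged sanity check (added; not part of the original script): the total
# stopping time of 27 is the well-known value of 111 steps to reach 1.
def _collatz_example():
    assert CollatzConjecture(27) == 111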
if __name__ == "__main__":
x = range(2, 1000000)
y = []
for n in x:
y.append(CollatzConjecture(n))
plt.plot(x, y, 'ro')
plt.axis([0, x[len(x) - 1] + 100, 0, y[len(y) - 1] + 150])
plt.show()
| gpl-3.0 |
bigdataelephants/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
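# Hedged illustration (added; not part of the original example): a sparse
# matrix whose non-zeros all fall in columns 0 and 2 has 2 non-zero columns.
def _n_nonzero_columns_example():
    from scipy.sparse import csr_matrix
    X = csr_matrix(np.array([[1, 0, 3], [0, 0, 4]]))
    assert n_nonzero_columns(X) == 2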
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
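# Hedged illustration (added; not part of the original example): token_freqs
# lower-cases the \w+ tokens and counts them.
def _token_freqs_example():
    freqs = token_freqs("To be or not to be")
    assert freqs == {"to": 2, "be": 2, "or": 1, "not": 1}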
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
marcoalopez/JASPE | jaspe/jaspe.py | 1 | 15743 | # -*- coding: utf-8 -*-
# ============================================================================ #
# #
# JASPE script #
# JASPE stands for Just Another Stereoplot in a Python Environment #
# #
# Copyright (c) 2017-present Marco A. Lopez-Sanchez #
# #
# This Source Code Form is subject to the terms of the Mozilla Public #
# License, v. 2.0. If a copy of the MPL was not distributed with this #
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #
# #
# Covered Software is provided under this License on an “AS IS” BASIS, #
# WITHOUT WARRANTY OF ANY KIND, either expressed, implied, or statutory, #
# including, without limitation, warranties that the Covered Software is #
# FREE OF DEFECTS, merchantable, fit for a particular purpose or #
# non-infringing. The entire risk as to the quality and performance #
# of the Covered Software is with You. Should any Covered Software prove #
# defective in any respect, You (not any Contributor) assume the cost of #
# any necessary servicing, repair, or correction. This disclaimer of #
# warranty constitutes an essential part of this License. No use of any #
# Covered Software is authorized under this License except under this #
# disclaimer. #
# #
# Version alpha 0.3 #
# For details see: https://github.com/marcoalopez/JASPE #
# download at https://github.com/marcoalopez/JASPE/releases #
# #
# Requirements: #
# Python version 3.5 or higher #
# Numpy version 1.11 or higher #
# Matplotlib version 2.0 or higher #
# Pandas version x.x or higher #
# #
# ============================================================================ #
# Import some scientific libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
# Examples
# fig, ax = stereoplot()
# fig, (ax1, ax2) = stereoplot(nrows=1, ncols=2)
# fig, ((ax1, ax2), (ax3, ax4)) = stereoplot(nrows=2, ncols=2)
ax = None
def stereoplot(nrows=1, ncols=1):
"""Automatically generate a defined number of stereoplots using
the matplotlib library
Parameters
----------
nrows : positive integer
the number of rows of the subplot grid
ncols : positive integer
the number of columns of the subplot grid
Call functions
--------------
set_stereo
Return
------
TODO
"""
num_plots = nrows * ncols
if num_plots == 1:
fig, ax = plt.subplots()
set_stereo(ax)
fig.tight_layout()
return fig, ax
else:
if nrows == 1:
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, sharey=True)
elif ncols == 1:
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, sharex=True)
else:
fig, ax = plt.subplots(nrows=nrows, ncols=ncols)
for item in fig.axes:
set_stereo(item)
fig.tight_layout()
return fig, fig.axes
def set_stereo(ax):
"""Tweak the matplotlib figure axes to plot a stereographic projection of
unit radius.
Parameters
----------
ax : a matplotlib object
the axes
"""
# remove default matplotlib elements with no interest
ax.tick_params(
axis='both',
which='both',
bottom=False,
top=False,
left=False,
labelbottom=False,
labelleft=False)
# draw the contour and the centre of the circle
ax.plot(0, 0, 'k+')
circ = plt.Circle((0, 0), 1.0, facecolor='none', edgecolor='black')
ax.add_patch(circ)
ax.axis('equal') # equal aspect ratio
ax.axis('off') # remove the box
return None
def plot_eq_area(azimuth, dip, axe=ax, **kwargs):
"""Plot the coordinates of a line in an equal area projection
of unit radius.
Parameters
----------
azimuth : scalar or array_like with values between 0 and 360
angle between the north vector and the line on the horizontal
plane in spherical coordinates (0 - 360 degrees)
dip : scalar or array_like with values between 0 and 90
plunge or dip of line (0 - 90 degrees)
axe : object, ax by default
the figure axe
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Examples
--------
>>> plot_eq_area(180, 45)
>>> plot_eq_area(270, 45, marker='h', color='green', markersize=12)
Call function(s)
--------------
- sph_to_eq_area
"""
x, y = sph_to_eq_area(azimuth, dip)
return axe.plot(x, y, 'o', **kwargs)
def plot_eq_angle(azimuth, dip, axe=ax, **kwargs):
"""Plot the coordinates of a line in an equal angle projection
of unit radius.
Parameters
----------
azimuth : scalar or array_like with values between 0 and 360
angle between the north vector and the line on the horizontal
plane in spherical coordinates (0 - 360 degrees)
dip : scalar or array_like with values between 0 and 90
plunge or dip of line (0 - 90 degrees)
axe : object, ax by default
the figure axe
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Examples
--------
>>> plot_eq_angle(180, 45)
>>> plot_eq_angle(270, 45, marker='h', color='blue', markersize=12)
Call function(s)
--------------
- sph_to_eq_angle
"""
x, y = sph_to_eq_angle(azimuth, dip)
return axe.plot(x, y, 'o', **kwargs)
def sph_to_eq_area(azimuth, dip):
"""Calculate the spherical coordinates of a line in an equal area stereographic
projection of unit radius
Parameters
----------
azimuth : scalar or array_like with values between 0 and 360
angle between the north vector and the line on the horizontal
plane in spherical coordinates (0 - 360 degrees)
dip : scalar or array_like with values between 0 and 90
plunge or dip of line in spherical coordinates (degrees)
Returns
-------
a float or numpy array with the stereographic equal-area coordinates (x, y)
"""
# convert degrees to radians
azimuth = np.deg2rad(azimuth)
dip = np.deg2rad(dip)
x = np.sqrt(2) * np.sin((np.pi / 4.0) - (dip / 2)) * np.sin(azimuth)
y = np.sqrt(2) * np.sin((np.pi / 4.0) - (dip / 2)) * np.cos(azimuth)
return x, y
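# --- Hedged sanity check (added for illustration; not part of JASPE) ---
# Two limiting cases of the equal-area mapping: a vertical line (dip = 90)
# plots at the centre of the net, while a horizontal line pointing north
# (azimuth = 0, dip = 0) plots on the primitive circle at (0, 1).
def _sph_to_eq_area_example():
    x, y = sph_to_eq_area(0.0, 90.0)
    assert abs(x) < 1e-9 and abs(y) < 1e-9
    x, y = sph_to_eq_area(0.0, 0.0)
    assert abs(x) < 1e-9 and abs(y - 1.0) < 1e-9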
def sph_to_eq_angle(azimuth, dip):
"""Calculate the spherical coordinates of a line in an equal angle
stereographic projection of unit radius
Parameters
----------
azimuth : scalar or array_like with values between 0 and 360
angle between the north vector and the line on the horizontal
plane in spherical coordinates (0 - 360 degrees)
dip : scalar or array_like with values between 0 and 90
plunge or dip of line in spherical coordinates (degrees)
Returns
-------
a float or numpy array with the coordinates (x, y)
"""
# convert degrees to radians
azimuth = np.deg2rad(azimuth)
dip = np.deg2rad(dip)
x = np.tan((np.pi / 4.0) - (dip / 2)) * np.sin(azimuth)
y = np.tan((np.pi / 4.0) - (dip / 2)) * np.cos(azimuth)
return x, y
def sph_to_cart(azimuth, dip, plot3D=False):
"""Convert from spherical (azimuth, dip) to cartesian coordinates.
It returns the north, east, and down direction cosines of a line.
Parameters
----------
azimuth : scalar or array_like with values between 0 and 360
angle between the north vector and the line on the horizontal
plane in spherical coordinates (0 - 360 degrees)
dip : scalar or array_like with values between 0 and 90
plunge or dip of line in spherical coordinates (degrees)
Returns
-------
a numpy array with the direction cosines (north, east, down)
"""
# convert degrees to radians
azimuth = np.deg2rad(azimuth)
dip = np.deg2rad(dip)
# estimate direction cosines
east_cos = np.cos(dip) * np.sin(azimuth)
north_cos = np.cos(dip) * np.cos(azimuth)
down_cos = np.sin(dip)
if plot3D is True:
from mpl_toolkits.mplot3d import Axes3D
figura = plt.figure()
ax3d = figura.add_subplot(111, projection=Axes3D.name)
ax3d.scatter(xs=north_cos, ys=east_cos, zs=down_cos, zdir='z')
return north_cos, east_cos, down_cos
def cart_to_sph(north_cos, east_cos, down_cos):
"""Convert from cartesian to spherical coordinates and returns the
azimuth and the plunge of a line.
Parameters
----------
north_cos : scalar or array-like
north direction cosine
east_cos : scalar or array-like
east direction cosine
down_cos : scalar or array-like
down direction cosine
Call function
-------------
- zero_to_pi
Returns
-------
a numpy array with spherical coordinates (azimuth, dip)
"""
# calculate dip
dip = np.arcsin(down_cos)
# calculate azimuth
if north_cos == 0.0: # north direction cosine zero case
if east_cos < 0.0:
azimuth = (3 / 2) * np.pi # azimuth is West
else:
azimuth = (1 / 2) * np.pi # azimuth is East
else:
azimuth = np.arctan(east_cos / north_cos)
if north_cos < 0.0:
azimuth = azimuth + np.pi
# Check whether azimuth lies between 0 and 2*pi radians
azimuth = zero_to_pi(azimuth)
# convert radians to degrees
azimuth = np.rad2deg(azimuth)
dip = np.rad2deg(dip)
return azimuth, dip
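# --- Hedged round-trip check (added for illustration; not part of JASPE) ---
# Converting a line to direction cosines and back should recover the input,
# assuming azimuth is given in [0, 360) degrees and dip in (0, 90] degrees
# (and that zero_to_pi leaves values already inside [0, 2*pi) unchanged).
def _sph_cart_roundtrip_example():
    az_in, dip_in = 215.0, 37.5
    n, e, d = sph_to_cart(az_in, dip_in)
    az_out, dip_out = cart_to_sph(n, e, d)
    assert abs(az_out - az_in) < 1e-6 and abs(dip_out - dip_in) < 1e-6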
def mean_vector(azimuth, dip, conf=95):
"""Estimate the mean vector for a given set of vectors.
Parameters
----------
azimuth : scalar or array-like with values between 0 and 360
line direction (azimuth) in spherical coordinates (degrees)
plunge : scalar or array_like with values between 0 and 90
plunge or dip of line in spherical coordinates (degrees)
conf : integer or float between 0 and 100
the cone level of confidence (default 95 %)
Call functions
--------------
- sph_to_cart
- cart_to_sph
Returns
-------
mean azimuth and dip
"""
n = len(azimuth)
# sum the different cosine directions
Nc, Ec, dc = sph_to_cart(azimuth, dip)
Nc_sum, Ec_sum, dc_sum = np.sum(Nc), np.sum(Ec), np.sum(dc)
# Estimate the resultant vector (R)
R = np.sqrt(Nc_sum**2 + Ec_sum**2 + dc_sum**2)
rave = R / n # normalize to n
# check significance
if rave < 0.1:
print('Mean vector is insignificant')
else:
Nc_sum, Ec_sum, dc_sum = Nc_sum / R, Ec_sum / R, dc_sum / R
# convert mean vector to lower hemisphere
if dc_sum < 0.0:
Nc_sum, Ec_sum, dc_sum = -Nc_sum, -Ec_sum, -dc_sum
# convert direction cosines to spherical coordinates (azimuth and dip)
azimuth, dip = cart_to_sph(Nc_sum, Ec_sum, dc_sum)
# Estimate statistics based on Fisher et al. (1987)
# Concentration factor
if R < n:
if n < 16:
afact = 1.0 - (1.0 / n)
conc = (n / (n - R)) * afact**2
else:
conc = (n - 1.0) / (n - R)
# estimate uncertainty cones
if rave >= 0.65 and rave < 1.0:
        bfact = 1.0 / (n - 1.0)
        # conf is a percentage (e.g. 95), so the exceedance probability is
        # p = (100 - conf) / 100; Fisher (1987) cone:
        # cos(d) = 1 - ((n - R)/R) * ((1/p)**(1/(n-1)) - 1)
        afact = 1.0 / ((100.0 - conf) / 100.0)
        d = np.arccos(1.0 - ((n - R) / R) * (afact**bfact - 1.0))
print('Mean vector = {a}/{b} azimuth/dip' .format(a=azimuth, b=dip))
print('Fisher statistics:')
print('concentration factor =', conc)
print('uncertainty cone = {a} at {b} % level of confidence' .format(a=d, b=conf))
return azimuth, dip
def zero_to_pi(azimuth):
"""Constrains azimuth between 0 and 2*pi radians
Parameter
---------
azimuth : float
the azimuth in radians
"""
    if azimuth < 0.0:
        azimuth = azimuth + (2 * np.pi)
    elif azimuth >= (2 * np.pi):
        azimuth = azimuth - (2 * np.pi)
return azimuth
def import_data(file_path='auto', delimiter=None, skiprows=None):
""" Extract the data from tabular-like files
Parameters
----------
file_path : string
The file location in the OS in quotes
e.g: 'C:/...yourFileLocation.../nameOfTheFile.csv'
If 'auto' (the default) the function will ask you for the location of
the file through a file selection dialog.
delimiter : string, default: None
Delimiter to use. The pandas method try to automatically detect the
separator, but it can be defined by the user.
skiprows : integer, list-like or callable. Default: None
Line numbers to skip (0-indexed) or number of lines to skip (int) at the
start of the text file.
Returns
-------
A pandas dataframe (tabular data) with all the data contained in the text file
plus a quick view of the data imported (in the console)
"""
if file_path == 'auto':
import tkinter as tk
from tkinter import filedialog
root = tk.Tk()
root.withdraw()
root.attributes("-topmost", True)
file_path = filedialog.askopenfilename(initialdir=os.getcwd(),
title="Select file",
filetypes=[('Text files', '*.txt'),
('Text files', '*.csv')])
# check the extension of the file
if file_path.endswith('.txt'):
data_frame = pd.read_table(file_path, delimiter=delimiter, skiprows=skiprows)
elif file_path.endswith('.csv'):
data_frame = pd.read_csv(file_path, delimiter=delimiter, skiprows=skiprows)
else:
print("Error: The file is not a 'txt' nor 'csv' or the file extension was not specified.")
return None
print(' ')
print(data_frame.head())
print('...')
print(data_frame.tail())
print(' ')
return data_frame
def plot_projection(ax, form='area'):
"""Plot a text indicating whether the projection is equal-area
or equal-angle
Parameters
----------
ax :
the Matplotlib axe
form : string
'area' for equal-area, 'angle' for equal-angle
"""
if form == 'area':
ax.text(-1, -1, 'Equal area projection \n Lower hemisphere',
horizontalalignment='center')
return None
elif form == 'angle':
ax.text(-1, -1, 'Equal angle projection \n Lower hemisphere',
horizontalalignment='center')
return None
else:
print("Wrong form. Please use 'area' or 'angle'")
return None
| mpl-2.0 |
KrasnitzLab/SCGV | scgv/views/clone.py | 1 | 1930 | '''
Created on Dec 15, 2016
@author: lubo
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scgv.views.base import ViewerBase
class CloneViewer(ViewerBase):
def __init__(self, model):
super(CloneViewer, self).__init__(model)
if self.model.clone is not None and self.model.subclone is not None:
self._select_colormap()
def _select_colormap_size(self, size):
assert size <= 20
if size > 12:
cmap = plt.get_cmap('tab20')
elif size > 7:
cmap = plt.get_cmap('Paired')
else:
cmap = plt.get_cmap('tab10')
colors = ['#FFFFFF']
colors.extend(cmap.colors[:size])
return ListedColormap(colors)
def _select_colormap(self):
self.vmax = max(np.max(self.model.clone), np.max(self.model.subclone))
size = int(self.vmax)
self.cmap = self._select_colormap_size(size)
def draw_clone(self, ax):
if self.model.clone is not None:
ax.imshow(
[self.model.clone],
aspect='auto',
interpolation='nearest',
cmap=self.cmap,
# norm=self.cmap.norm,
vmin=0,
vmax=self.vmax,
extent=self.model.bar_extent)
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([0.0])
ax.set_yticklabels(["Clone"])
def draw_subclone(self, ax):
if self.model.subclone is not None:
ax.imshow(
[self.model.subclone],
aspect='auto',
interpolation='nearest',
cmap=self.cmap,
vmin=0,
vmax=self.vmax,
extent=self.model.bar_extent)
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
| mit |
Lawrence-Liu/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instanciate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instanciate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
Vimos/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 28 | 3792 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'Got X with X.ndim=1',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'Got X with X.ndim=1',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
jselsing/GRB111117A | py/line_flux_lya.py | 1 | 3205 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Plotting
import matplotlib; matplotlib.use('TkAgg')
import matplotlib.pyplot as pl
import seaborn; seaborn.set_style('ticks')
from astropy.io import fits
import pandas as pd
import numpy as np
from scipy import stats, interpolate
import matplotlib as mpl
from astropy.convolution import Gaussian1DKernel, Gaussian2DKernel, convolve
from astropy.cosmology import Planck15 as cosmo
from astropy import units as u
from astropy import constants as c
params = {
'axes.labelsize': 10,
'font.size': 10,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': False,
'figure.figsize': [7.281, 4.5]
}
def convert_air_to_vacuum(air_wave):
    # convert air to vacuum. Based on http://idlastro.gsfc.nasa.gov/ftp/pro/astro/airtovac.pro
# taken from https://github.com/desihub/specex/blob/master/python/specex_air_to_vacuum.py
sigma2 = (1e4/air_wave)**2
fact = 1. + 5.792105e-2/(238.0185 - sigma2) + 1.67917e-3/( 57.362 - sigma2)
vacuum_wave = air_wave*fact
# comparison with http://www.sdss.org/dr7/products/spectra/vacwavelength.html
# where : AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4)
# air_wave=numpy.array([4861.363,4958.911,5006.843,6548.05,6562.801,6583.45,6716.44,6730.82])
# expected_vacuum_wave=numpy.array([4862.721,4960.295,5008.239,6549.86,6564.614,6585.27,6718.29,6732.68])
return vacuum_wave
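# Hedged sanity check (added; not part of the original script), based on the
# SDSS reference values quoted above: H-alpha at 6562.801 A in air should map
# to roughly 6564.614 A in vacuum.
def _air_to_vacuum_check():
    assert abs(convert_air_to_vacuum(6562.801) - 6564.614) < 0.01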
def main():
"""
# Script to get lineflux
"""
# Get extraction
data = np.genfromtxt("../data/spectroscopy/UVBext_lya.dat", dtype=None)
wl = data[:, 1]
wl_mask = (wl > 3850) & (wl < 3960)
flux = data[:, 2]
error = data[:, 3]
wl, flux, error = wl[wl_mask], flux[wl_mask], error[wl_mask]
error[error > 1e-15] = np.median(error)
mask = (wl > convert_air_to_vacuum(3907) - 12) & (wl < convert_air_to_vacuum(3907) + 12)
continuum_fit = np.polynomial.chebyshev.chebfit(wl[~mask], flux[~mask], deg=2)
continuum = np.polynomial.chebyshev.chebval(wl, continuum_fit)
pl.plot(wl, continuum - continuum)
flux = flux - continuum
F_lya = np.trapz(flux[mask], x=wl[mask])
print("Total %0.1e" %F_lya)
F_lya_err = np.sqrt(np.trapz((error**2.)[mask], x=wl[mask]))
dL = cosmo.luminosity_distance(2.211).to(u.cm).value
L_lya = F_lya * 4 * np.pi * dL**2
L_lya_err = F_lya_err * 4 * np.pi * dL**2
print(L_lya)
SFR = 9.1e-43*L_lya / 1.64
SFR_err = 9.1e-43*L_lya_err / 1.64
print(SFR, SFR_err)
# flux[flux_sky > 10000] = np.nan
pl.plot(wl, flux, label = "Spectrum")
pl.plot(wl[mask], flux[mask], label = "Integration limits")
pl.errorbar(wl, flux, yerr=error, fmt=".k", capsize=0, elinewidth=0.5, ms=3, label=r"f$_{[L\alpha]}$ = %0.1e +- %0.1e" % (F_lya, F_lya_err))
pl.errorbar(1, 1, yerr=1, fmt=".k", capsize=0, elinewidth=0.5, ms=3, label="SFR = " +str(np.around(SFR, 0)) + " +-" + str(np.around(SFR_err, 0)))
pl.xlim(3850, 3960)
pl.ylim(-0.5e-17, 1e-17)
# Save figure for tex
pl.legend()
pl.savefig("../figures/lya_flux.pdf", dpi="figure")
pl.show()
if __name__ == '__main__':
main() | mit |
fridewald/QEdark_1k | tools/bandsndos/bandsndos_mirror.py | 4 | 20068 | #
# Adrian Soto
# 22-12-2014
# Stony Brook University
#
################################################
# Plot band structure and DOS from the
# output of the bands.x program in the
# Quantum Espresso package.
#
# Features:
# 1) Allows for scissor correction (band shift)
# 2)
#
################################################
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.ticker import AutoMinorLocator
import matplotlib.gridspec as gridspec
import csv
plt.rcParams['font.family'] = 'Serif'
plt.rcParams['font.serif'] = 'Times New Roman'
#rcParams['text.usetex'] = True
rcParams['font.size'] = 24
class band:
def __init__(self, numkpoints, bandenergies):
self.nks = numkpoints
if (len(bandenergies) != numkpoints):
print "ERROR: list of band energies has wrong length. Setting band to 0."
self.nrg = [0] * numkpoints
else:
self.nrg = bandenergies
def printband(self):
print self.nrg
def shift(self, delta):
self.nrg = map(lambda x : x+delta, self.nrg) # watch for scope here.
return
################################################
# End of class band
################################################
class kpoints:
def __init__(self):
self.klist = []
class dos:
def __init__(self): #, numE, dosE, dosG, dosI):
self.numE = 0
self.dosE = []
self.dosG = []
self.dosI = []
def Load(self, dosfile):
#
# Load DOS from dos.x output
#
print " "
print "Loading DOS from ", dosfile
print " "
# Count lines in file
self.numE=sum(1 for line in open(dosfile))
# Read file line by line and process
f=open(dosfile, 'r')
# First line is header. Discard
data=f.readline()
# Iterate over file lines
for ilin in range(1,self.numE):
data=f.readline()
E=float(data[0:7])
self.dosE.append(E)
G=float(data[9:19])
self.dosG.append(G)
I=float(data[21:31])
self.dosI.append(I)
f.close()
return
################################################
# End of class dos
################################################
#
# Global functions
#
def w0gauss(x):
# As in flib/w0gauss.f90 in the QE package
pi = 3.141592653589793
sqrt2=math.sqrt(2)
arg = min([200.0, (x - 1.0 / sqrt2 ) **2])
w0 = (1.0/math.sqrt(pi)) * math.exp(-1.0 * arg )*(2.0 - sqrt2*x)
return w0
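# Hedged sanity check (added; not part of the original script): w0gauss is a
# smeared delta function, so a crude numerical integration over a wide window
# should return a value close to 1.
def _w0gauss_norm_check(width=10.0, step=0.001):
    total = 0.0
    x = -width
    while x < width:
        total += w0gauss(x) * step
        x += step
    return total  # expected to be ~1.0 up to discretization error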
def ReadBandStructure(bandsfile):
#
# This function reads the band structure as written
# to output of the bands.x program. It returns the bs
# as a flat list with all energies and another list with
# the k-point coordinates.
#
f = open(bandsfile, 'r')
# First line contains nbnd and nks. Read.
currentline = f.readline()
nks = int(currentline[22:26])
nbnd = int(currentline[12:16])
# Following lines contain the k-point coordinates
# and the band energies.
# Calculate number of lines containing band structure:
# nks k-point lines
# At each k-point there are (1+nbnd/10) energy values.
nlpkp = 1+nbnd/10 # Number of Lines Per K-Point
nlines = nks + nks * nlpkp
bsaux = []
xk = []
for ik in range (0, nks):
currentline = f.readline()
#kpoint = currentline[12:40]
kpoint = [float(x) for x in currentline.split()]
xk.append(kpoint)
auxenerg = []
for ibnd in range(0, nlpkp):
currentline = f.readline()
# append current line to auxiliary list
auxenerg.append( float(x) for x in currentline.split() )
# flatten list of lists containing energies for a given kpoint
# (each sublist corresponds to one line in the bands.dat file)
energ = [item for sublist in auxenerg for item in sublist]
# Sort ascendingly band energies for current k-point (to
# prevent artificial level crossings if QE bands.x output
# does not sort them correctly) and append to band structure
bsaux.append(sorted(energ))
f.close()
# Flatten bs list
bsflat = [item for sublist in bsaux for item in sublist]
return nks, nbnd, xk, bsflat
def SortByBands(nks, nbnd, bsflat):
# Rearrarange bs from k-points to bands
bs = []
for ibnd in range (0, nbnd):
currentband=[]
for ik in range (0, nks):
#currentband.append(bsflat[ik*nbnd+ibnd])
bs.append(bsflat[ik*nbnd+ibnd])
#bs.append( currentband )
return bs
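# Hedged illustration (added; not part of the original script): with 2 k-points
# and 2 bands, a flat list ordered k-point by k-point,
# [E(k0,b0), E(k0,b1), E(k1,b0), E(k1,b1)], is regrouped band by band.
def _sortbybands_example():
    assert SortByBands(2, 2, [10.0, 20.0, 11.0, 21.0]) == [10.0, 11.0, 20.0, 21.0]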
def FindHLGap(nks, hvb, lcb):
#
# Find HOMO and LUMO energies and energy gap
#
# hvb = highest valence band
# lcb = lowest conduction band
#
# Ehvb = highest valence energy or HOMO energy
# Elcb = lowest conduction energy or LUMO energy
#
gap = lcb[0] - hvb[0]
for ik1 in range (0, nks):
auxcond = lcb[ik1]
for ik2 in range (0, nks):
auxval = hvb[ik2]
currentgap = auxcond-auxval
if (currentgap < 0.0):
print "ERROR: negative gap"
elif (currentgap < gap):
gap = currentgap
Ehvb = max(hvb)
Elcb = min(lcb)
return Ehvb, Elcb, gap
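# Hedged illustration (added; not part of the original script): for two
# k-points with highest valence band [-1.0, -0.5] eV and lowest conduction
# band [0.6, 1.0] eV, the HOMO is -0.5 eV, the LUMO 0.6 eV and the gap 1.1 eV.
def _findhlgap_example():
    Ehvb, Elcb, gap = FindHLGap(2, [-1.0, -0.5], [0.6, 1.0])
    assert abs(Ehvb + 0.5) < 1e-9
    assert abs(Elcb - 0.6) < 1e-9
    assert abs(gap - 1.1) < 1e-9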
def Scissor(nks, newgap, bands, shifttype):
#
# shifttype == 0 : shift valence bands by -0.5*delta and
# conduction bands by 0.5*delta
# shifttype == 1 : as in 0 but placing the highest valence
# energy at 0.0
# shifttype == 2 : as in 0 but placing the gap center at 0.0
#
EHOMO, ELUMO, oldgap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta=(newgap-oldgap)/2.0
# Apply scissor to band structure
for ibnd in range (0, nbnd):
if (ibnd < nval):
bands[ibnd].shift(-1.0*delta)
else:
bands[ibnd].shift(delta)
if (shifttype==0):
print "Scissor correction to band energies has been applied."
return
elif (shifttype==1):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -1.0*EHOMO
#print "delta=", delta
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Highest valence energy has been set to 0.0 eV"
return
elif (shifttype==2):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -0.5*(EHOMO+ELUMO)
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Gap center has been set to 0.0 eV"
return
else:
print "ERROR: shifttype has an non-valid value. Default value shifttype==0."
print "Scissor correction to band energies has been applied."
return
def CreateDOS(nks, nbnd, bzv, Emin, Emax, deltaE, bnd, normalize):
# ATTENTION: bnd must be an object of the class band
Emin = min(bnd[10].nrg)
Emax = max(bnd[nbnd-1].nrg)
ndos = int((Emax - Emin)/deltaE + 0.50000001) # int always rounds to lower integer
dosE = []
dosG = []
intg=0.0
deltaEgauss=5.0*deltaE
d3k=(1.0/nks)*bzv
wk=2.0/nks
print "Creating DOS with uniform k-point weights"
# Create DOS
for idos in range (0, ndos):
E = Emin + idos * deltaE
dosg = 0.0
for ik in range(0, nks):
for ibnd in range (0, nbnd):
dosg = dosg + w0gauss ( (E - bnd[ibnd].nrg[ik] ) / deltaEgauss ) * wk
###dosg = dosg + w0gauss ( (E - bnd[ibnd].nrg[ik] ) / deltaE ) * wk
dosg = dosg/deltaEgauss
intg = intg + dosg*deltaE # integrated DOS
dosE.append(E)
dosG.append(dosg)
print "\n Integrated DOS=", intg, "\n"
# Normalize DOS
if (normalize == 1):
print "Normalizing DOS to 1.0 \n"
dosGnorm=dosG
for idos in range (0, ndos):
dosGnorm[idos]=dosGnorm[idos]/intg
return dosE, dosGnorm
if(normalize==0):
return dosE, dosG
else:
print " ERROR!! in CreateDOS function: wrong DOS normalization choice."
return
def PlotBandStructure(nbnd, nval, bnd, plotfile, Ef, sympoints, nks_btw_sympoints ):
#
# ATTENTION: bnd must be an object of the class band
#
# nval: number of valence bands
# Ef: Fermi Energy. If false then it won't print horizontal line
# sympoints: list containing labels of symmetry points
# nks_btw_sympoints: number of k-points between symmetry points
#
# NOTE: this function assumes that the number of points
# between symmetry points is constant
#
print "Plotting band structure to", plotfile
col = 'k'
for ibnd in range (0, nbnd):
#if (ibnd < nval):
# col='b'
#else:
# col='r'
plt.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
y_min = min(bnd[0].nrg)
    y_max = max(bnd[nbnd-1].nrg)
plt.xlabel("Brillouin zone path")
plt.ylabel("band energies (eV)")
numsympoints = len(sympoints)
kpath=[]
xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints)
for i in range(0, numsympoints):
kpath.append(sympoints[i])
if (i < numsympoints-1):
for j in range (0, nks_btw_sympoints-1):
kpath.append('')
# plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs)
# Ticks and vertical lines across BS plot
plt.xticks(xticks, sympoints)
for i in range(0,numsympoints):
plt.axvline(x=xticks[i], ymin=y_min, ymax=y_max, hold=None, color='k', linewidth=0.25)
if (not Ef):
plt.axhline(Ef, color="black", linestyle="--")
plt.xlim( 0, len(bnd[0].nrg)-1 )
plt.savefig(plotfile)
return
def PlotDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of reals
plt.plot(dosG, dosE)
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.gca().set_xlim(left=0)
plt.savefig(plotname)
return
def PlotBnD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, dosE, dosG, plotname):
col = 'k'
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
for ibnd in range (0, nbnd):
ax1.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
ax1.set_title('Sharing Y axis')
ax2.plot(dosG, dosE)
ax2.set_xlim([0.0, 0.1])
plt.ylim([-15.0, 20.0])
#plt.subplots_adjust(left=0.0, right=0.8)
plt.subplots_adjust(wspace = 0.0)
plt.show()
return
def PlotBnDD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, plotname):
######################################
# Plot generation and formatting
######################################
# Two subplots, unpack the axes array immediately
gs = gridspec.GridSpec(1, 2,width_ratios=[1,4])
f = plt.figure()
ax1 = plt.subplot(gs[1])
ax2 = plt.subplot(gs[0])
# Formatting
col = 'k'
ax1.set_xlabel("Brillouin zone path")
ax1.xaxis.set_label_position("bottom")
ax1.set_ylabel("E [eV]", rotation=270)
ax1.yaxis.set_label_position("right")
ax1.text(3.50-0.12, -12.50, 'Ge', fontsize=28)
###ax2.text(0.07, 18.00, 'Si', fontsize=18)
ax2.set_xlabel("DOS \n [eV$^{-1}$]")
ax2.xaxis.set_label_position("top")
#ax2.set_ylabel("E [eV]", rotation=270)
#y_min = -32.0
y_min = -13.0
y_max = 20.0
x2_min = 0.00
x2_max = 5.00
# Mirror
x2_min = 0.12
x2_max = 0.00
ax1.set_ylim([y_min, y_max])
ax2.set_xlim([x2_min, x2_max])
#ax2.set_xlim([0.0, 10.0])
ax2.set_ylim([y_min, y_max])
# Ticks
#minor_locator = AutoMinorLocator(2)
#ax2.xaxis.set_minor_locator(minor_locator)
# Number of symmetry points
numsympoints = len(sympoints)
# Generate horizontal axis containing k-path accumulated length (for BS plot)
x=0.0
klen=[x]
dx=1.0/((numsympoints-1)*nks_btw_sympoints)
for isym in range(0, numsympoints-1):
dx=sym_pt_dists[isym]/nks_btw_sympoints
for ipt in range(1, nks_btw_sympoints+1):
x=x+dx
klen.append(x)
#xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints)
xticks=[]
for isym in range(0, numsympoints):
j = isym * nks_btw_sympoints
xticks.append(klen[j])
x1_min=min(xticks)
x1_max=max(xticks)
ax1.set_xlim(x1_min, x1_max)
# Plot bands
col = '0.4'
for ibnd in range (0, nbnd):
ax1.plot(klen , bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
# plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs)
# Ticks and vertical lines across BS plot
ax1.set_xticks(xticks)
ax1.set_xticklabels(sympoints)
# Plot DOSs
ax2.plot(dosG1, dosE1, linestyle='-', linewidth=1.0, color='b')
ax2.plot(dosG2, dosE2, linestyle='-', color='r')
#dosticks=[0.0, 0.05, 0.1, 0.15]
dosticks=[5, 0] # Mirror
ax2.set_xticks(dosticks)
ax2.set_xticklabels(dosticks)
#minor_locator = AutoMinorLocator(5)
#ax2.xaxis.set_minor_locator(minor_locator)
minorx2ticks=[4, 3, 2, 1]
ax2.set_xticks(minorx2ticks, minor = True)
# BS ticks
yticks=[-10, -5, 0, 5, 10, 15, 20]
minor_locator = AutoMinorLocator(5)
ax1.yaxis.set_minor_locator(minor_locator)
ax2.yaxis.set_minor_locator(minor_locator)
ax1.xaxis.tick_top()
#ax1.set_yticks(yticks)
#ax1.set_yticklabels(yticks)
# Mirror
ax1.yaxis.tick_right()
ax1.set_yticks(yticks)
ax1.set_yticklabels(yticks)
ax2.set_yticklabels([])
#plt.subplots_adjust(left=0.0, right=0.8)
plt.subplots_adjust(wspace = 0.0)
# Attempt to fill the area to the left of the DOS
# split values into positive and negative
alpha_fill=0.5
dosE1neg=[]
dosG1neg=[]
dosE1pos=[]
dosG1pos=[]
for i in range(0, len(dosE1)):
if(dosE1[i]<0.0):
dosE1neg.append(dosE1[i])
dosG1neg.append(dosG1[i])
else:
dosE1pos.append(dosE1[i])
dosG1pos.append(dosG1[i])
dosE1new =[y_min]+dosE1+[y_max]
dosG1new =[0.0]+dosG1+[0.0]
ax2.fill_between(dosG1new, 0, dosE1new, alpha=alpha_fill, linewidth=0.0, edgecolor='w')
# Vertical lines across BS plot
for i in range(0,numsympoints):
ax1.axvline(x=xticks[i], ymin=y_min, ymax=y_max, color='k', linewidth=0.25)
# Horizontal line at top of valence band
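	# NB: the caller appears to pass Ef = 0.0 after the scissor shift, so "not Ef"
	# is True exactly when the reference level sits at zero and the line is drawn there.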
if (not Ef):
ax1.axhline(Ef, color="black", linestyle="--")
ax2.axhline(Ef, color="black", linestyle="--")
#plt.show()
plt.savefig(plotname, bbox_inches='tight')
return
def PlotMultipleDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of lists of reals
Ndos=len(dosE[:])
for i in range(0, Ndos):
plt.plot(dosG[i], dosE[i])
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.savefig(plotname)
return
#def WriteBandStructure():
# print (" %10.6f%10.6f%10.6f" % (kpoint[0], kpoint[1], kpoint[2]) )
############################################################################################
############################################################################################
############################################################################################
############################################################################################
############################ PROGRAM STARTS HERE ###################################
############################################################################################
############################################################################################
############################################################################################
############################################################################################
bohr2ang=0.52918
############
# Band structure
############
filename="ge.bands.dat"
nks = 0
nbnd=0
xk=[]
bsflt=[]
bs=[]
sympoints=['$L$','$\Gamma$', '$X$', '$W$', '$K$', '$\Gamma$']
sym_pt_dists=[0.5*math.sqrt(3), 1.0, 0.5, 0.25*math.sqrt(2), 0.75*math.sqrt(2)] ## distances between symmetry points (by hand)
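# (these are the standard segment lengths of the FCC path L-Gamma-X-W-K-Gamma,
#  expressed in units of 2*pi/alat)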
nks_btw_sympoints=50
# Read from file and sort bs by bands
nks, nbnd, xk, bsflt = ReadBandStructure(filename)
if(nbnd==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs = SortByBands(nks, nbnd, bsflt)
print "nks=", nks
print "nbnd=", nbnd
# Create band objects
bands=[]
for ibnd in range (0, nbnd):
ledge = ibnd*nks
redge = ledge+nks
currentband = bs[ledge:redge]
bands.append( band(nks, currentband) )
# Scissor correction
# Si
###alat = 10.330495 # Bohr
###nval = 4 # for Si
###exptgap = 1.11 # eV # Si
# Ge
alat = 10.8171069 # Bohr
nval = 14 # for Ge with semicore
exptgap = 0.67 # Ge
# Convert to ANG and calculate BZV
alat=alat*bohr2ang
V=(alat**3)/4.0 # Good for FCC
bzv = (2.0*math.pi)**3/V
ncond = nbnd - nval
Scissor(nks, exptgap, bands, 1) # 3rd argument is 1. Then set 3rd argument of PlotBandStructure to 0.0
print "Scissor correction with gap set to", exptgap
#############
# DOS
#############
filename='ge.bands_full.dat'
nks1, nbnd1, xk1, bsflt1 = ReadBandStructure(filename)
if(nbnd1==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs1 = SortByBands(nks1, nbnd1, bsflt1)
print "nks=", nks1
print "nbnd=", nbnd1
# Create band objects
bands1=[]
for ibnd in range (0, nbnd1):
ledge1 = ibnd*nks1
redge1 = ledge1+nks1
currentband1 = bs1[ledge1:redge1]
bands1.append( band(nks1, currentband1) )
# Scissor correction
Scissor(nks1, exptgap, bands1, 1) # 3rd argument is 1. Then set 3rd argument of PlotBandStructure to 0.0
print "Scissor correction with gap set to", exptgap
filename='ge.bands_243.dat'
nks2, nbnd2, xk2, bsflt2 = ReadBandStructure(filename)
if(nbnd2==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs2 = SortByBands(nks2, nbnd2, bsflt2)
print "nks=", nks2
print "nbnd=", nbnd2
# Create band objects
bands2=[]
for ibnd in range (0, nbnd2):
ledge2 = ibnd*nks2
redge2 = ledge2+nks2
currentband2 = bs2[ledge2:redge2]
bands2.append( band(nks2, currentband2) )
# Scissor correction
Scissor(nks2, exptgap, bands2, 1) # 3rd argument is 1. Then set 3rd argument of PlotBandStructure to 0.0
print "Scissor correction with gap set to", exptgap
# Generate DOSs
deltaE = 0.03 #eV
dosE1, dosG1 = CreateDOS(nks1, nbnd1, bzv, -13.0, 25.0, deltaE, bands1, 0)
dosE2, dosG2 = CreateDOS(nks2, nbnd2, bzv, -13.0, 25.0, deltaE, bands2, 0)
# Plot
#PlotDOS(dosE, dosG, "DOS.pdf")
#PlotBandStructure(nbnd, nval, bands, "BS.pdf", 0.0, sympoints, nks_btw_sympoints)
PlotBnDD(nbnd, nval, bands, 0.0, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, "BSnDOS.pdf")
# DOS
#mydos=dos()
#mydos.Load('dos_full.dat')
#mydos.Printout()
| gpl-2.0 |
TemoaProject/temoa | data_processing/MakeOutputPlots.py | 1 | 12275 | import sqlite3, sys
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt, cm as cmx, colors
from IPython import embed as IP
import numpy as np
import random
import time
import os
import argparse
class OutputPlotGenerator:
def __init__(self, path_to_db, region, scenario, super_categories=False):
self.db_path = os.path.abspath(path_to_db)
if region == 'global':
self.region = '%'
else:
self.region = region
self.scenario = scenario
self.folder_name =os.path.splitext(os.path.basename(path_to_db))[0] + "_" + region + "_" + scenario + "_plots"
# self.extractFromDatabase()
def extractFromDatabase(self, type):
'''
		Based on the type of plot being generated, extract data from the corresponding table in the database
'''
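		# plot-type codes used throughout this class:
		#   1 = capacity (Output_CapacityByPeriodAndTech),
		#   2 = output flow (Output_VFlow_Out),
		#   3 = emissions (Output_Emissions)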
con = sqlite3.connect(self.db_path)
cur = con.cursor()
if (type == 1):
cur.execute("SELECT sector, t_periods, tech, capacity FROM Output_CapacityByPeriodAndTech WHERE scenario == '"+self.scenario+"' AND regions LIKE '"+self.region+"'")
self.capacity_output = cur.fetchall()
self.capacity_output = [list(elem) for elem in self.capacity_output]
elif (type == 2):
cur.execute("SELECT sector, t_periods, tech, SUM(vflow_out) FROM Output_VFlow_Out WHERE scenario == '"+self.scenario+"' AND regions LIKE '"+self.region+"' GROUP BY sector, t_periods, tech")
self.output_vflow = cur.fetchall()
self.output_vflow = [list(elem) for elem in self.output_vflow]
elif (type == 3):
cur.execute("SELECT sector, t_periods, emissions_comm, SUM(emissions) FROM Output_Emissions WHERE scenario == '"+self.scenario+"' AND regions LIKE '"+self.region+"' GROUP BY sector, t_periods, emissions_comm")
self.output_emissions = cur.fetchall()
self.output_emissions = [list(elem) for elem in self.output_emissions]
cur.execute("SELECT tech, tech_category FROM technologies")
self.tech_categories = cur.fetchall()
self.tech_categories = [[str(word) for word in tuple] for tuple in self.tech_categories]
con.close()
def getSectors(self, type):
'''
Based on the type of the plot being generated, returns a list of sectors available in the database
'''
self.extractFromDatabase(type)
sectors = set()
data = None
if (type == 1):
data = self.capacity_output
elif (type == 2):
data = self.output_vflow
elif (type == 3):
data = self.output_emissions
for row in data:
sectors.add(row[0])
res = list(sectors)
res.insert(0,'all')
return res
def processData(self,inputData, sector, super_categories=False):
'''
Processes data for a particular sector to make it ready for plotting purposes
'''
periods = set()
techs = set()
for row in inputData:
row[0] = str(row[0])
row[1] = int(row[1])
row[2] = str(row[2])
row[3] = float(row[3])
tech_dict = dict(self.tech_categories)
if (super_categories):
for row in inputData:
row[2] = tech_dict.get(row[2],row[2])
for row in inputData:
if (row[0] == sector or sector=='all'):
periods.add(row[1]) # Reminder: indexing starts at 0
techs.add(row[2])
periods = list(periods)
techs = list(techs)
periods.sort()
output_values = dict() # Each row in a dictionary is a list
for tech in techs:
if tech == 'None' or tech == '':
continue
output_values[tech] = [0]*len(periods) #this just creates a blank table
for row in inputData:
if row[2] == 'None' or row[2] == '':
continue
if (row[0] == sector or sector=='all'):
output_values[row[2]][periods.index(row[1])] += row[-1]
output_values['periods']=periods
return output_values
def handleOutputPath(self, plot_type, sector, super_categories, output_dir):
outfile = plot_type+'_'+sector#+'_'+str(int(time.time()*1000))+'.png'
if (super_categories):
outfile += '_merged'
outfile += '.png'
outfile2 = os.path.join(self.folder_name, outfile)
output_dir = os.path.join(output_dir, self.folder_name)
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
self.output_file_name = os.path.join(output_dir, outfile)
self.output_file_name = self.output_file_name.replace(" ", "")
return outfile2
def generatePlotForCapacity(self,sector, super_categories=False, output_dir = '.'):
'''
Generates Plot for Capacity of a given sector
'''
outfile2 = self.handleOutputPath('capacity', sector, super_categories, output_dir)
if (os.path.exists(self.output_file_name)):
print("not generating new capacity plot")
return outfile2
sectors = self.getSectors(1)
if (not (sector in sectors)):
return ""
output_values = self.processData(self.capacity_output, sector, super_categories)
if self.region == '%':
title = 'Capacity Plot for ' + sector + ' across all regions'
else:
title = 'Capacity Plot for ' + sector + ' sector in region ' + self.region
self.makeStackedBarPlot(output_values, "Years", "Capacity ", 'periods', title)
return outfile2
def generatePlotForOutputFlow(self, sector, super_categories=False, output_dir = '.'):
'''
Generates Plot for Output Flow of a given sector
'''
outfile2 = self.handleOutputPath('flow', sector, super_categories, output_dir)
if (os.path.exists(self.output_file_name)):
print("not generating new flow plot")
return outfile2
sectors = self.getSectors(2)
if (not (sector in sectors)):
return ""
output_values = self.processData(self.output_vflow, sector, super_categories)
if self.region == '%':
title = 'Output Flow Plot for ' + sector + ' across all regions'
else:
title = 'Output Flow Plot for ' + sector + ' sector in region ' + self.region
self.makeStackedBarPlot(output_values, "Years", "Activity ", 'periods', title)
return outfile2;
def generatePlotForEmissions(self, sector, super_categories=False, output_dir = '.'):
'''
Generates Plot for Emissions of a given sector
'''
outfile2 = self.handleOutputPath('emissions', sector, super_categories, output_dir)
if (os.path.exists(self.output_file_name)):
print("not generating new emissions plot")
return outfile2
sectors = self.getSectors(3)
if (not (sector in sectors)):
return ""
output_values = self.processData(self.output_emissions, sector, super_categories)
if self.region == '%':
title = 'Emissions Plot for ' + sector + ' across all regions'
else:
title = 'Emissions Plot for ' + sector + ' sector in region ' + self.region
self.make_line_plot(output_values.copy(), 'Emissions', title)
return outfile2;
'''
--------------------------- Plot Generation related functions --------------------------------------
'''
def get_random_color(self, pastel_factor = 0.5):
return [(x+pastel_factor)/(1.0+pastel_factor) for x in [random.uniform(0,1.0) for i in [1,2,3]]]
def color_distance(self, c1,c2):
return sum([abs(x[0]-x[1]) for x in zip(c1,c2)])
def get_cmap(self, N):
'''Returns a function that maps each index in 0, 1, ... N-1 to a distinct
RGB color.'''
color_norm = colors.Normalize(vmin=0, vmax=N-1)
# More colormaps: https://matplotlib.org/examples/color/colormaps_reference.html
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='viridis')
def map_index_to_rgb_color(index):
return scalar_map.to_rgba(index)
return map_index_to_rgb_color
def generate_new_color(self, existing_colors,pastel_factor = 0.5):
max_distance = None
best_color = None
for i in range(0,100):
color = self.get_random_color(pastel_factor = pastel_factor)
if not existing_colors:
return color
best_distance = min([self.color_distance(color,c) for c in existing_colors])
if not max_distance or best_distance > max_distance:
max_distance = best_distance
best_color = color
return best_color
def makeStackedBarPlot(self, data, xlabel, ylabel, xvar, title):
random.seed(10)
handles = list()
xaxis=data[xvar]
data.pop('c',0)
data.pop(xvar,0)
stackedBars = data.keys()
colorMapForBars=dict()
colors = []
plt.figure()
cmap = self.get_cmap( len(stackedBars) )
for i in range(0,len(stackedBars)):
# colors.append(self.generate_new_color(colors,pastel_factor = 0.9))
# colorMapForBars[data.keys()[i]]=colors[i]
colorMapForBars[list(data.keys())[i]]=cmap(i)
width = min([xaxis[i+1] - xaxis[i] for i in range(0, len(xaxis)-1)])/2.0
b = [0]*len(xaxis)
#plt.figure()
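		# stack the bars: b accumulates the running totals so each technology's
		# bar is drawn on top of the ones already plotted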
for bar in stackedBars:
h = plt.bar(xaxis, data[bar], width, bottom = b, color = colorMapForBars[bar])
handles.append(h)
b = [b[j] + data[bar][j] for j in range(0, len(b))]
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# plt.xticks([width*0.5 + i for i in xaxis], [str(i) for i in xaxis])
plt.xticks([i for i in xaxis], [str(i) for i in xaxis])
plt.title(title)
lgd = plt.legend([h[0] for h in handles], stackedBars, bbox_to_anchor = (1.2, 1),fontsize=7.5)
#plt.show()
plt.savefig(self.output_file_name, bbox_extra_artists=(lgd,), bbox_inches='tight')
def make_line_plot(self, plot_var, label, title):
handles = list()
periods=plot_var['periods']
plot_var.pop('periods',0)
techs = plot_var.keys()
random.seed(10)
color_map=dict()
colors = []
width = 1.5
plt.figure()
cmap = self.get_cmap( len(techs) )
for i in range(0,len(techs)):
# colors.append(self.generate_new_color(colors,pastel_factor = 0.9))
# color_map[plot_var.keys()[i]]=colors[i]
			color_map[list(plot_var.keys())[i]]=cmap(i)
b = [0]*len(periods)
for tech in techs:
h = plt.plot(periods, plot_var[tech],color = color_map[tech], linestyle='--', marker='o')
handles.append(h)
plt.xlabel("Years")
plt.ylabel(label)
#plt.xticks([i + width*0.5 for i in periods], [str(i) for i in periods])
plt.xticks(periods)
plt.title(title)
lgd = plt.legend([h[0] for h in handles], techs, bbox_to_anchor = (1.2, 1),fontsize=7.5)
#plt.show()
plt.savefig(self.output_file_name, bbox_extra_artists=(lgd,), bbox_inches='tight')
# Function used for command line purposes. Parses arguments and then calls relevent functions.
def GeneratePlot(args):
parser = argparse.ArgumentParser(description="Generate Output Plot")
parser.add_argument('-i', '--input', action="store", dest="input", help="Input Database Filename <path>", required=True)
parser.add_argument('-r', '--region', action="store", dest="region", help="Region name, input 'global' if global results are desired", required=True)
parser.add_argument('-s', '--scenario', action="store", dest="scenario", help="Model run scenario name", required=True)
parser.add_argument('-p', '--plot-type', action="store", dest="type", help="Type of Plot to be generated", choices=['capacity', 'flow', 'emissions'], required=True)
parser.add_argument('-c', '--sector', action="store", dest="sector", help="Sector for which plot to be generated", required=True)
parser.add_argument('-o', '--output', action="store", dest="output_dir", help='Output plot location', default='./')
parser.add_argument('--super', action="store_true", dest="super_categories", help="Merge Technologies or not", default=False)
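	# Example invocation (hypothetical database/scenario names):
	#   python MakeOutputPlots.py -i temoa.sqlite -r global -s base_run -p capacity -c all -o ./plots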
options = parser.parse_args(args)
result = OutputPlotGenerator(options.input, options.region, options.scenario, options.super_categories)
error = ''
if (options.type == 'capacity'):
error = result.generatePlotForCapacity(options.sector, options.super_categories, options.output_dir)
elif (options.type == 'flow'):
error = result.generatePlotForOutputFlow(options.sector, options.super_categories, options.output_dir)
elif (options.type == 'emissions'):
error = result.generatePlotForEmissions(options.sector, options.super_categories, options.output_dir)
if (error == ''):
print("Error: The sector doesn't exist for the selected plot type and database")
else:
print("Done. Look for output plot images in directory:"+os.path.join(options.output_dir,error))
if __name__ == '__main__':
GeneratePlot(sys.argv[1:]) | gpl-2.0 |
mbilalzafar/fair-classification | fair_classification/stats_pref_fairness.py | 1 | 5732 | from __future__ import division
import numpy as np
from sklearn.preprocessing import MaxAbsScaler # normalize data with 0 and 1 as min/max absolute vals
import scipy
from multiprocessing import Pool, Process, Queue
from sklearn.metrics import roc_auc_score
import traceback
def get_acc_all(dist_arr, y):
"""
Get accuracy for all data points
Each group gets the prediction based on their own boundary
"""
return np.sum(sign_bin_clf(dist_arr) == y) / y.shape[0]
def get_clf_stats(dist_arr, dist_dict, y, x_sensitive, print_stats=False):
# compute the class labels
all_class_labels_assigned = sign_bin_clf(dist_arr)
s_val_to_cons_sum = {}
acc = get_acc_all(dist_arr,y)
if print_stats:
print "\n\n\nAccuracy: %0.3f\n" % acc
acc_stats = get_acc_stats(dist_dict, y, x_sensitive, print_stats)
s_val_to_cons_sum = get_sensitive_attr_cov(dist_dict)
return acc, s_val_to_cons_sum, acc_stats
def get_fp_fn_tp_tn(y_true, y_pred):
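	# Confusion-matrix rates for binary labels in {-1, +1}; returns a dict with
	# "fpr", "fnr", "acc" and "frac_pos" (the fraction of points predicted positive).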
def check_labels_bin(arr):
""" Can only have -1 and 1"""
try:
if len(set(arr)) == 1:
elem = list(set(arr))[0]
assert(elem==1 or elem==-1)
else:
assert(len(set(arr)) == 2)
assert( sorted(list(set(arr)))[0] == -1 and sorted(list(set(arr)))[1] == 1 )
except:
traceback.print_exc()
raise Exception("Class labels (both true and predicted) can only take values -1 and 1... Exiting...")
check_labels_bin(y_true)
check_labels_bin(y_pred)
acc = float(sum(y_true==y_pred)) / len(y_true)
fp = sum(np.logical_and(y_true == -1.0, y_pred == +1.0)) # something which is -ve but is misclassified as +ve
fn = sum(np.logical_and(y_true == +1.0, y_pred == -1.0)) # something which is +ve but is misclassified as -ve
tp = sum(np.logical_and(y_true == +1.0, y_pred == +1.0)) # something which is +ve AND is correctly classified as +ve
tn = sum(np.logical_and(y_true == -1.0, y_pred == -1.0)) # something which is -ve AND is correctly classified as -ve
fpr = float(fp) / float(fp + tn)
fnr = float(fn) / float(fn + tp)
tpr = float(tp) / float(tp + fn)
tnr = float(tn) / float(tn + fp)
frac_pos = (tp + fp) / (tp + tn + fp + fn) # fraction classified as positive
out_dict = {"fpr": fpr, "fnr": fnr, "acc": acc, "frac_pos": frac_pos}
return out_dict
def get_acc_stats(dist_dict, y, x_sensitive, verbose = False):
"""
output dict form: s_attr_group (0/1) -> w_group (0/1) -> fpr/fnr/acc/frac_pos
"""
acc_stats = {}
try:
assert(len(set(x_sensitive)) == 2)
except:
raise Exception("Fill the constraint code for categorical sensitive features... Exiting...")
try:
assert( sorted(list(set(x_sensitive)))[0] == 0 and sorted(list(set(x_sensitive)))[1] == 1 )
except:
raise Exception("Sensitive feature can only take values 0 and 1... Exiting...")
if verbose == True:
print "|| s || frac_pos ||"
for s_val in set(x_sensitive):
idx = x_sensitive == s_val
other_val = np.abs(1-s_val)
acc_stats[s_val] = {}
y_true_local = y[idx]
y_pred_local = sign_bin_clf(dist_dict[s_val][s_val]) # predictions with this classifier
y_pred_local_other = sign_bin_clf(dist_dict[s_val][other_val]) # predictions with other group's classifier
assert(y_true_local.shape[0] == y_pred_local.shape[0] and y_true_local.shape[0] == y_pred_local_other.shape[0])
acc_stats[s_val][s_val] = get_fp_fn_tp_tn(y_true_local, y_pred_local)
acc_stats[s_val][other_val] = get_fp_fn_tp_tn(y_true_local, y_pred_local_other)
if verbose == True:
if isinstance(s_val, float): # print the int value of the sensitive attr val
s_val = int(s_val)
print "|| %s || %0.2f (%0.2f) ||" % (s_val, acc_stats[s_val][s_val]["frac_pos"], acc_stats[s_val][other_val]["frac_pos"])
return acc_stats
def sign_bin_clf(arr):
"""
	prediction for a linear classifier. np.sign gives 0 for sign(0); we want 1
if arr[i] >= 0, arr[i] = +1
else arr[i] = -1
"""
arr = np.sign(arr)
arr[arr==0] = 1
return arr
def get_sensitive_attr_cov(dist_dict):
"""
computes the ramp function for each group to estimate the acceptance rate
"""
s_val_to_cons_sum = {0:{}, 1:{}} # s_attr_group (0/1) -> w_group (0/1) -> ramp approx
for s_val in dist_dict.keys():
for w_group in dist_dict[s_val].keys():
fx = dist_dict[s_val][w_group]
s_val_to_cons_sum[s_val][w_group] = np.sum( np.maximum(0, fx) ) / fx.shape[0]
return s_val_to_cons_sum
def add_intercept(x):
""" Add intercept to the data before linear classification """
m,n = x.shape
intercept = np.ones(m).reshape(m, 1) # the constant b
return np.concatenate((intercept, x), axis = 1)
def scale_data(x_train, x_test):
"""
We only scale the continuous features. No need to scale binary features
"""
idx_binary = [] # columns with boolean values
for k in range(x_train.shape[1]):
idx_binary.append( np.array_equal(x_train[:,k], x_train[:,k].astype(bool)) ) # checking if a column is binary
idx_cont = np.logical_not(idx_binary)
sc = MaxAbsScaler()
sc.fit(x_train[:, idx_cont])
x_train[:, idx_cont] = sc.transform(x_train[:, idx_cont])
x_test[:, idx_cont] = sc.transform(x_test[:, idx_cont])
return
| gpl-3.0 |
jyhmiinlin/cineFSE | CsSolver/Cine2DSolver.py | 1 | 39069 | import numpy
# import pyfftw
import scipy
import numpy.fft as fftpack
# import fftpack._fftpack
# import CsSolver
# import CsSolver
import sys
# Add the ptdraft folder path to the sys.path list
sys.path.append('../CsTransform')
import CsTransform.pynufft
import matplotlib.pyplot
cmap=matplotlib.cm.gray
norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
from numpy import arange as xrange
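# NOTE: xrange below is numpy.arange, so the xrange(...) loops in this module
# also work under Python 3 (they iterate an ndarray instead of a range object)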
class TemporalConstraint(CsTransform.pynufft.pynufft):
def __init__(self,om, Nd, Kd,Jd):
CsTransform.pynufft.pynufft.__init__(self,om, Nd, Kd,Jd)
self.gpu_flag = 0
# self.compute = 'fast'
# self.compute = 'reliable'
'''
2D cine images + PPG external guided
X-axis is not iterated, so it is only a 2D sparse recovery problem
data space: sparse ky-t (f, )
target space: y-f space (u, u0)
	Remark: f is the sparse (kx,ky,t) data, from which the average (DC)
	images have already been subtracted.
	u is the y-f space
	u_bar is the DC component of the y-f space
	u0 is the first estimate of u
	CsTransform.forward: transform(y-f -> ky-t )
	CsTransform.backward: transform(ky-t -> y-f )
	self.make_sense: making sensitivity map from TSE images
	    input: x-y-coil 3D data
	    output: x-y-f-coil 4D sensemap
	constraints: 1) |u - u_bar|_1: the L1 norm of the |signal - DC| terms
	             2) |part_der_y(u)|_1: the L1 norm of the partial derivative of u along y
'''
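	# Typical call sequence (sketch; argument names are illustrative): instantiate a
	# subclass that provides create_mask, e.g. solver = Cine2DSolver(om, Nd, Kd, Jd),
	# then u = solver.pseudoinverse(cineObj, mu, LMBD, gamma, nInner, nBreg),
	# where cineObj supplies f (sparse ky-t data), tse, pdf, dim_x and ncoils.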
# def __init__(self,CsTransform, data, mu, LMBD, gamma, nInner, nBreg):
# CineBaseSolver.__init__(CsTransform, data, mu, LMBD, gamma, nInner, nBreg)
def data2rho(self, dd_raw,dim_x,dim_y,dim_t,n_coil):
'''
dd_raw data in kx-ky-t space
dd_raw, shape (512(kx), 1056(ky-t), 4coil)
'''
if dd_raw.ndim != 3:
print(' dimension is wrong! ')
dd_raw = numpy.transpose(dd_raw,(1,2,0)) # put the x-axis to the tail
shape_dd_raw = numpy.shape(dd_raw)
dd_raw = numpy.reshape(dd_raw,
(shape_dd_raw[0],)+ (shape_dd_raw[1]*shape_dd_raw[2],),
order='F')
print('in data2rho,dd_raw.shape',dd_raw.shape)
output_data = self.backward(dd_raw) # (dim_y, dim_t*n_coil*dim_x)
output_data = numpy.reshape(output_data, (dim_y,dim_t,n_coil,dim_x),order='F')
output_data = numpy.transpose(output_data, (3,0,1,2))
return output_data
def FhF(self, dd_raw ,cineObj):
'''
2D FFT along y-f axes
x-axis is not changed
'''
if dd_raw.ndim != 4:
print(' dimension is wrong! ')
dim_x = cineObj.dim_x
dim_y = self.st['Nd'][0]
dim_t = self.st['Nd'][1]
n_coil = cineObj.ncoils
# CsTransform = self.CsTransform
dd_raw = numpy.transpose(dd_raw,(1,2,3,0)) # put the x-axis to the tail
shape_dd_raw = numpy.shape(dd_raw)
dd_raw = numpy.reshape(dd_raw,
(shape_dd_raw[0],)+ (shape_dd_raw[1],)+(shape_dd_raw[2]*shape_dd_raw[3],),
order='F')
output_data = self.forwardbackward(dd_raw)
# output_data = CsTransform.forward(dd_raw) # (dim_y, dim_t*n_coil*dim_x)
#
# output_data = CsTransform.backward(output_data)
#output_data = CsTransform.backward(output_data)
output_data = numpy.reshape(output_data, (dim_y,dim_t,n_coil,dim_x),order='F')
output_data = numpy.transpose(output_data, (3,0,1,2))
return output_data
def fun1(self,cineObj):
'''
CsTransform.backward
from data to kx-y-f
do I need this?
by connecting to data2rho(self, dd_raw,dim_x,dim_y,dim_t,n_coil,CsTransform):
'''
output=self.data2rho( cineObj.f , cineObj.dim_x , self.st['Nd'][0],self.st['Nd'][1],cineObj.ncoils)
print('cineObj.f shape',cineObj.f.shape)
return output
def fun2(self,m):
'''
1)shift-kx
2)ifft: along kx
3)shift-x
'''
m = fftpack.ifftshift(m, axes=(0,)) # because kx energy is at centre
shape_m = numpy.shape(m)
m= fftpack.ifftn(m, axes=(0,), )
# m = fftpack.ifftn(m,axes=(0,))
m = fftpack.ifftshift(m, axes=(0,)) # because x-y-f are all shifted at center
return m
def fun3(self,m):
'''
1)shift-f
2)fft: along f
3)(No shift)-t
'''
m = fftpack.fftshift(m, axes=(2,))
m= fftpack.fftn(m, axes=(2,), )
# m = fftpack.fftn(m,axes=(2,))
return m
def fun4(self,m):
'''
1D IFFT along temporal/frequency axis
inverse of self.fun3
1)(No shift)-t
2)fft: along f
3)shift-f
'''
m= fftpack.ifftn(m, axes=(2,), )
# m = fftpack.ifftn(m,axes=(2,))
m = fftpack.ifftshift(m, axes=(2,))
return m
# def fast_fun4_FhF_fun3(self,m, cineObj):
# m = fftpack.ifftn(m,axes=(2,))
#
# m = fftpack.ifftshift(m, axes=(2,))
#
# if m.ndim != 4:
# print(' dimension is wrong! ')
#
# dim_x = cineObj.dim_x
# dim_y = self.st['Nd'][0]
# dim_t = self.st['Nd'][1]
# n_coil = cineObj.ncoils
# # CsTransform = self.CsTransform
#
# m = numpy.transpose(m,(1,2,3,0)) # put the x-axis to the tail
#
# shape_m = numpy.shape(m)
#
# m = numpy.reshape(m,
# (shape_m[0],)+ (shape_m[1],)+(shape_m[2]*shape_m[3],),
# order='F')
# m = self.forwardbackward(m)
#
# # output_data = CsTransform.forward(dd_raw) # (dim_y, dim_t*n_coil*dim_x)
# #
# # output_data = CsTransform.backward(output_data)
#
# #output_data = CsTransform.backward(output_data)
#
# m = numpy.reshape(m, (dim_y,dim_t,n_coil,dim_x),order='F')
#
# m = numpy.transpose(m, (3,0,1,2))
#
# m = fftpack.fftshift(m, axes=(2,))
#
# m = fftpack.fftn(m,axes=(2,))
#
# return m
def do_FhWFm(self,q,cineObj):
'''
do_FhWFm convole the input q with the ky-t sampling function
firstly, fun4() transform the x-y-t data into x-y-f
Secondly, FhF() perform 2D convolution for x-y-f data
Thirdly, fun3() transform the x-y-f data back into x-y-t
input:
q is the x-y-t data
output:
output_q is the x-y-t data
'''
# if self.compute == 'reliable':
output_q = q
# 1D FFT along temporal/frequency axis
output_q = self.fun4(output_q)
# convolved by the
output_q = self.FhF(output_q,cineObj)
output_q = self.fun3(output_q)
# elif self.compute == 'fast':
# output_q = q
# output_q = self.fast_fun4_FhF_fun3(output_q, cineObj)
return output_q
def fun5(self,m):
'''
1)shift-f:
2)shift-x-y:
		3)fft2 along x-y, no shift -> kx ky f
for 3D laplacian operator
'''
m = fftpack.ifftshift(m, axes=(0,1,2,))
m= fftpack.fftn(m, axes=(0,1,), )
# m = fftpack.fftn(m,axes = (0,1,))
return m
def fun6(self,m):
'''
inverse of fun5
1)ifft2 along kx-ky
2)shift along x & y
3)shift along f
'''
# m = fftpack.ifftn(m,axes = (0,1,))
m= fftpack.ifftn(m, axes=(0,1,), )
m = fftpack.fftshift(m, axes=(0,1,2,))
return m
def do_laplacian(self,q,uker):
'''
Laplacian of the input q
'''
# lapla_Wq=fftpack.fftshift(q,axes=(2,))
# lapla_Wq=fftpack.fftn(lapla_Wq,axes=(0,1,))
# lapla_Wq=lapla_Wq*uker
# lapla_Wq=fftpack.ifftn(lapla_Wq,axes=(0,1,))
# lapla_Wq=fftpack.ifftshift(q,axes=(2,))
lapla_Wq = q
lapla_Wq = self.fun4(lapla_Wq)
lapla_Wq = self.fun5(lapla_Wq)
lapla_Wq = lapla_Wq * uker
lapla_Wq = self.fun6(lapla_Wq)
lapla_Wq = self.fun3(lapla_Wq)
return lapla_Wq
def pseudoinverse(self, cineObj, mu, LMBD, gamma, nInner, nBreg): # main function of solver
self.mu = mu
self.LMBD = LMBD
self.gamma = gamma
self.nInner= nInner
self.nBreg= nBreg
u0=numpy.empty((cineObj.dim_x, self.st['Nd'][0], self.st['Nd'][1], cineObj.tse.shape[2]))
print(numpy.shape(cineObj.tse))
orig_num_ky=numpy.shape(cineObj.tse)[1]
tse = cineObj.tse[:,orig_num_ky/2 - self.st['Nd'][0]/2 : orig_num_ky/2 + self.st['Nd'][0]/2,:]
tse = CsTransform.pynufft.appendmat(tse,u0.shape[2])
tse = numpy.transpose(tse,(0,1,3,2))
print(self.st['Nd'][0])
print('tse.shape',tse.shape)
#===============================================================================
# mask
#===============================================================================
self.st = self.create_mask(u0)
print('mask.shape',self.st['mask'].shape)
# for jj in range(0,16):
# matplotlib.pyplot.subplot(4,4,jj)
# matplotlib.pyplot.imshow(self.st['mask'][...,jj,0].real)
# matplotlib.pyplot.show()
#===============================================================================
#estimate sensitivity maps by divided by rms images
self.st = self._make_sense(cineObj.tse[:,orig_num_ky/2 - self.st['Nd'][0]/2 : orig_num_ky/2 + self.st['Nd'][0]/2,:]) # setting up sense map in st['sensemap']
self.st['sensemap'] = CsTransform.pynufft.appendmat(self.st['sensemap'],u0.shape[2])
self.st['sensemap'] = numpy.transpose(self.st['sensemap'],(0,1,3,2))
#self.st['sensemap'] =self.st['sensemap'] * self.st['mask']
print('self.sense.shape',self.st['sensemap'].shape)
# for jj in range(0,16):
# matplotlib.pyplot.subplot(4,4,jj)
# matplotlib.pyplot.imshow(numpy.abs(self.st['sensemap'][...,jj,0]))
# matplotlib.pyplot.show()
self.st['senseflag']=1 # turn-on sense, to get sensemap
(u,uf)=self.kernel( cineObj, self.st , mu, LMBD, gamma, nInner, nBreg)
return u
def create_laplacian_kernel2(self,cineObj):
#===============================================================================
# # # Laplacian operator, convolution kernel in spatial domain
# Note only the y-axis is used
# # related to constraint
#===============================================================================
uker = numpy.zeros((cineObj.dim_x,)+self.st['Nd'][0:2],dtype=numpy.complex64)
rows_kd = self.st['Nd'][0] # ky-axis
#cols_kd = self.st['Kd'][1] # t-axis
# uker[0,0] = 1.0
uker[0,0,0] = -4.0
uker[0,1,0]=1.0
uker[0,-1,0]=1.0
uker[1,0,0]=1.0
uker[-1,0,0]=1.0
uker = fftpack.fftn(uker,axes=(0,1,2,)) # 256x256x16
return uker
def create_laplacian_kernel3(self,cineObj):
#===============================================================================
# # # Laplacian operator, convolution kernel in spatial domain
# Note only the y-axis is used
# # related to constraint
#===============================================================================
uker = numpy.ones((cineObj.dim_x,)+self.st['Nd'][0:2],dtype=numpy.complex64)
# rows_kd = self.st['Nd'][0] # ky-axis
# #cols_kd = self.st['Kd'][1] # t-axis
## uker[0,0] = 1.0
# uker[0,0,0] = -2.0
# uker[0,0,1]=1.0
# uker[0,0,-1]=1.0
#
# uker = fftpack.fftn(uker,axes=(0,1,2,)) # 256x256x16
for pp in range(0,self.st['Nd'][1]):
uker[:,:,pp] =uker[:,:,pp] * ( (pp - self.st['Nd'][1]/2 )**2 ) / (self.st['Nd'][1]**2)
# for pp in range(0,16):
# matplotlib.pyplot.subplot(4,4,pp)
# matplotlib.pyplot.imshow(numpy.abs(uker[pp,:,:]),interpolation='nearest')
# matplotlib.pyplot.show()
return uker
def create_laplacian_kernel(self,cineObj):
#===============================================================================
# # # Laplacian operator, convolution kernel in spatial domain
# Note only the y-axis is used
# # related to constraint
#===============================================================================
uker2 = numpy.zeros((cineObj.dim_x,)+self.st['Nd'][0:2],dtype=numpy.complex64)
rows_kd = self.st['Nd'][0] # ky-axis
#cols_kd = self.st['Kd'][1] # t-axis
# uker[0,0] = 1.0
uker2[0, 0, 0] = -2.0
uker2[0, 0, 1]=1.0
uker2[0, 0,-1]=1.0
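		# circular [-2, 1, 1] stencil = discrete second difference along the temporal
		# axis; the FFT below gives its transfer function on the k-space grid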
uker2 = fftpack.fftn(uker2,axes=(0,1,2,)) # 256x256x16
return uker2
def create_kspace_sampling_density(self):
#=======================================================================
# RTR: k-space sampled density
# only diagonal elements are relevant (on k-space grids)
#=======================================================================
RTR=self.st['q'] # see __init__() in class "pyNufft"
#(prod(Kd),1)
return RTR
def kernel(self, cineObj, st , mu, LMBD, gamma, nInner, nBreg):
self.st['sensemap']=self.st['sensemap']*self.st['mask']
orig_num_ky=numpy.shape(cineObj.tse)[1]
tse = cineObj.tse[:,orig_num_ky/2 - self.st['Nd'][0]/2 : orig_num_ky/2 + self.st['Nd'][0]/2,:]
# tse=cineObj.tse
# tse=numpy.abs(numpy.mean(self.st['sensemap'],-1))
tse=CsTransform.pynufft.appendmat(tse,self.st['Nd'][1])
#tse=Normalize(tse)
tse=numpy.transpose(tse,(0,1,3,2))
self.ttse=tse#CsTransform.pynufft.Normalize(tse)
self.tse0 = CsTransform.pynufft.CombineMulti(tse, -1)
print('line392, shape self.tse0',numpy.shape(self.tse0))
self.filter= numpy.ones(tse.shape)
dpss = numpy.kaiser(tse.shape[1], 1.0)*10.0
for ppp in range(0,tse.shape[1]):
self.filter[:,ppp,:,:]=self.filter[:,ppp,:,:]*dpss[ppp]
print('tse.shape',tse.shape)
# L= numpy.size(f)/st['M']
# image_dim=st['Nd']+(L,)
#
# if numpy.ndim(f) == 1:# preventing row vector
# f=numpy.reshape(f,(numpy.shape(f)[0],1),order='F')
# f0 = numpy.copy(f) # deep copy to prevent scope f0 to f
## u = numpy.zeros(image_dim,dtype=numpy.complex64)
f0=numpy.copy(cineObj.f)
f=numpy.copy(cineObj.f)
# u0=self.data2rho(f_internal,
# cineObj.dim_x,
# self.st['Nd'][0],
# self.st['Nd'][1],
# cineObj.ncoils,
# self.CsTransform
# ) # doing spatial transform
u0 = self.fun1(cineObj)
pdf = cineObj.pdf
pdf = CsTransform.pynufft.appendmat(pdf,self.st['Nd'][1])
pdf = numpy.transpose(pdf,(0,1,3,2))
# u0 = fftpack.fftn(u0,axes=(1,))
# u0 = fftpack.fftshift(u0,axes=(1,))
# #u0[:,:,u0.shape[2]/2,:] = u0[:,:,u0.shape[2]/2,:]/pdf[:,:,u0.shape[2]/2,:]
# u0 = u0#/pdf
# u0 = fftpack.ifftshift(u0,axes=(1,))
# u0 = fftpack.ifftn(u0,axes=(1,))
# print('cineObj.pdf.shape',cineObj.pdf.shape)
# for pj in range(0,4):
# matplotlib.pyplot.imshow(cineObj.pdf[:,:,pj].real)
# matplotlib.pyplot.show()
u0=self.fun2(u0)
u0=self.fun3(u0)
u0 = u0*self.st['sensemap'].conj()
u0 = CsTransform.pynufft.CombineMulti(u0,-1)
print('line443, shape u0',numpy.shape(u0))
#u0 = u0*self.filter
uker = self.create_laplacian_kernel(cineObj)
uker = CsTransform.pynufft.appendmat(uker,u0.shape[3])
self.u0 = u0
u = numpy.copy(self.tse0)
print('u0.shape',u0.shape)
(xx,bb,dd)=self.make_split_variables(u)
uf = numpy.copy(u) # only used for ISRA, written here for generality
murf = numpy.copy(u) # initial values
# #===============================================================================
#u_stack = numpy.empty(st['Nd']+(nBreg,),dtype=numpy.complex)
for outer in xrange(0,nBreg):
for inner in xrange(0,nInner):
# update u
print('iterating',[inner,outer])
#===============================================================
# update u # simple k-space deconvolution to guess initial u
u = self.update_u(murf, u, uker, xx, bb,cineObj)
c = numpy.max(numpy.abs(u[:])) # Rough coefficient
# to correct threshold of nonlinear shrink
#===================================================================
# # update d
#===================================================================
#===================================================================
# Shrinkage: remove tiny values "in somewhere sparse!"
# dx+bx should be sparse!
#===================================================================
# shrinkage
#===================================================================
dd=self.update_d(u,dd)
xx=self.shrink( dd, bb, c*1.0/LMBD/numpy.sqrt(numpy.prod(st['Nd'])))
#===============================================================
#===================================================================
# # update b
#===================================================================
bb=self._update_b(bb, dd, xx)
if outer < (nBreg-1): # do not update in the last loop
(f, uf, murf,u)=self.external_update(u, f, uf, f0, u0) # update outer Split_bregman
# u = CsTransform.pynufft.Normalize(u)
# for pp in range(0,u0.shape[2]):
# matplotlib.pyplot.subplot(numpy.sqrt(u0.shape[2])+1,numpy.sqrt(u0.shape[2])+1,pp)
# matplotlib.pyplot.imshow(numpy.sum(numpy.abs(u[...,pp,:]),-1),norm=norm,interpolation='nearest')
# matplotlib.pyplot.show()
#
return (u,uf)
def update_u(self,murf,u, uker ,xx, bb,cineObj):
# u_bar = numpy.copy(u)
#
#
# u_bar[...,:u.shape[2]/2,:] = 0.0
# u_bar[...,(u.shape[2]/2+1):,:] = 0.0
Fhd = self.u0
# (self.u0
# - self.FhF(u_bar)#*self.st['sensemap'].conj()
# )
m = u# - u_bar
rhs = Fhd #+ self.constraint(xx,bb) # LMBD/gamma have been added
for jj in range(0,1):
m = self.cg_step(rhs,m,uker,10,cineObj)
u = m
return u
def cg_step(self,rhs,m,uker,n_iter,cineObj):
# going through the acquisition process
FhFWm = self.do_FhWFm(m*self.st['sensemap'],cineObj)*self.st['sensemap'].conj()
FhFWm = CsTransform.pynufft.CombineMulti(FhFWm,-1)
# Laplacian of the x-y-t data
lapla_m = self.do_laplacian(m, uker)
# Gradient
lhs = FhFWm - self.LMBD*lapla_m + 2.0*self.gamma*m
#C_m= lhs - rhs
r = rhs - lhs
p = r
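		# standard conjugate-gradient iterations: alfa_k is the step length along the
		# search direction p, beta_k (Fletcher-Reeves) updates the search direction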
for pp in range(0,n_iter):
Ap = self.do_FhWFm(p*self.st['sensemap'],cineObj)*self.st['sensemap'].conj()
Ap = CsTransform.pynufft.CombineMulti(Ap,-1)
Ap = Ap - self.LMBD*self.do_laplacian(p, uker) + 2.0*self.gamma*p # a small constraint
residue_square =numpy.sum((r.conj()*r)[:])
residue_projection = numpy.sum((p.conj()*Ap)[:])
alfa_k = residue_square/residue_projection
print('r',residue_square,'alpha_k',alfa_k)
#alfa_k = 0.3
m = m + alfa_k * p
r2 = r - alfa_k * Ap
beta_k = numpy.sum( (r2.conj()*r2)[:] ) / residue_square
r = r2
p = r + beta_k * p
return m
# def update_u(self,murf,u, uker ,xx, bb):
## u_bar = numpy.copy(u)
##
##
## u_bar[...,:u.shape[2]/2,:] = 0.0
## u_bar[...,(u.shape[2]/2+1):,:] = 0.0
#
#
# Fhd = self.u0
## (self.u0
## - self.FhF(u_bar)#*self.st['sensemap'].conj()
## )
# m = u# - u_bar
#
#
# for jj in range(0,6):
# print('iteratin',jj)
#
# #for pkpk in range(0,cineObj.dim_x):
# # W[pkpk,...]=Normalize(W[pkpk,...])
#
# FhFWm = self.do_FhWFm(m*self.st['sensemap'])*self.st['sensemap'].conj()
#
# FhFWm = CsTransform.pynufft.CombineMulti(FhFWm,-1)
#
# lapla_m = self.do_laplacian(m, uker)
#
## lapla_Wq = self.do_laplacian(W*q,uker)
##
##
## constr=self.constraint(xx, bb)
## constr=fftpack.ifftn(constr,axes=(2,))
## constr =fftpack.ifftshift(constr,axes=(2,))
##
#
#
# #u = Normalize(u)
# #FhFWq= Normalize(FhFWq )
#
# rhs = Fhd #+ self.LMBD*self.constraint(xx,bb)
#
# lhs = FhFWm - self.LMBD*lapla_m #+ self.LMBD*m
#
# C_m= lhs - rhs
#
# m = m - 0.3*(C_m)
#
#
## if numpy.mod(jj,5) == 0:
## for pp in range(0,m.shape[2]):
## matplotlib.pyplot.subplot(4,4,pp)
## matplotlib.pyplot.imshow(numpy.sum(numpy.abs(m[...,pp,:]),-1),interpolation='nearest')
## matplotlib.pyplot.show()
# #q = q/(self.ttse)
# u = m
# return u
def shrink(self,dd,bb,thrsld):
'''
		soft-thresholding the edges (identity here; overridden in Cine2DSolver)
'''
output_xx = dd
return output_xx #+ output_x2
def update_d(self,u,dd):
out_dd = dd
return out_dd
def make_split_variables(self,u):
x=numpy.zeros(u.shape)
y=numpy.zeros(u.shape)
tt=numpy.zeros(u.shape)
gg=numpy.zeros(u.shape)
bx=numpy.zeros(u.shape)
by=numpy.zeros(u.shape)
bt=numpy.zeros(u.shape)
bg=numpy.zeros(u.shape)
dx=numpy.zeros(u.shape)
dy=numpy.zeros(u.shape)
dt=numpy.zeros(u.shape)
dg=numpy.zeros(u.shape)
xx= (x,y,tt,gg)
bb= (bx,by,bt,bg)
dd= (dx,dy,dt,dg)
return(xx, bb, dd)
def external_update(self,u, f, uf, f0, u0): # overload the update function
CsTransform.pynufft.checkmax(self.st['sensemap'],0)
tmpuf = u*self.st['sensemap']
tmpuf = numpy.transpose(tmpuf,(1,2,3,0))
tmp_shape=tmpuf.shape
tmpuf = numpy.reshape(tmpuf,tmp_shape[0:2]+(numpy.prod(tmp_shape[2:4]),),order='F')
tmpuf = self.forwardbackward(tmpuf)
tmpuf = numpy.reshape(tmpuf ,tmp_shape,order='F')
tmpuf = numpy.transpose(tmpuf,(3,0,1,2))
tmpuf = tmpuf*self.st['sensemap'].conj()
# tmpuf=self.st['sensemap'].conj()*(
# self.CsTransform.forwardbackward(
# u*self.st['sensemap']))
if self.st['senseflag'] == 1:
tmpuf=CsTransform.pynufft.CombineMulti(tmpuf,-1)
print('start of ext_update')
# checkmax(u)
# checkmax(tmpuf)
# checkmax(self.u0)
# checkmax(uf)
fact=numpy.sum((self.u0-tmpuf)**2)/numpy.sum((u0)**2)
fact=numpy.abs(fact.real)
fact=numpy.sqrt(fact)
print('fact',fact)
# fact=1.0/(1.0+numpy.exp(-(fact-0.5)*self.thresh_scale))
# tmpuf=CsTransform.pynufft.Normalize(tmpuf)*numpy.max(numpy.abs(u0[:]))
uf = uf+(u0-tmpuf)*1.0#*fact
# uf =CsTransform.pynufft.Normalize(uf)*numpy.max(numpy.abs(u0[:]))
CsTransform.pynufft.checkmax(tmpuf,0)
CsTransform.pynufft.checkmax(u0,0)
CsTransform.pynufft.checkmax(uf,0)
# for jj in range(0,u.shape[-1]):
# u[...,jj] = u[...,jj]*self.st['sn']# rescale the final image intensity
print('end of ext_update')
murf = uf
return (f,uf,murf,u)
def _update_b(self, bb, dd, xx):
ndims=len(bb)
cc=numpy.empty(bb[0].shape)
out_bb=()
for pj in range(0,ndims):
cc=bb[pj]+dd[pj]-xx[pj]
out_bb=out_bb+(cc,)
return out_bb
def _make_sense(self,u0):
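		# Estimate coil sensitivity maps: each coil image is divided by the
		# RMS-over-coils image, then smoothed by low-pass filtering in k-space
		# with a shifted Kaiser-window mask (dpss_fil).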
st=self.st
L=numpy.shape(u0)[-1]
u0dims= numpy.ndim(u0)
print('in make_sense, u0.shape',u0.shape)
if u0dims-1 >0:
rows=numpy.shape(u0)[0]
# dpss_rows = numpy.kaiser(rows, 100)
# dpss_rows = numpy.fft.fftshift(dpss_rows)
# dpss_rows[3:-3] = 0.0
dpss_rows = numpy.ones(rows)
# replace above sensitivity because
# Frequency direction is not necessary
dpss_fil = dpss_rows
print('dpss shape',dpss_fil.shape)
if u0dims-1 > 1:
cols=numpy.shape(u0)[1]
dpss_cols = numpy.kaiser(cols, 100)
dpss_cols = numpy.fft.fftshift(dpss_cols)
dpss_cols[3:-3] = 0.0
dpss_fil = CsTransform.pynufft.appendmat(dpss_fil,cols)
dpss_cols = CsTransform.pynufft.appendmat(dpss_cols,rows)
dpss_fil=dpss_fil*numpy.transpose(dpss_cols,(1,0))
print('dpss shape',dpss_fil.shape)
if u0dims-1 > 2:
zag = numpy.shape(u0)[2]
dpss_zag = numpy.kaiser(zag, 100)
dpss_zag = numpy.fft.fftshift(dpss_zag)
dpss_zag[3:-3] = 0.0
dpss_fil = CsTransform.pynufft.appendmat(dpss_fil,zag)
dpss_zag = CsTransform.pynufft.appendmat(dpss_zag,rows)
dpss_zag = CsTransform.pynufft.appendmat(dpss_zag,cols)
dpss_fil=dpss_fil*numpy.transpose(dpss_zag,(1,2,0)) # low pass filter
print('dpss shape',dpss_fil.shape)
#dpss_fil=dpss_fil / 10.0
rms=numpy.sqrt(numpy.mean(u0*u0.conj(),-1)) # Root of sum square
st['sensemap']=numpy.ones(numpy.shape(u0),dtype=numpy.complex64)
print('sensemap shape',st['sensemap'].shape, L)
print('u0shape',u0.shape,rms.shape)
# print('L',L)
# print('rms',numpy.shape(rms))
for ll in xrange(0,L):
st['sensemap'][...,ll]=(u0[...,ll]+1e-16)/(rms+1e-16)
print('sensemap shape',st['sensemap'].shape, L)
print('rmsshape', rms.shape)
st['sensemap'][...,ll] = fftpack.fftn(st['sensemap'][...,ll],
st['sensemap'][...,ll].shape,
range(0,numpy.ndim(st['sensemap'][...,ll])))
st['sensemap'][...,ll] = st['sensemap'][...,ll] * dpss_fil
st['sensemap'][...,ll] = fftpack.ifftn(st['sensemap'][...,ll],
st['sensemap'][...,ll].shape,
range(0,numpy.ndim(st['sensemap'][...,ll])))
# st['sensemap'][...,ll]=fftpack.ifftn(fftpack.fftn(st['sensemap'][...,ll])*dpss_fil)
# st['sensemap'] = Normalize(st['sensemap'])
return st
class Cine2DSolver(TemporalConstraint):
def create_mask(self,u0):
st=self.st
print('u0.shape',u0.shape)
rows=u0.shape[0]
cols=u0.shape[1]
kk = xrange(0,rows)
jj = xrange(0,cols)
kk = CsTransform.pynufft.appendmat(kk,cols)
jj = CsTransform.pynufft.appendmat(jj,rows).T
st['mask']=numpy.ones((rows,cols),dtype=numpy.float32)
#add circular mask
sp_rat=(rows**2+cols**2)*1.0
# for jj in xrange(0,cols):
# for kk in xrange(0,rows):
# if ( (kk-rows/2.0)**2+(jj-cols/2.0)**2 )/sp_rat > 1.0/8.0:
# st['mask'][kk,jj] = 0.0
if numpy.size(u0.shape) > 2:
for pp in range(2,numpy.size(u0.shape)):
st['mask'] = CsTransform.pynufft.appendmat(st['mask'],u0.shape[pp] )
return st
def update_u(self,murf,u, uker ,xx, bb,cineObj):
# u_bar = numpy.copy(u)
#
#
# u_bar[...,:u.shape[2]/2,:] = 0.0
# u_bar[...,(u.shape[2]/2+1):,:] = 0.0
Fhd = self.u0
# (self.u0
# - self.FhF(u_bar)#*self.st['sensemap'].conj()
# )
m = u# - u_bar
rhs = Fhd + self.constraint(xx,bb) # LMBD/gamma have been added
num_cg_step = 30
m = self.cg_step(rhs,m,uker,num_cg_step,cineObj)
u = m
return u
def create_laplacian_kernel(self,cineObj):
#===============================================================================
# # # Laplacian operator, convolution kernel in spatial domain
# Note only the y-axis is used
# # related to constraint
#===============================================================================
uker2 = numpy.zeros((cineObj.dim_x,)+self.st['Nd'][0:2],dtype=numpy.complex64)
rows_kd = self.st['Nd'][0] # ky-axis
#cols_kd = self.st['Kd'][1] # t-axis
# uker[0,0] = 1.0
# rate = 30.0
# uker2[0,0,0] = -4.0 - 2.0/rate
# uker2[0,0,1] =1.0
# uker2[0,0,-1]=1.0
# uker2[0,1,0] =1.0#/rate
# uker2[0,-1,0]=1.0#/rate
# uker2[1,0,0] =1.0/rate
# uker2[-1,0,0]=1.0/rate
rate = 15.0
uker2[0,0,0] = -2.0 - 4.0/rate
uker2[0,0,1] =1.0
uker2[0,0,-1]=1.0
uker2[0,1,0] =1.0/rate
uker2[0,-1,0]=1.0/rate
uker2[1,0,0] =1.0/rate
uker2[-1,0,0]=1.0/rate
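		# anisotropic stencil: temporal second difference at full weight, spatial
		# (x,y) neighbours down-weighted by 1/rate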
uker2 = fftpack.fftn(uker2,axes=(0,1,2,)) # 256x256x16
return uker2
def constraint(self,xx,bb):
'''
include TVconstraint and others
'''
#tmp_d =get_Diff(u,jj)
rate = 15.0
cons = CsTransform.pynufft.TVconstraint(xx[0:2],bb[0:2]) * self.LMBD/rate
#cons = CsTransform.pynufft.TVconstraint(xx[0:2],bb[0:2]) * self.LMBD/100.0
cons = cons + CsTransform.pynufft.TVconstraint(xx[2:3],bb[2:3]) * self.LMBD#/rate
cons = cons + fftpack.ifftn(xx[3]-bb[3],axes = (2,))* self.LMBD/rate
# cons = cons + (xx[4]-bb[4])* self.gamma
# cons = cons + CsTransform.pynufft.TVconstraint(xx[4:5],bb[4:5])* self.LMBD
#cons = cons + xx[2]-bb[2]
#print('inside constraint, cons.shpae',cons.shape)
# cons = cons + freq_gradient_H(xx[3]-bb[3])
#print('inside constraint 1117, cons.shpae',cons.shape)
return cons
def update_d(self,u,dd):
# print('inside_update_d ushape',u.shape)
# print('inside_update_d fre grad ushape',freq_gradient(u).shape)
out_dd = ()
for jj in range(0,len(dd)) :
if jj < 2: # derivative y
#tmp_d =get_Diff(u,jj)
out_dd = out_dd + (CsTransform.pynufft.get_Diff(u,jj),)
if jj == 2: # derivative y
#tmp_d =get_Diff(u,jj)
out_dd = out_dd + (CsTransform.pynufft.get_Diff(u,jj),)
elif jj == 3: # rho
tmpu = numpy.copy(u)
tmpu = fftpack.fftn(tmpu,axes = (2,))
# tmpu[:,:,0,:] = tmpu[:,:,0,:]*0.0
out_dd = out_dd + (tmpu,)
elif jj == 4:
average_u = numpy.sum(u,2)
tmpu= numpy.copy(u)
# for jj in range(0,u.shape[2]):
# tmpu[:,:,jj,:]= tmpu[:,:,jj,:] - average_u
out_dd = out_dd + (tmpu,)
# out_dd = out_dd + (CsTransform.pynufft.get_Diff(tmpu,),)
# elif jj == 3:
# out_dd = out_dd + (freq_gradient(u),)
return out_dd
def shrink(self,dd,bb,thrsld):
'''
soft-thresholding the edges
'''
# dd2 = ()
# bb2 = ()
# for pp in range(0,2):
# dd2=dd2 + (dd[pp]/100.0,)
# bb2=bb2+ (bb[pp]/100.0,)
# dd2 = dd2 +dd[2:]
# bb2 = bb2 +bb[2:]
# tmp_xx=CsTransform.pynufft.shrink( dd2[0:2], bb2[0:2], thrsld)
#
# output_xx = ()
# for pp in range(0,2):
# output_xx = output_xx + (tmp_xx[pp]*100.0,)
#
# output_xx = output_xx + (tmp_xx[2],)
output_xx= CsTransform.pynufft.shrink( dd[0:2], bb[0:2], thrsld)# 3D thresholding
output_xx=output_xx + CsTransform.pynufft.shrink( dd[2:3], bb[2:3], thrsld)# 3D thresholding
output_xx =output_xx + CsTransform.pynufft.shrink( dd[3:4], bb[3:4], thrsld)
# output_xx =output_xx + CsTransform.pynufft.shrink( dd[4:5], bb[4:5], thrsld)
return output_xx #+ output_x2
def make_split_variables(self,u):
x=numpy.zeros(u.shape)
y=numpy.zeros(u.shape)
tt=numpy.zeros(u.shape)
gg=numpy.zeros(u.shape)
mm=numpy.zeros(u.shape)
bx=numpy.zeros(u.shape)
by=numpy.zeros(u.shape)
bt=numpy.zeros(u.shape)
bg=numpy.zeros(u.shape)
bm=numpy.zeros(u.shape)
dx=numpy.zeros(u.shape)
dy=numpy.zeros(u.shape)
dt=numpy.zeros(u.shape)
dg=numpy.zeros(u.shape)
dm=numpy.zeros(u.shape)
xx= (x,y,tt,gg)
bb= (bx,by,bt,bg)
dd= (dx,dy,dt,dg)
return(xx, bb, dd)
# class ktfocuss(Cine2DSolver):
# def kernel(self, f_internal, st , mu, LMBD, gamma, nInner, nBreg):
# self.st['sensemap']=self.st['sensemap']*self.st['mask']
# tse=cineObj.tse
# # tse=numpy.abs(numpy.mean(self.st['sensemap'],-1))
#
# tse=CsTransform.pynufft.appendmat(tse,self.st['Nd'][1])
# #tse=Normalize(tse)
# tse=numpy.transpose(tse,(0,1,3,2))
#
# print('tse.shape',tse.shape)
# # L= numpy.size(f)/st['M']
# # image_dim=st['Nd']+(L,)
# #
# # if numpy.ndim(f) == 1:# preventing row vector
# # f=numpy.reshape(f,(numpy.shape(f)[0],1),order='F')
# # f0 = numpy.copy(f) # deep copy to prevent scope f0 to f
# ## u = numpy.zeros(image_dim,dtype=numpy.complex64)
# f0=numpy.copy(f_internal)
# f=numpy.copy(f_internal)
# v= f
# u0=self.fun1(f_internal,
# # cineObj.dim_x,
# # self.st['Nd'][0],
# # self.st['Nd'][1],
# # cineObj.ncoils,
# # self.CsTransform
# ) # doing spatial transform
#
# pdf = cineObj.pdf
# pdf = CsTransform.pynufft.appendmat(pdf,self.st['Nd'][1])
# pdf = numpy.transpose(pdf,(0,1,3,2))
# # matplotlib.pyplot.imshow(pdf[:,:,0,0].real)
# # matplotlib.pyplot.show()
# u0 = fftpack.fftn(u0,axes=(1,))
# u0 = fftpack.fftshift(u0,axes=(1,))
# #u0[:,:,u0.shape[2]/2,:] = 2*u0[:,:,u0.shape[2]/2,:]/pdf[:,:,u0.shape[2]/2,:]
# u0 = u0 /pdf
# u0 = fftpack.ifftshift(u0,axes=(1,))
# u0 = fftpack.ifftn(u0,axes=(1,))
#
# # print('cineObj.pdf.shape',cineObj.pdf.shape)
# # for pj in range(0,4):
# # matplotlib.pyplot.imshow(cineObj.pdf[:,:,pj].real)
# # matplotlib.pyplot.show()
#
# u0=self.fun2(u0)
#
# u0 = u0*self.st['sensemap'].conj()
#
# u0 = CsTransform.pynufft.CombineMulti(u0,-1)
# for pp in range(0,4):
# matplotlib.pyplot.subplot(2,2,pp)
# matplotlib.pyplot.imshow(numpy.sum(numpy.abs(u0[...,u0.shape[2]/2+1,:]),-1),norm=norm,interpolation='nearest')
# matplotlib.pyplot.show()
# u = numpy.copy(u0)
# print('u0.shape',u0.shape)
#
# u_bar = numpy.copy(u)
# u_bar = fftpack.fftshift(u_bar,axes=(2,))
# u_bar[...,1:,:] = 0.0
# u_bar = fftpack.fftshift(u_bar,axes=(2,))
#
# FhFu_bar = self.FhF(u_bar)#*self.st['sensemap'])*self.st['sensemap'].conj()
# #FhFu_bar = CombineMulti(FhFu_bar,-1)
#
# u_ref = u0 - FhFu_bar
#
# q = u - u_bar
#
# W=numpy.sqrt(numpy.abs(q))
#
# for jj in range(0,self.nInner):
# print('iteratin',jj)
# #W=numpy.sqrt(numpy.abs(q*W))
# #W=Normalize(W)
#
#
# FhFWq = self.FhF(W*q)#*self.st['sensemap'])*self.st['sensemap'].conj()
# #FhFWq = CombineMulti(FhFWq, -1)
#
# C_q= -W*(u_ref - FhFWq) + 0.02*q
# q=q-0.3*C_q
# W = numpy.sqrt(numpy.abs(W*q))
# #q=q#*tse
# #q=Normalize(q)
# for pp in range(0,u0.shape[2]):
# matplotlib.pyplot.subplot(numpy.sqrt(u0.shape[2])+1,numpy.sqrt(u0.shape[2])+1,pp)
# matplotlib.pyplot.imshow(numpy.sum(numpy.abs(q[...,pp,:]),-1),norm=norm,interpolation='nearest')
# matplotlib.pyplot.show()
# u = q*W + u_bar
# # for pp in range(0,u0.shape[2]):
# # matplotlib.pyplot.subplot(4,4,pp)
# # matplotlib.pyplot.imshow(numpy.sum(numpy.abs(u[...,pp,:]),-1),norm=norm,interpolation='nearest')
# # matplotlib.pyplot.show()
# #
#
#
# return (u,u*W) | gpl-3.0 |
aflaxman/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 70 | 7808 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
PrashntS/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
rhuelga/sms-tools | lectures/06-Harmonic-model/plots-code/harmonic-inharmonic-sines.py | 2 | 2249 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
plt.figure(1, figsize=(9, 7))
plt.subplot(211)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/vibraphone-C6.wav'))
w = np.blackman(401)
N = 512
H = 100
t = -100
minSineDur = .02
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.01
mX, pX = STFT.stftAnal(x, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 10000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (vibraphone-C6.wav)')
plt.subplot(212)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/vignesh.wav'))
w = np.blackman(1101)
N = 2048
H = 250
t = -90
minSineDur = .1
maxnSines = 200
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 5000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (vignesh.wav)')
plt.tight_layout()
plt.savefig('harmonic-inharmonic-sines.png')
plt.show()
| agpl-3.0 |
0x0all/scikit-learn | examples/applications/plot_model_complexity_influence.py | 25 | 6378 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
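# Usage sketch (conf refers to one of the configuration dicts defined in the
# main code below):
#     powers, times, complexities = benchmark_influence(conf)
# Each returned list holds one entry per value in conf['changing_param_values'].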
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2**-15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
KarlClinckspoor/SAXS_treatment | WLM models/SAXS_FF_ipython.py | 1 | 11295 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 6 13:17:45 2017
@author: Karl
"""
import numpy as np
from numpy import sin, cos
import matplotlib.pyplot as plt
from scipy.special import jv
from scipy import integrate
from WLM_models.S_KP_EXV import *
# Not really a problem since S_KP_EXV has only a few functions, and no overlap.
def sphere_FF(q, R):
if R == 0:
return 0
FF = ((3 * (sin(q * R) - q * R * cos(q * R)) / (q * R) ** 3) ** 2)
return FF
def FI(X):
if X > 0.05:
FI = 3 * (sin(X) - X * cos(X))/(X**3)
return FI
else:
FI = 1 - 0.1 * X * X
return FI
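# The small-X branch above matches the Taylor expansion
# 3 * (sin(x) - x * cos(x)) / x**3 = 1 - x**2 / 10 + O(x**4).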
def spherical_shell(q, R1, R2):
def V(R):
return 4 / 3 * np.pi * R ** 3
if R1 < R2:
return (V(R2) * sphere_FF(q, R2) - V(R1) * sphere_FF(q, R1)) / (V(R2) - V(R1))
if R1 > R2:
return (V(R1) * sphere_FF(q, R1) - V(R2) * sphere_FF(q, R2)) / (V(R1) - V(R2))
def Bessel():
orders = [0, 1, 2, 3, 4, 5]
x = np.linspace(0, 10, 500)
for order in orders:
plt.plot(x, jv(order, x))
plt.show()
def Cylinder_FF_to_integrate3(q, R, L, al):
qrsin = q*R*sin(al)
qlcos = q*L*cos(al)/2
value = (2*jv(1, qrsin)/qrsin * sin(qlcos)/qlcos)**2 * sin(al)
return value
# Common units: R = 30, L = 120, length = 500, qs = np.logspace(-2, 0, num = length)
def Cylinder_FF_integrated(qs, R, L):
"""Uses simpson integration method to get the form function of a cylinder with the dimensions L and R"""
alphas = np.linspace(1e-2, np.pi / 2, num=50)
int_simps = np.zeros(len(qs))
to_int_simps = np.zeros(len(qs))
for i, q in enumerate(qs):
for j, alpha in enumerate(alphas):
qrsin = q * R * sin(alpha)
qlcos = q * L * cos(alpha) / 2
to_int_simps[j] = (2 * jv(1, qrsin) / qrsin * sin(qlcos) / qlcos) ** 2 * sin(alpha)
int_simps[i] = integrate.simps(to_int_simps, x=alphas)
return int_simps
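# Example usage (illustrative values only, following the "common units" note above):
#     qs = np.logspace(-2, 0, num=500)
#     ff = Cylinder_FF_integrated(qs, R=30, L=120)
#     plt.loglog(qs, ff)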
def Guinier(I0, q, Rg):
return I0*np.exp(-(q**2*Rg**2)/3)
def Ellipsoid_to_int(q, R, Ep, Th):
def r_f(R, Ep, Th):
return R * (sin(Th) ** 2 + Ep ** 2 * cos(Th) ** 2) ** (1 / 2)
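    # Ep is the axial ratio: r runs from Ep * R at Th = 0 to R at Th = pi / 2,
    # and Ep = 1 gives r = R for all Th (a sphere).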
r = r_f(R, Ep, Th)
Num = 3 * (sin(q*r) - q*r*cos(q*r))
Den = (q*r)**3
to_int = (Num/Den)**2*sin(Th)
return to_int
# Orientational average of the ellipsoid form factor.
# Minimal sketch, mirroring Cylinder_FF_integrated above (an assumption, since
# this function was left unfinished): average Ellipsoid_to_int over the polar
# angle with Simpson's rule.
def Ellipsoid_integrated(q, R, Ep):
    thetas = np.linspace(1e-2, np.pi / 2, num=50)
    to_int = np.array([Ellipsoid_to_int(q, R, Ep, th) for th in thetas])
    return integrate.simps(to_int, x=thetas)
def wormlike_chains(q, scale, d_head, Rcore, rho_rel, sigma, back, cont_l, kuhn_l, eps, dist_cq, nu_ppa,
scale_pow, exponent):
Rout = Rcore + d_head
FX = (((rho_rel * Rout ** 2 * 2 * jv(1, Rout * q) / (q * Rout)) -
((rho_rel + 1) * Rcore ** 2 * 2 * jv(1, Rcore * q) / (q * Rcore))) *
np.exp((-1/2) * q ** 2 * sigma ** 2))
FOX = rho_rel * Rout ** 2 - (rho_rel + 1) * Rcore ** 2
cyl_int = 0 # SUM
npoints = 50
step = 0.5 * np.pi / npoints
EPSO = (Rcore * eps + d_head) / Rout
for i in range(1, npoints + 1):
alpha = (i - 0.5) * step
sinalpha = sin(alpha)
SC = np.sqrt(sinalpha ** 2 + eps ** 2 * (1 - sinalpha ** 2))
RA = Rcore * SC
# RO = R + A(2) - unnecessary
SCO = np.sqrt(sinalpha ** 2 + EPSO ** 2 * (1 - sinalpha ** 2))
RAO = Rout * SCO
F = (((EPSO * rho_rel * Rout ** 2 * 2 * jv(1, RAO * q) / (q * RAO)) -
((rho_rel + 1) * eps * Rcore ** 2 * 2 * jv(1, RA * q) / (q * RA))) *
np.exp((-0.5) * q ** 2 * sigma ** 2))
cyl_int = cyl_int + F ** 2 # === SUM=SUM+F**2
cyl_int = cyl_int * step * 2 / np.pi
FO = EPSO * rho_rel * Rout ** 2 - (rho_rel + 1) * eps * Rcore ** 2
# if J == 1:
# print (qs, FX ** 2, FOX ** 2, cyl_int, FO ** 2)
SQ = S_KP_EXV(q, cont_l, kuhn_l)
CQ = FI(q * dist_cq)
FORMF = SQ * cyl_int / (FO ** 2)
Rout = abs(d_head) + abs(Rcore) # necessary?
FORMFX = SQ # 2 * jv(1, qs * Rout) / (qs * Rout) ** 2
XSEC = scale * FORMF / (1 + nu_ppa * CQ * FORMFX) + back + scale_pow* (0.01 / q) ** exponent
return XSEC
def WLM_whole_q(qs, scale, d_head, rad_core, rho_rel, sigma, back, L, kuhn, eps, D_CQ, nu_rpa, SC_pow, exponent):
ints = np.zeros(len(qs))
for i, q in enumerate(qs):
ints[i] = wormlike_chains(q, scale, d_head, rad_core, rho_rel, sigma, back, L, kuhn, eps, D_CQ, nu_rpa,
SC_pow, exponent)
return ints
def coreshell_micelles(q, scale, d_head, a, b, c, rho_out, sigma, back, eta_hs, r_hs):
# A(1) = SCALE
# A(2) = D_HEAD
# A(3) = A-AXE
# A(4) = B-AXE
# A(5) = C-AXE
# A(6) = RHO_OUT
# A(7) = SIGMA
# A(8) = BACKGROUND
# A(9) = ETA_HS
# A(10) = R_HS
    PI = np.pi  # np.pi is a constant, not a callable
NPOI = 50
STEP = 0.5 * PI / NPOI
SUM = 0
SUM1 = 0
# Graded interface factor
GRAD_O = np.exp(-q * q * sigma * sigma * 0.5)
GRAD_I = np.exp(-q * q * sigma * sigma * 0.5)
DRHO_CORE = -1
V_CORE = 4 * PI / 3 * a * b * c
    # volume of the outer ellipsoid (core + head): 4/3 * pi * product of semi-axes
    V_OUT = 4 * PI / 3 * (a + d_head) * (b + d_head) * (c + d_head)
V_SHELL = V_OUT - V_CORE
DRHO_HEAD = rho_out
for III in range (1, NPOI + 1, 1):
YY = (float(III) - 0.5) * STEP
YY = sin(YY)
for II in range (1, NPOI + 1, 1):
XX = (float(II) - 0.5) * STEP
XX = sin(XX)
ARG_C = np.sqrt((a ** 2 * YY ** 2 + b ** 2 * (1-YY ** 2)) * XX** 2 + c ** 2 * (1 - XX ** 2))
ARG_O = np.sqrt(((a+d_head) ** 2 * YY ** 2 + (b + d_head) ** 2 * (1 - YY ** 2)) * XX ** 2 + (c + d_head) ** 2 * (1 - XX ** 2))
FF = V_OUT * DRHO_HEAD * FI(q * ARG_O) * GRAD_O - V_CORE * (DRHO_HEAD - DRHO_CORE) * FI(q * ARG_C) * GRAD_I
            SUM = SUM + FF ** 2 * XX  # |F|^2 weighted by sin(alpha) (XX) for the orientational average
SUM1 = SUM1 + FF * XX
F0 = V_OUT * DRHO_HEAD - V_CORE * (DRHO_HEAD - DRHO_CORE)
F2 = SUM * STEP ** 2 / F0 ** 2 * 2 * PI
FFF = SUM1 * STEP ** 2 / F0 * 2 / PI
#print(q, F2, FFF, FFF ** 2)
SQ = S_HS(q, r_hs, eta_hs)
XSEC = scale * (F2 + FFF ** 2 * (SQ - 1)) + back
return XSEC
def S_HS(Q, RHS, ETA):
# Hard sphere structure factor
# q = scattering vector
# r_hs = interaction radius of hard spheres
# eta = volume fraction of hard spheres
ALN = (1 - ETA) ** 4
AL = (1 + 2 * ETA) ** 2 / ALN
BE = -6 * ETA * (1 + 0.5 * ETA) ** 2 / ALN
GA = 0.5 * ETA * AL
    # scattering argument: q times the hard-sphere diameter (Percus-Yevick form);
    # without the Q dependence S_HS would be constant in Q
    AR = 2 * RHS * Q
# Low argument expansion
if AR < 0.4:
        # In FORTRAN 77 fixed form a character in column 6 marks a continuation
        # line, so the GA term is part of this expression.
        GG = (AL * (1/3 - AR * AR / 30) + BE * (1/4 - AR * AR / 36)
              + GA * (1/6 - AR * AR / 48))
else:
SA = sin(AR)
CA = cos(AR)
        # FORTRAN continuation lines again: the BE and GA terms belong to GG.
        GG = (AL * (SA - AR * CA) / AR ** 3
              + BE * (2 * AR * SA + (2 - AR ** 2) * CA - 2) / AR ** 4
              + GA * (-AR ** 4 * CA + 4 * ((3 * AR ** 2 - 6) * CA
                                           + (AR ** 3 - 6 * AR) * SA + 6)) / AR ** 6)
S_HS = 1 / (1 + 24 * ETA * GG)
return S_HS
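# Example (illustrative values): S_HS(0.05, 40.0, 0.2) evaluates the hard-sphere
# (Percus-Yevick-type) structure factor at q = 0.05; it tends to 1 as ETA -> 0.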
if __name__ == '__main__':
q = 1
r_hs = 1
eta = 0.01
print(q, r_hs, eta, S_HS(q, r_hs, eta))
npoints = 50
# qs = np.logspace(np.log10(5.11423033E-03), np.log10(0.254317015), num = npoints)
qs = np.logspace(-2.29, -0.59, num=npoints)
scale = 0.1440E+00
d_head = 0.1929E+02
rad_core = 0.8109E+01
rho_rel = 0.5999E-01
sigma = 0.1000E+01
back = 0.0
L = 0.5000E+04
kuhn = 0.1000E+04
eps = 0.1000E+01
D_CQ = 0.1050E+03
nu_rpa = 0.3846E+02
SC_pow = 0.6757E-03
exponent = 4
# parameters = [scale, d_head, rad_core, rho_rel, sigma, back, L, kuhn, eps, D_CQ, nu_rpa, SC_pow, exponent]
# parameters = np.array(parameters)
nus = [20, 30, 40, 50, 60, 70, 80]
D_CQs = [40, 80, 100, 120, 150, 200]
ints_nu = []
for nu in nus:
scatt = WLM_whole_q(qs, scale, d_head, rad_core, rho_rel, sigma, back, L, kuhn, eps, D_CQ, nu,
SC_pow, exponent)
ints_nu.append(scatt)
ints_cqs = []
for cq in D_CQs:
scatt = WLM_whole_q(qs, scale, d_head, rad_core, rho_rel, sigma, back, L, kuhn, eps, cq, nu_rpa,
SC_pow, exponent)
ints_cqs.append(scatt)
plt.xscale('log')
plt.yscale('log')
for int, nu in zip(ints_nu, nus):
plt.plot(qs, int, label = ('Nu = ' + str(nu)))
plt.legend()
plt.show()
plt.xscale('log')
plt.yscale('log')
for int, cq in zip(ints_cqs, D_CQs):
plt.plot(qs, int, label = ('CQ = ' + str(cq)))
plt.legend()
plt.show()
# for i, q in enumerate(qs):
# ints[i] = wormlike_chains(q, scale, d_head, rad_core, rho_rel, sigma, back, L, kuhn, eps, D_CQ, nu_rpa,
# SC_pow, exponent)
"""
with open('SAXS_test.dat', 'r') as fhand:
q_test = []
int_test = []
for line in fhand:
line = line.strip()
line = line.split(' ')
if len(line) < 3:
continue
q, Int, err = line
q_test.append(float(q)/10)
int_test.append(float(Int))
plt.xscale('log')
plt.yscale('log')
plt.plot(qs, ints)
plt.plot(q_test, int_test)
plt.title('Sim1')
plt.savefig('sucesso.png')
plt.show()
"""
# FORTRAN 77 Source code
# Function FI
"""
FUNCTION FI(X)
IF(X.GT.0.05)THEN
FI=3.*(SIN(X)-X*COS(X))/X**3
RETURN
ELSE
FI=1.-0.1*X*X
RETURN
END IF
END
"""
# Function wormlike micellar core-shell chain
"""
IF(NXS.EQ.2)THEN
c infinite core-shell cylinders
C
C A(1) = SCALE
C A(2) = D_HEAD
C A(3) = R_IN
C A(4) = REL_RHO_OI
C A(5) = SIGMA
C A(6) = BACKGROUND
C A(7) = CONTOUR LENGTH
C A(8) = KUHN LENGTH
C A(9) = EPS XSEC
RO=A(3)+A(2)
R=A(3)
EPS=A(9)
BCK=A(6)
C
PI=ACOS(-1.)
FX=( A(4)*RO**2* 2.*BESSJ1(RO*X(1))/(X(1)*RO)
1 -(A(4)+1.)*A(3)**2*2.*BESSJ1(A(3)*X(1))/(X(1)*A(3))
1 ) *EXP(-0.5*X(1)**2*A(5)**2)
F0X=A(4)*RO**2-(A(4)+1.)*A(3)**2
SUM=0.
NPOI=50
STEP=0.5*ACOS(-1.)*1./FLOAT(NPOI)
DO II=1,NPOI
XX=(FLOAT(II)-0.5)*STEP
XX=SIN(XX)
SC=SQRT(XX**2+EPS**2*(1.-XX**2))
RA=R*SC
RO=R+A(2)
EPSO=(R*EPS+A(2))/RO
SCO=SQRT(XX**2+EPSO**2*(1.-XX**2))
RAO=RO*SCO
F=( EPSO*A(4)*RO**2* 2.*BESSJ1(RAO*X(1))/(X(1)*RAO)
1 -(A(4)+1.)*EPS*A(3)**2*2.*BESSJ1(RA*X(1))/(X(1)*RA)
1 ) *EXP(-0.5*X(1)**2*A(5)**2)
SUM=SUM+F**2
END DO
SUM=SUM*STEP*2./PI
F0=EPSO*A(4)*RO**2-(A(4)+1.)*EPS*A(3)**2
IF(J.EQ.1)WRITE(*,*)X(1),FX**2,F0X**2,SUM,F0**2
SQ=S_KP_EXV(X(1),A(7),A(8))
C
C PRISM-RPA CONCENTRATION EFFECTS
C
CQ=FI(X(1)*A(10))
C
FORMF=SQ*SUM/(F0)**2
RO=ABS(A(2))+ABS(A(3))
c FORMFX=SQ*(2.*BESSJ1(X(1)*RO)/(X(1)*RO))**2
FORMFX=SQ
XSEC=A(1)*FORMF/(1.+A(11)*CQ*FORMFX)+BCK
1 +A(12)*(0.01/X(1))**A(13)
RETURN
END IF
"""
| gpl-3.0 |
nrhine1/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
joyeshmishra/spark-tk | regression-tests/sparktkregtests/testcases/frames/ecdf_test.py | 13 | 3032 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests the ECDF functionality """
import unittest
import random
from sparktkregtests.lib import sparktk_test
class ecdfTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(ecdfTest, self).setUp()
# generate a dataset to test ecdf on
# it will just be a single column of ints
column = [[random.randint(0, 5)] for index in xrange(0, 20)]
schema = [("C0", int)]
self.frame = self.context.frame.create(column,
schema=schema)
def validate_ecdf(self):
# call sparktk ecdf function on the data and get as pandas df
ecdf_sparktk_result = self.frame.ecdf("C0")
pd_ecdf = ecdf_sparktk_result.to_pandas(ecdf_sparktk_result.row_count)
# get the original frame as pandas df so we can calculate our own result
pd_original_frame = self.frame.to_pandas(self.frame.row_count)
# the formula for calculating ecdf is
# F(x) = 1/n * sum from 1 to n of I(x_i)
# where I = { 1 if x_i <= x, 0 if x_i > x }
        # i.e., for each element in our data column, count
        # the number of items in the column which are less than
        # or equal to that element, then divide by the total
        # number of items in the column
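        # For example, for a (hypothetical) column [1, 3, 3, 5]:
        # F(1) = 1/4 = 0.25, F(3) = 3/4 = 0.75, F(5) = 4/4 = 1.0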
grouped = pd_original_frame.groupby("C0").size()
our_result = grouped.sort_index().cumsum()*1.0/len(pd_original_frame)
        # finally we iterate through the sparktk result and compare it with our result
for index, row in pd_ecdf.iterrows():
self.assertAlmostEqual(row["C0"+'_ecdf'],
our_result[int(row["C0"])])
def test_ecdf_bad_name(self):
"""Test ecdf with an invalid column name."""
with self.assertRaisesRegexp(Exception, "No column named bad_name"):
self.frame.ecdf("bad_name")
def test_ecdf_bad_type(self):
"""Test ecdf with an invalid column type."""
with self.assertRaisesRegexp(Exception, "does not exist"):
self.frame.ecdf(5)
def test_ecdf_none(self):
"""Test ecdf with a None for the column name."""
with self.assertRaisesRegexp(Exception, "column is required"):
self.frame.ecdf(None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/tests/test_backend_ps.py | 10 | 2166 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import re
import six
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup, knownfailureif
needs_ghostscript = knownfailureif(
matplotlib.checkdep_ghostscript()[0] is None,
"This test needs a ghostscript installation")
needs_tex = knownfailureif(
not matplotlib.checkdep_tex(),
"This test needs a TeX installation")
def _test_savefig_to_stringio(format='ps'):
buffers = [
six.moves.StringIO(),
io.StringIO(),
io.BytesIO()]
plt.figure()
plt.plot([0, 1], [0, 1])
plt.title("Déjà vu")
for buffer in buffers:
plt.savefig(buffer, format=format)
values = [x.getvalue() for x in buffers]
if six.PY3:
values = [
values[0].encode('ascii'),
values[1].encode('ascii'),
values[2]]
# Remove comments from the output. This includes things that
# could change from run to run, such as the time.
values = [re.sub(b'%%.*?\n', b'', x) for x in values]
assert values[0] == values[1]
assert values[1] == values[2].replace(b'\r\n', b'\n')
for buffer in buffers:
buffer.close()
@cleanup
def test_savefig_to_stringio():
_test_savefig_to_stringio()
@cleanup
@needs_ghostscript
def test_savefig_to_stringio_with_distiller():
matplotlib.rcParams['ps.usedistiller'] = 'ghostscript'
_test_savefig_to_stringio()
@cleanup
@needs_tex
def test_savefig_to_stringio_with_usetex():
matplotlib.rcParams['text.latex.unicode'] = True
matplotlib.rcParams['text.usetex'] = True
_test_savefig_to_stringio()
@cleanup
def test_savefig_to_stringio_eps():
_test_savefig_to_stringio(format='eps')
@cleanup
@needs_tex
def test_savefig_to_stringio_with_usetex_eps():
matplotlib.rcParams['text.latex.unicode'] = True
matplotlib.rcParams['text.usetex'] = True
_test_savefig_to_stringio(format='eps')
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
Hash--/ICRH | WEST_design/plot_currents_vs_Rc.py | 2 | 4335 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 21:39:58 2015
In this script we plot the capacitor currents vs coupling resistance.
The currents have been calculated for an input power of 1.5 MW per 1/2 antenna.
@author: hash
"""
import pandas as pd
from matplotlib.pylab import *
# open the excel sheet with pandas
# The data comes from the final antenna model made with Designer
data = pd.read_excel(io='./data/TOPICA/ToreSupra_WEST/WEST_ICRH_Compilation_resultats_designer.xlsx',
sheetname=0)
# The information we want to plot corresponds to the coupling resistance
# and the capacitor currents.
# Build a more convenient DataFrame and drop the rows with missing data.
Icapa_Designer = data[['Plasma Model Coupling Resistance [Ohm] (calculated)',
'Matching Impedace Target [Ohm]',
'I1H [kA]', 'I1B [kA]', 'I2H [kA]', 'I2B [kA]']].dropna()
Icapa_Designer.rename(columns = { \
'Plasma Model Coupling Resistance [Ohm] (calculated)':'Rc',
'Matching Impedace Target [Ohm]':'Zmatch'}, inplace=True)
Icapa_Designer.set_index('Rc', inplace=True)
Icapa_Designer_ideal_matching = Icapa_Designer[Icapa_Designer['Zmatch'] == '29.74 - 0j'].drop('Zmatch', axis=1)
Icapa_Designer_degrd_matching = Icapa_Designer[Icapa_Designer['Zmatch'] == '29.74 - 15j'].drop('Zmatch', axis=1)
# Data from Python skrf modeling
#
data2 = pd.read_excel(io='./data/TOPICA/ToreSupra_WEST/WEST_ICRH_Compilation_resultats_designer.xlsx',
sheetname=1)
Icapa_Python = data2[['Plasma Model Coupling Resistance [Ohm] (calculated)',
'Matching Impedace Target [Ohm]',
'I1H [kA]', 'I1B [kA]', 'I2H [kA]', 'I2B [kA]']].dropna()
Icapa_Python.rename(columns = { \
'Plasma Model Coupling Resistance [Ohm] (calculated)':'Rc',
'Matching Impedace Target [Ohm]':'Zmatch'}, inplace=True)
Icapa_Python.set_index('Rc', inplace=True)
Icapa_Python_ideal_matching = Icapa_Python[Icapa_Python['Zmatch'] == '29.74 - 0j'].drop('Zmatch', axis=1)
Icapa_Python_degrd_matching = Icapa_Python[Icapa_Python['Zmatch'] == '29.74 - 15j'].drop('Zmatch', axis=1)
def plot_figure(I_ideal, I_degrd, fig=None):
# average and std
I_ideal_min = I_ideal.groupby(I_ideal.index).min().min(axis=1)
I_ideal_max = I_ideal.groupby(I_ideal.index).max().max(axis=1)
I_degrd_min = I_degrd.groupby(I_degrd.index).min().min(axis=1)
I_degrd_max = I_degrd.groupby(I_degrd.index).max().max(axis=1)
    # For an unknown reason, I need to cast the index values into a numpy array
# forcing the dtype to be float with pandas '0.13.1'
x_ideal = np.array(I_ideal_min.index.values, dtype=float)
x_degrd = np.array(I_degrd_min.index.values, dtype=float)
y1_ideal = I_ideal_min.values
y2_ideal = I_ideal_max.values
y1_degrd = I_degrd_min.values
y2_degrd = I_degrd_max.values
# import pdb; pdb.set_trace()
figure(fig)
    if fig:  # clear the figure if it already exists
clf()
fill_between(x_ideal, y1_ideal, y2_ideal,
alpha=0.2, color= 'b')
fill_between(x_degrd, y1_degrd, y2_degrd,
alpha=0.2, color= 'r')
ylim(0, 2.2)
grid(True)
# The fill_between() command creates a PolyCollection that is not supported by the legend() command.
# Therefore you will have to use another matplotlib artist (compatible with legend()) as a proxy,
# http://stackoverflow.com/questions/14534130/legend-not-showing-up-in-matplotlib-stacked-area-plot
plt.plot([], [], color='b', linewidth=10, alpha=0.2)
plt.plot([], [], color='r', linewidth=10, alpha=0.2)
legend(('VSWR=1', 'VSWR<1.7'), loc='best')
# position bottom right
text(2.5, 1.0, 'PRELIMINARY',
fontsize=50, color='gray',
ha='right', va='bottom', alpha=0.2)
# compare dataset
plot_figure(Icapa_Python_ideal_matching, Icapa_Python_degrd_matching, 1)
title('Python')
axhline(0.85, color="k", ls='--', lw=2)
plot_figure(Icapa_Designer_ideal_matching, Icapa_Designer_degrd_matching, 2)
title('Designer')
Icapa_ideal_concat = pd.concat((Icapa_Designer_ideal_matching, Icapa_Python_ideal_matching))
Icapa_degrd_concat = pd.concat((Icapa_Designer_degrd_matching, Icapa_Python_degrd_matching))
plot_figure(Icapa_ideal_concat, Icapa_degrd_concat, 3)
title('All') | mit |
mjudsp/Tsallis | sklearn/linear_model/tests/test_logistic.py | 24 | 39507 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import compute_class_weight
from sklearn.utils.fixes import sp_version
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.model_selection import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg', 'sag']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# only 'liblinear' solver
msg = "Solver liblinear does not support a multinomial backend."
lr = LR(solver='liblinear', multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg', 'sag']:
clf = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000)
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e column of one vectors.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # The cv indices from stratified kfold (where stratification is done based
    # on the fine-grained iris classes, i.e., before the classes 0 and 1 are
    # conflated) are used for both clf and clf1
n_cv = 2
cv = StratifiedKFold(n_cv)
precomputed_folds = list(cv.split(train, target))
# Train clf on the original dataset where classes 0 and 1 are separated
clf = LogisticRegressionCV(cv=precomputed_folds)
clf.fit(train, target)
# Conflate classes 0 and 1 and train clf1 on this modified dataset
clf1 = LogisticRegressionCV(cv=precomputed_folds)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
# Ensure that what OvR learns for class2 is same regardless of whether
# classes 0 and 1 are separated or not
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg', 'sag']:
max_iter = 100 if solver == 'sag' else 15
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=max_iter,
random_state=42, tol=1e-2, cv=2)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
msg = ("In LogisticRegressionCV the liblinear solver cannot handle "
"multiclass with class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set class_weight='balanced'")
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raise_message(ValueError, msg, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
class_weight='balanced', max_iter=2000)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
sample_weight = y + 1
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
for solver in ['lbfgs', 'liblinear']:
clf_sw_none = LR(solver=solver, fit_intercept=False)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver=solver, fit_intercept=False)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(
clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
clf_sw_n.fit(X, y, sample_weight=sample_weight)
clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10)
# ignore convergence warning due to small dataset
with ignore_warnings():
clf_sw_sag.fit(X, y, sample_weight=sample_weight)
clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False)
clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
# Test that passing class_weight as [1,2] is the same as
# passing class weight = [1,1] but adjusting sample weights
# to be 2 for all instances of class 2
for solver in ['lbfgs', 'liblinear']:
clf_cw_12 = LR(solver=solver, fit_intercept=False,
class_weight={0: 1, 1: 2})
clf_cw_12.fit(X, y)
clf_sw_12 = LR(solver=solver, fit_intercept=False)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
# Test the above for l1 penalty and l2 penalty with dual=True.
# since the patched liblinear code is different.
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l1")
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l1")
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l2", dual=True)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l2", dual=True)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
# helper for returning a dictionary instead of an array
classes = np.unique(y)
class_weight = compute_class_weight("balanced", classes, y)
class_weight_dict = dict(zip(classes, class_weight))
return class_weight_dict
def test_logistic_regression_class_weights():
# Multinomial case: remove 90% of class 0
X = iris.data[45:, :]
y = iris.target[45:]
solvers = ("lbfgs", "newton-cg")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
# Binary case: remove 90% of class 0 and 100% of class 2
X = iris.data[45:100, :]
y = iris.target[45:100]
solvers = ("lbfgs", "newton-cg", "liblinear")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_multinomial_logistic_regression_with_classweight_auto():
X, y = iris.data, iris.target
model = LogisticRegression(multi_class='multinomial',
class_weight='auto', solver='lbfgs')
# 'auto' is deprecated and will be removed in 0.19
assert_warns_message(DeprecationWarning,
"class_weight='auto' heuristic is deprecated",
model.fit, X, y)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
    # 'lbfgs' is used as a reference
solver = 'lbfgs'
ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
ref_i.fit(X, y)
ref_w.fit(X, y)
assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))
for solver in ['sag', 'newton-cg']:
clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=1000, tol=1e-6)
clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=1000, tol=1e-6,
fit_intercept=False)
clf_i.fit(X, y)
clf_w.fit(X, y)
assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and the other solvers
assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)
    # Test that the path gives almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg', 'sag']:
clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    # Test that the maximum number of iterations is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'liblinear' and multi_class == 'multinomial':
continue
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
multi_class=multi_class,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag'):
break
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
# A 1-iteration second fit on same data should give almost same result
# with warm starting, and quite different result without warm starting.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
with ignore_warnings():
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
| bsd-3-clause |
motobiker2008/aotm | freq_spectr.py | 1 | 11067 | # coding=utf-8
import numpy as np
from matplotlib import mlab
from matplotlib import pyplot
from matplotlib import cbook
def draw_specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128, cmap=None, xextent=None,
pad_to=None, sides='default', scale_by_freq=None, hold=None,
**kwargs):
ax = pyplot.gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
        ret = specgram1(ax, x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, cmap=cmap,
xextent=xextent, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq, **kwargs)
pyplot.draw_if_interactive()
finally:
ax.hold(washold)
pyplot.sci(ret[-1])
return ret
def specgram1(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, **kwargs):
"""
Plot a spectrogram.
Call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, **kwargs)
    Compute and plot a spectrogram of the data in *x*. The data are split into
    segments of length *NFFT* and the power spectral density is computed for each.
    Hanning windows are applied segment by segment, with an overlap of *noverlap* samples.
    Colour indicates power in decibels.
%(PSD)s
*noverlap*: integer
        The number of points of overlap between windows (default 128)
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.colors.Colormap` instance; if *None*, use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`~matplotlib.mlab.specgram`
*kwargs*:
Additional kwargs are passed on to imshow which makes the
specgram image
Return value is (*Pxx*, *freqs*, *bins*, *im*):
      - *bins* is the array of time points
      - *freqs* is the array of frequencies
      - *Pxx* is the array of powers, with shape `(len(freqs), len(bins))`
- *im* is a :class:`~matplotlib.image.AxesImage` instance
.. note::
        If the data are purely real, only the positive half of the spectrum is
        shown; otherwise both halves are.
        This can be changed via the *sides* argument.
Also note that while the plot is in dB, the *Pxx* array returned is
linear in power.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold:
self.cla()
Pxx, freqs, bins = specgram2(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None:
xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent, **kwargs)
self.axis('auto')
return Pxx, freqs, bins, im
def specgram2(x, NFFT=256, Fs=2, detrend=mlab.detrend_none, window=mlab.window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
length segments and the PSD of each section is computed. The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned. If *x* is complex then the complete
spectrum is returned.
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 128.
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert (NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real # Needed since helper implements generically
return Pxx, freqs, t
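# Illustrative usage sketch (not part of the original file): how specgram2
# might be called on a synthetic test signal. The sampling rate and tone
# frequencies below are assumptions chosen purely for demonstration.
def _example_specgram2():
    fs = 1000.0                          # assumed sampling frequency in Hz
    t = np.arange(0, 2.0, 1.0 / fs)      # two seconds of samples
    sig = np.sin(2 * np.pi * 100 * t) + 0.5 * np.sin(2 * np.pi * 250 * t)
    # 256-sample Hanning windows with 128 samples of overlap, matching the
    # defaults of the functions above
    Pxx, freqs, bins = specgram2(sig, NFFT=256, Fs=fs, noverlap=128)
    # Pxx has one row per frequency bin and one column per window position
    return Pxx, freqs, bins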
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
# The checks for if y is x are so that we can use the same function to
# implement the core of psd(), csd(), and spectrogram() without doing
# extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
# Make sure we're dealing with a numpy array. If y and x were the same
# object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
else:
y = x
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x) < NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y) < NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to // 2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
if cbook.iterable(window):
assert (len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs, n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i] + NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i] + NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:, i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
Pxy /= (np.abs(windowVals) ** 2).sum()
# Also include scaling factors for one-sided densities and dividing by the
# sampling frequency, if desired. Scale everything, except the DC component
# and the NFFT/2 component:
Pxy[1:-1] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
Pxy /= Fs
t = 1. / Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[numFreqs // 2:] - Fs, freqs[:numFreqs // 2]))
Pxy = np.concatenate((Pxy[numFreqs // 2:, :], Pxy[:numFreqs // 2, :]), 0)
return Pxy, freqs, t
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, float)
n = np.arange(0, M)
return 0.5-0.5*np.cos(2.0*np.pi*n/(M-1)) | gpl-2.0 |
subodhchhabra/pandashells | pandashells/bin/p_hist.py | 2 | 4509 | #! /usr/bin/env python
# standard library imports
import sys
import argparse
import importlib
import textwrap
from pandashells.lib import module_checker_lib, arg_lib
module_checker_lib.check_for_modules(['pandas'])
from pandashells.lib import io_lib
import numpy as np
import pandas as pd
# this silly function makes mock testing easier
def get_imports(name): # pragma no cover
return importlib.import_module(name)
def get_input_args():
msg = textwrap.dedent(
"""
Plot histograms from input data. Can either plot just a single
histogram or a grid of histograms with different columns of data.
When multiple columns are specified, creates a grid of histograms,
one for each specified column.
-----------------------------------------------------------------------
Examples:
            * Plot histogram of a beta distribution
p.rand -t beta --alpha 3 --beta 10 -n 10000\\
| p.hist --names beta -n 50
            * Plot a side-by-side comparison of a gamma and normal distribution
paste <(p.rand -t normal -n 10000 | p.df --names normal)\\
<(p.rand -t gamma -n 10000 | p.df --names gamma)\\
| p.hist -i table -c normal gamma
-----------------------------------------------------------------------
"""
)
# read command line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)
arg_lib.add_args(parser, 'io_in', 'io_out', 'decorating')
# specify columns to histogram
parser.add_argument(
'-c', '--cols', help='Column(s) to histogram', nargs='+')
parser.add_argument(
'-q', '--quiet', action='store_true', default=False,
        help='Quiet means no plots. Send numeric output to stdout instead')
parser.add_argument(
'-n', '--nbins', help='Number of bins (default=30)', nargs=1,
default=[30], type=int)
parser.add_argument(
'-r', '--range', help='Range (min max) of x axis', nargs=2,
default=None, type=float)
parser.add_argument(
'-l', '--layout', help='Layout (rows, cols)',
nargs=2, default=None, type=int)
parser.add_argument(
'-a', '--alpha', help='Set opacity of hist bars', nargs=1,
default=[1.], type=float)
parser.add_argument(
'-d', '--density', action='store_true', default=False,
help='Show probability density instead of counts')
parser.add_argument(
'--sharex', action='store_true', default=False,
help='Make all x axes have the same range')
parser.add_argument(
'--sharey', action='store_true', default=False,
help='Make all y axes have the same range')
return parser.parse_args()
def validate_args(args, cols, df):
# make sure all columns exist in dataframe
bad_col_list = [c for c in cols if c not in df.columns]
if bad_col_list:
msg = '\n\nThese columns were not found:\n\t'
msg += ',\n\t'.join(bad_col_list)
sys.stderr.write(msg + '\n')
sys.exit(1)
if args.quiet and len(cols) > 1:
msg = "Quiet is only allowed for single histograms"
sys.stderr.write(msg)
sys.exit(1)
def main():
args = get_input_args()
df = io_lib.df_from_input(args)
# extract parameters from arg parser
nbins = args.nbins[0]
range_tup = args.range
layout_tup = args.layout
alpha = args.alpha[0]
do_density = args.density
sharex = args.sharex
sharey = args.sharey
cols = args.cols if args.cols else [df.columns[0]]
validate_args(args, cols, df)
# no plotting if output requested
if args.quiet:
counts, edges = np.histogram(
df[cols[0]], bins=nbins, range=range_tup, density=do_density)
centers = edges[:-1] + 0.5 * np.diff(edges)
df_out = pd.DataFrame({'bins': centers, 'counts': counts})
io_lib.df_to_output(args, df_out)
# otherwise do plotting
else:
module_checker_lib.check_for_modules(['matplotlib'])
plot_lib = get_imports('pandashells.lib.plot_lib')
plot_lib.set_plot_styling(args)
df.hist(cols, bins=nbins, range=range_tup,
alpha=alpha, sharex=sharex, sharey=sharey, layout=layout_tup,
normed=do_density)
plot_lib.refine_plot(args)
plot_lib.show(args)
if __name__ == '__main__': # pragma: no cover
main()
| bsd-2-clause |
ngannguyen/immunoseq | src/lendist.py | 1 | 1833 | #!/usr/bin/env python2.6
'''
05/01/2012 nknguyen soe ucsc edu
Input: input directory of fasta files
Output: Plot showing sequence length distribution
'''
import os, sys, re
import matplotlib.pyplot as pyplot
from immunoseq.lib.immunoseqLib import *
from immunoseq.lib.lendistLib import *
def getLenDist(seqs):
    len2freq = {} #key = len, values = [readFreq, uniqFreq]
for s in seqs.values():
l = len(s.seq)
        if l not in len2freq:
            # use a list (not a tuple) so the counts can be updated in place below
            len2freq[l] = [s.count, 1]
        else:
            len2freq[l][0] += s.count
            len2freq[l][1] += 1
total = len(seqs)
totalreads = sum([s.count for s in seqs.values()])
if total > 0:
for l, c in len2freq.iteritems():
len2freq[l][0] = c[0]*100.0/totalreads
len2freq[l][1] = c[1]*100.0/total
return len2freq
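def _example_getLenDist():
    # Illustrative sketch, not part of the original script: getLenDist expects
    # a dict of records exposing .seq and .count. The minimal record type used
    # here is an assumption made only for demonstration.
    from collections import namedtuple
    Seq = namedtuple("Seq", ["seq", "count"])
    seqs = {"s1": Seq("CASSL", 3), "s2": Seq("CASSLG", 1)}
    # 4 reads over 2 unique sequences: length 5 -> [75.0, 50.0] (reads%, unique%)
    # and length 6 -> [25.0, 50.0]
    return getLenDist(seqs)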
def getSampleLenDist(samples):
sample2len2freq = {}
lens = []
for sample in samples:
len2freq = getLenDist( sample.seqs )
sample2len2freq[sample.name] = len2freq
for l in len2freq.keys():
if l not in lens:
lens.append(l)
for s, l2f in sample2len2freq.iteritems():
for l in lens:
if l not in l2f:
                l2f[l] = [0.0, 0.0]
return sample2len2freq, sorted(lens)
def main():
parser = initOptions3()
initPlotOptions( parser )
options, args = parser.parse_args()
checkPlotOptions( options, parser )
checkOptions3(parser, args, options)
mincount = 1
samples = readfiles(options.indir, mincount, 1)
uniq = True
outfile = os.path.join(options.outdir, "lenDist")
drawLenDist(samples, outfile, uniq)
outfileRead = os.path.join(options.outdir, "lenDist-read")
drawLenDist(samples, outfileRead, not uniq)
if __name__ == '__main__':
main()
| mit |
alan-mnix/MLFinalProject | mlp.py | 1 | 7596 | import numpy as np
import warnings
from itertools import cycle, izip
from sklearn.utils import gen_even_slices
from sklearn.utils import shuffle
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelBinarizer
from sklearn.lda import LDA
def _softmax(x):
np.exp(x, x)
x /= np.sum(x, axis=1)[:, np.newaxis]
def _tanh(x):
np.tanh(x, x)
def _dtanh(x):
"""Derivative of tanh as a function of tanh."""
x *= -x
x += 1
class BaseMLP(BaseEstimator):
"""Base class for estimators base on multi layer
perceptrons."""
def __init__(self, n_hidden, lr, l2decay, loss, output_layer, batch_size, verbose=0):
self.n_hidden = n_hidden
self.lr = lr
self.l2decay = l2decay
self.loss = loss
self.batch_size = batch_size
self.verbose = verbose
# check compatibility of loss and output layer:
if output_layer=='softmax' and loss!='cross_entropy':
raise ValueError('Softmax output is only supported '+
'with cross entropy loss function.')
if output_layer!='softmax' and loss=='cross_entropy':
raise ValueError('Cross-entropy loss is only ' +
'supported with softmax output layer.')
# set output layer and loss function
if output_layer=='linear':
self.output_func = id
elif output_layer=='softmax':
self.output_func = _softmax
elif output_layer=='tanh':
self.output_func = _tanh
else:
raise ValueError("'output_layer' must be one of "+
"'linear', 'softmax' or 'tanh'.")
if not loss in ['cross_entropy', 'square', 'crammer_singer']:
raise ValueError("'loss' must be one of " +
"'cross_entropy', 'square' or 'crammer_singer'.")
self.loss = loss
def fit(self, X, y, max_epochs, shuffle_data, verbose=0):
# get all sizes
n_samples, n_features = X.shape
if y.shape[0] != n_samples:
raise ValueError("Shapes of X and y don't fit.")
self.n_outs = y.shape[1]
#n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_batches = n_samples // self.batch_size  # integer number of full batches
if n_samples % self.batch_size != 0:
warnings.warn("Discarding some samples: \
sample size not divisible by chunk size.")
n_iterations = int(max_epochs * n_batches)
if shuffle_data:
X, y = shuffle(X, y)
# generate batch slices
batch_slices = list(gen_even_slices(n_batches * self.batch_size, n_batches))
# generate weights.
# TODO: smart initialization
self.weights1_ = np.random.uniform(size=(n_features, self.n_hidden))/np.sqrt(n_features)
self.bias1_ = np.zeros(self.n_hidden)
self.weights2_ = np.random.uniform(size=(self.n_hidden, self.n_outs))/np.sqrt(self.n_hidden)
self.bias2_ = np.zeros(self.n_outs)
# preallocate memory
x_hidden = np.empty((self.batch_size, self.n_hidden))
delta_h = np.empty((self.batch_size, self.n_hidden))
x_output = np.empty((self.batch_size, self.n_outs))
delta_o = np.empty((self.batch_size, self.n_outs))
# main loop
for i, batch_slice in izip(xrange(n_iterations), cycle(batch_slices)):
self._forward(i, X, batch_slice, x_hidden, x_output)
self._backward(i, X, y, batch_slice, x_hidden, x_output, delta_o, delta_h)
return self
def predict(self, X):
n_samples = X.shape[0]
x_hidden = np.empty((n_samples, self.n_hidden))
x_output = np.empty((n_samples, self.n_outs))
self._forward(None, X, slice(0, n_samples), x_hidden, x_output)
return x_output
def _forward(self, i, X, batch_slice, x_hidden, x_output):
"""Do a forward pass through the network"""
x_hidden[:] = np.dot(X[batch_slice], self.weights1_)
x_hidden += self.bias1_
np.tanh(x_hidden, x_hidden)
x_output[:] = np.dot(x_hidden, self.weights2_)
x_output += self.bias2_
# apply output nonlinearity (if any)
self.output_func(x_output)
def _backward(self, i, X, y, batch_slice, x_hidden, x_output, delta_o, delta_h):
"""Do a backward pass through the network and update the weights"""
# calculate derivative of output layer
if self.loss in ['cross_entropy'] or (self.loss == 'square' and self.output_func == id):
delta_o[:] = y[batch_slice] - x_output
elif self.loss == 'crammer_singer':
raise ValueError("Not implemented yet.")
delta_o[:] = 0
delta_o[y[batch_slice], np.ogrid[len(batch_slice)]] -= 1
delta_o[np.argmax(x_output - np.ones((1))[y[batch_slice], np.ogrid[len(batch_slice)]], axis=1), np.ogrid[len(batch_slice)]] += 1
elif self.loss == 'square' and self.output_func == _tanh:
delta_o[:] = (y[batch_slice] - x_output) * _dtanh(x_output)
else:
raise ValueError("Unknown combination of output function and error.")
if self.verbose > 0:
print(np.linalg.norm(delta_o / self.batch_size))
delta_h[:] = np.dot(delta_o, self.weights2_.T)
# update weights
self.weights2_ += self.lr / self.batch_size * np.dot(x_hidden.T, delta_o)
self.bias2_ += self.lr * np.mean(delta_o, axis=0)
self.weights1_ += self.lr / self.batch_size * np.dot(X[batch_slice].T, delta_h)
self.bias1_ += self.lr * np.mean(delta_h, axis=0)
class MLPClassifier(BaseMLP, ClassifierMixin):
""" Multilayer Perceptron Classifier.
Uses a neural network with one hidden layer.
Parameters
----------
Attributes
----------
Notes
-----
References
----------"""
def __init__(self, n_hidden=200, lr=0.1, l2decay=0, loss='cross_entropy',
output_layer='softmax', batch_size=100, verbose=0):
super(MLPClassifier, self).__init__(n_hidden, lr, l2decay, loss,
output_layer, batch_size, verbose)
def fit(self, X, y, max_epochs=10, shuffle_data=False):
self.lb = LabelBinarizer()
one_hot_labels = self.lb.fit_transform(y)
super(MLPClassifier, self).fit(
X, one_hot_labels, max_epochs,
shuffle_data)
return self
def predict(self, X):
prediction = super(MLPClassifier, self).predict(X)
return self.lb.inverse_transform(prediction)
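# Illustrative usage sketch (not part of the original file): fitting the
# classifier above on synthetic data. The dataset shape and hyper-parameters
# are assumptions chosen only for demonstration.
def _example_mlp_usage():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=300, n_features=10, n_informative=5,
                               n_classes=3, random_state=0)
    clf = MLPClassifier(n_hidden=50, lr=0.1, batch_size=100)
    clf.fit(X, y, max_epochs=20, shuffle_data=True)
    return clf.score(X, y)  # training accuracy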
def test_classification():
from read import read
import numpy, tfidf
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
m, files = read("training.json")
y_map = [str(file["topic"]) for file in files]
map = []
for i in range(len(y_map)):
if(len(map) == 0 or not map.__contains__(y_map[i])):
map.append(y_map[i])
y = numpy.array([map.index(y_map[i]) for i in range(len(y_map))])
print("Construindo TF-IDF...")
X, vectorizer = tfidf.vectorizeTFIDF(files)
print X.shape
print("Performing dimensionality reduction using LDA...")
lda = LDA(n_components=9)
X = X.toarray()
lda.fit(X, y)
X = lda.transform(X)
mlp = MLPClassifier()
mlp.fit(X, y)
training_score = mlp.score(X, y)
print("training accuracy: %f" % training_score)
#assert(training_score > .95)
if __name__ == "__main__":
test_classification() | gpl-2.0 |
hammerlab/cohorts | test/test_basic.py | 1 | 2941 | # Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from . import data_path, generated_data_path, DATA_DIR
from .data_generate import generate_vcfs
from cohorts import Cohort, Patient
from cohorts.utils import InvalidDataError
import pandas as pd
from nose.tools import raises, eq_, ok_
def make_simple_clinical_dataframe(
os_list=None,
pfs_list=None,
deceased_list=None,
progressed_or_deceased_list=None):
return pd.DataFrame({"id": ["1", "4", "5"],
"age": [15, 20, 25],
"OS": [100, 150, 120] if os_list is None else os_list,
"PFS": [50, 40, 120] if pfs_list is None else pfs_list,
"deceased": [True, False, False] if deceased_list is None else deceased_list,
"progressed_or_deceased": [True, True, False] if progressed_or_deceased_list is None else progressed_or_deceased_list})
def make_simple_cohort(merge_type="union",
**kwargs):
clinical_dataframe = make_simple_clinical_dataframe(**kwargs)
patients = []
for i, row in clinical_dataframe.iterrows():
row = dict(row)
patient = Patient(id=row.pop("id"),
os=row.pop("OS"),
pfs=row.pop("PFS"),
deceased=row.pop("deceased"),
progressed_or_deceased=row.pop("progressed_or_deceased"),
additional_data=row)
patients.append(patient)
Cohort.normalized_per_mb = False
cohort = Cohort(
patients=patients,
responder_pfs_equals_os=True,
merge_type=merge_type,
cache_dir=generated_data_path("cache"))
return cohort
def test_pfs_equal_to_os():
# Should not error
make_simple_cohort(pfs_list=[100, 150, 120])
@raises(InvalidDataError)
def test_pfs_greater_than_os():
make_simple_cohort(pfs_list=[120, 150, 120])
@raises(InvalidDataError)
def test_progressed_vs_pfs():
make_simple_cohort(progressed_or_deceased_list=[True, False, False])
def test_simple_cohort():
cohort = make_simple_cohort()
eq_(len(cohort.as_dataframe()), 3)
columns = set(cohort.as_dataframe().columns)
ok_("patient_id" in columns)
ok_("age" in columns)
ok_("pfs" in columns)
ok_("os" in columns)
| apache-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/algorithms.py | 1 | 63584 | """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from textwrap import dedent
from typing import Dict
from warnings import catch_warnings, simplefilter, warn
import numpy as np
from pandas._libs import algos, hashtable as htable, lib
from pandas._libs.tslib import iNaT
from pandas.util._decorators import Appender, Substitution, deprecate_kwarg
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
maybe_promote,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_object,
ensure_platform_int,
ensure_uint64,
is_array_like,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetimelike,
is_extension_array_dtype,
is_float_dtype,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_period_dtype,
is_scalar,
is_signed_integer_dtype,
is_sparse,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna, na_value_for_dtype
from pandas.core import common as com
from pandas.core.indexers import validate_indices
_shared_docs = {} # type: Dict[str, str]
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values, dtype=None):
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : array-like
dtype : pandas_dtype, optional
coerce to this dtype
Returns
-------
(ndarray, pandas_dtype, algo dtype as a string)
"""
# we check some simple dtypes first
try:
if is_object_dtype(dtype):
return ensure_object(np.asarray(values)), "object", "object"
if is_bool_dtype(values) or is_bool_dtype(dtype):
# we are actually coercing to uint64
# until our algos support uint8 directly (see TODO)
return np.asarray(values).astype("uint64"), "bool", "uint64"
elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
return ensure_int64(values), "int64", "int64"
elif is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype):
return ensure_uint64(values), "uint64", "uint64"
elif is_float_dtype(values) or is_float_dtype(dtype):
return ensure_float64(values), "float64", "float64"
elif is_object_dtype(values) and dtype is None:
return ensure_object(np.asarray(values)), "object", "object"
elif is_complex_dtype(values) or is_complex_dtype(dtype):
# ignore the fact that we are casting to float
# which discards complex parts
with catch_warnings():
simplefilter("ignore", np.ComplexWarning)
values = ensure_float64(values)
return values, "float64", "float64"
except (TypeError, ValueError, OverflowError):
# if we are trying to coerce to a dtype
# and it is incompat this will fall thru to here
return ensure_object(values), "object", "object"
# datetimelike
if (
needs_i8_conversion(values)
or is_period_dtype(dtype)
or is_datetime64_any_dtype(dtype)
or is_timedelta64_dtype(dtype)
):
if is_period_dtype(values) or is_period_dtype(dtype):
from pandas import PeriodIndex
values = PeriodIndex(values)
dtype = values.dtype
elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
from pandas import TimedeltaIndex
values = TimedeltaIndex(values)
dtype = values.dtype
else:
# Datetime
if values.ndim > 1 and is_datetime64_ns_dtype(values):
# Avoid calling the DatetimeIndex constructor as it is 1D only
# Note: this is reached by DataFrame.rank calls GH#27027
asi8 = values.view("i8")
dtype = values.dtype
return asi8, dtype, "int64"
from pandas import DatetimeIndex
values = DatetimeIndex(values)
dtype = values.dtype
return values.asi8, dtype, "int64"
elif is_categorical_dtype(values) and (
is_categorical_dtype(dtype) or dtype is None
):
values = getattr(values, "values", values)
values = values.codes
dtype = "category"
# we are actually coercing to int64
# until our algos support int* directly (not all do)
values = ensure_int64(values)
return values, dtype, "int64"
# we have failed, return object
values = np.asarray(values, dtype=np.object)
return ensure_object(values), "object", "object"
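# Illustrative sketch (not part of the upstream module): a couple of the
# coercions performed by _ensure_data, shown doctest-style for reference only
# (exact dtype reprs can differ by platform).
#
# >>> _ensure_data(np.array([True, False]))
# (array([1, 0], dtype=uint64), 'bool', 'uint64')
# >>> _ensure_data(np.array([1.5, 2.5]))
# (array([1.5, 2.5]), 'float64', 'float64')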
def _reconstruct_data(values, dtype, original):
"""
reverse of _ensure_data
Parameters
----------
values : ndarray
dtype : pandas_dtype
original : ndarray-like
Returns
-------
Index for extension types, otherwise ndarray casted to dtype
"""
from pandas import Index
if is_extension_array_dtype(dtype):
values = dtype.construct_array_type()._from_sequence(values)
elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
values = Index(original)._shallow_copy(values, name=None)
elif is_bool_dtype(dtype):
values = values.astype(dtype)
# we only support object dtypes bool Index
if isinstance(original, Index):
values = values.astype(object)
elif dtype is not None:
values = values.astype(dtype)
return values
def _ensure_arraylike(values):
"""
ensure that we are arraylike if not already
"""
if not is_array_like(values):
inferred = lib.infer_dtype(values, skipna=False)
if inferred in ["mixed", "string", "unicode"]:
if isinstance(values, tuple):
values = list(values)
values = construct_1d_object_array_from_listlike(values)
else:
values = np.asarray(values)
return values
_hashtables = {
"float64": (htable.Float64HashTable, htable.Float64Vector),
"uint64": (htable.UInt64HashTable, htable.UInt64Vector),
"int64": (htable.Int64HashTable, htable.Int64Vector),
"string": (htable.StringHashTable, htable.ObjectVector),
"object": (htable.PyObjectHashTable, htable.ObjectVector),
}
def _get_hashtable_algo(values):
"""
Parameters
----------
values : arraylike
Returns
-------
tuples(hashtable class,
vector class,
values,
dtype,
ndtype)
"""
values, dtype, ndtype = _ensure_data(values)
if ndtype == "object":
# it's cheaper to use a String Hash Table than Object; we infer
# including nulls because that is the only difference between
# StringHashTable and ObjectHashtable
if lib.infer_dtype(values, skipna=False) in ["string"]:
ndtype = "string"
else:
ndtype = "object"
htable, table = _hashtables[ndtype]
return (htable, table, values, dtype, ndtype)
def _get_data_algo(values, func_map):
if is_categorical_dtype(values):
values = values._values_for_rank()
values, dtype, ndtype = _ensure_data(values)
if ndtype == "object":
# it's cheaper to use a String Hash Table than Object; we infer
# including nulls because that is the only difference between
# StringHashTable and ObjectHashtable
if lib.infer_dtype(values, skipna=False) in ["string"]:
ndtype = "string"
f = func_map.get(ndtype, func_map["object"])
return f, values
# --------------- #
# top-level algos #
# --------------- #
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
    Returns
    -------
    match : ndarray of integers
    """
values = com.asarray_tuplesafe(values)
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
to_match, _, _ = _ensure_data(to_match, dtype)
table = htable(min(len(to_match), 1000000))
table.map_locations(values)
result = table.lookup(to_match)
if na_sentinel != -1:
# replace but return a numpy array
# use a Series because it handles dtype conversions properly
from pandas import Series
result = Series(result.ravel()).replace(-1, na_sentinel)
result = result.values.reshape(result.shape)
return result
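# Illustrative sketch (not part of the upstream module): positions of
# `to_match` within `values`, with the sentinel (-1 by default) marking
# values that are absent.
#
# >>> match(np.array(['a', 'b', 'c']), np.array(['b', 'c']))
# array([-1,  0,  1])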
def unique(values):
"""
Hash table-based unique. Uniques are returned in order
of appearance. This does NOT sort.
Significantly faster than numpy.unique. Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
numpy.ndarray or ExtensionArray
The return can be:
* Index : when the input is an Index
* Categorical : when the input is a Categorical dtype
* ndarray : when the input is a Series/ndarray
Return numpy.ndarray or ExtensionArray.
See Also
--------
Index.unique
Series.unique
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp('20160101'),
... pd.Timestamp('20160101')]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
dtype=object)
>>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
... dtype='datetime64[ns, US/Eastern]', freq=None)
>>> pd.unique(list('baabc'))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'))))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'))))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)))
[b, a, c]
Categories (3, object): [a < b < c]
An array of tuples
>>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
"""
values = _ensure_arraylike(values)
if is_extension_array_dtype(values):
# Dispatch to extension dtype's unique.
return values.unique()
original = values
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
table = htable(len(values))
uniques = table.unique(values)
uniques = _reconstruct_data(uniques, dtype, original)
return uniques
unique1d = unique
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps : array-like
values : array-like
Returns
-------
boolean array same length as comps
"""
if not is_list_like(comps):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{comps_type}]".format(
comps_type=type(comps).__name__
)
)
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]".format(
values_type=type(values).__name__
)
)
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = construct_1d_object_array_from_listlike(list(values))
if is_categorical_dtype(comps):
# TODO(extension)
# handle categoricals
return comps._values.isin(values)
comps = com.values_from_object(comps)
comps, dtype, _ = _ensure_data(comps)
values, _, _ = _ensure_data(values, dtype=dtype)
# faster for larger cases to use np.in1d
f = lambda x, y: htable.ismember_object(x, values)
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
if len(comps) > 1000000 and not is_object_dtype(comps):
f = lambda x, y: np.in1d(x, y)
elif is_integer_dtype(comps):
try:
values = values.astype("int64", copy=False)
comps = comps.astype("int64", copy=False)
f = lambda x, y: htable.ismember_int64(x, y)
except (TypeError, ValueError, OverflowError):
values = values.astype(object)
comps = comps.astype(object)
elif is_float_dtype(comps):
try:
values = values.astype("float64", copy=False)
comps = comps.astype("float64", copy=False)
f = lambda x, y: htable.ismember_float64(x, y)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
return f(comps, values)
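# Illustrative sketch (not part of the upstream module): element-wise
# membership with the helper above, for reference only.
#
# >>> isin(np.array([1, 2, 3]), [2, 4])
# array([False,  True, False])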
def _factorize_array(values, na_sentinel=-1, size_hint=None, na_value=None):
"""Factorize an array-like to labels and uniques.
This doesn't do any coercion of types or unboxing before factorization.
Parameters
----------
values : ndarray
na_sentinel : int, default -1
size_hint : int, optional
        Passed through to the hashtable's 'get_labels' method.
na_value : object, optional
A value in `values` to consider missing. Note: only use this
parameter when you know that you don't have any values pandas would
consider missing in the array (NaN for float data, iNaT for
datetimes, etc.).
Returns
-------
labels, uniques : ndarray
"""
(hash_klass, _), values = _get_data_algo(values, _hashtables)
table = hash_klass(size_hint or len(values))
uniques, labels = table.factorize(
values, na_sentinel=na_sentinel, na_value=na_value
)
labels = ensure_platform_int(labels)
return labels, uniques
_shared_docs[
"factorize"
] = """
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values. `factorize`
is available as both a top-level function :func:`pandas.factorize`,
and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.
Parameters
----------
%(values)s%(sort)s%(order)s
na_sentinel : int, default -1
Value to mark "not found".
%(size_hint)s\
Returns
-------
labels : ndarray
An integer ndarray that's an indexer into `uniques`.
``uniques.take(labels)`` will have the same values as `values`.
uniques : ndarray, Index, or Categorical
The unique valid values. When `values` is Categorical, `uniques`
is a Categorical. When `values` is some other pandas object, an
`Index` is returned. Otherwise, a 1-D ndarray is returned.
.. note ::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
See Also
--------
cut : Discretize continuous-valued array.
unique : Find the unique value in an array.
Examples
--------
These examples all show factorize as a top-level method like
``pd.factorize(values)``. The results are identical for methods like
:meth:`Series.factorize`.
>>> labels, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'])
>>> labels
array([0, 0, 1, 2, 0])
>>> uniques
array(['b', 'a', 'c'], dtype=object)
With ``sort=True``, the `uniques` will be sorted, and `labels` will be
    shuffled so that the relationship is maintained.
>>> labels, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'], sort=True)
>>> labels
array([1, 1, 0, 2, 1])
>>> uniques
array(['a', 'b', 'c'], dtype=object)
Missing values are indicated in `labels` with `na_sentinel`
(``-1`` by default). Note that missing values are never
included in `uniques`.
>>> labels, uniques = pd.factorize(['b', None, 'a', 'c', 'b'])
>>> labels
array([ 0, -1, 1, 2, 0])
>>> uniques
array(['b', 'a', 'c'], dtype=object)
Thus far, we've only factorized lists (which are internally coerced to
NumPy arrays). When factorizing pandas objects, the type of `uniques`
will differ. For Categoricals, a `Categorical` is returned.
>>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
>>> labels, uniques = pd.factorize(cat)
>>> labels
array([0, 0, 1])
>>> uniques
[a, c]
Categories (3, object): [a, b, c]
Notice that ``'b'`` is in ``uniques.categories``, despite not being
present in ``cat.values``.
For all other pandas objects, an Index of the appropriate type is
returned.
>>> cat = pd.Series(['a', 'a', 'c'])
>>> labels, uniques = pd.factorize(cat)
>>> labels
array([0, 0, 1])
>>> uniques
Index(['a', 'c'], dtype='object')
"""
@Substitution(
values=dedent(
"""\
values : sequence
A 1-D sequence. Sequences that aren't pandas objects are
coerced to ndarrays before factorization.
"""
),
order=dedent(
"""\
order : None
.. deprecated:: 0.23.0
This parameter has no effect and is deprecated.
"""
),
sort=dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""
),
size_hint=dedent(
"""\
size_hint : int, optional
Hint to the hashtable sizer.
"""
),
)
@Appender(_shared_docs["factorize"])
@deprecate_kwarg(old_arg_name="order", new_arg_name=None)
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
# Implementation notes: This method is responsible for 3 things
# 1.) coercing data to array-like (ndarray, Index, extension array)
# 2.) factorizing labels and uniques
# 3.) Maybe boxing the output in an Index
#
# Step 2 is dispatched to extension types (like Categorical). They are
# responsible only for factorization. All data coercion, sorting and boxing
# should happen here.
values = _ensure_arraylike(values)
original = values
if is_extension_array_dtype(values):
values = getattr(values, "_values", values)
labels, uniques = values.factorize(na_sentinel=na_sentinel)
dtype = original.dtype
else:
values, dtype, _ = _ensure_data(values)
if (
is_datetime64_any_dtype(original)
or is_timedelta64_dtype(original)
or is_period_dtype(original)
):
na_value = na_value_for_dtype(original.dtype)
else:
na_value = None
labels, uniques = _factorize_array(
values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value
)
if sort and len(uniques) > 0:
from pandas.core.sorting import safe_sort
uniques, labels = safe_sort(
uniques, labels, na_sentinel=na_sentinel, assume_unique=True, verify=False
)
uniques = _reconstruct_data(uniques, dtype, original)
    # box the result back into the original container type
if isinstance(original, ABCIndexClass):
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return labels, uniques
def value_counts(
values, sort=True, ascending=False, normalize=False, bins=None, dropna=True
):
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
    normalize : boolean, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN
Returns
-------
value_counts : Series
"""
from pandas.core.series import Series, Index
name = getattr(values, "name", None)
if bins is not None:
try:
from pandas.core.reshape.tile import cut
values = Series(values)
ii = cut(values, bins, include_lowest=True)
except TypeError:
raise TypeError("bins argument only works with numeric data.")
        # count, remove nulls (from the index), and use the bins
result = ii.value_counts(dropna=dropna)
result = result[result.index.notna()]
result.index = result.index.astype("interval")
result = result.sort_index()
# if we are dropna and we have NO values
if dropna and (result.values == 0).all():
result = result.iloc[0:0]
# normalizing is by len of all (regardless of dropna)
counts = np.array([len(ii)])
else:
if is_extension_array_dtype(values) or is_sparse(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
result.name = name
counts = result.values
else:
keys, counts = _value_counts_arraylike(values, dropna)
if not isinstance(keys, Index):
keys = Index(keys)
result = Series(counts, index=keys, name=name)
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / float(counts.sum())
return result
def _value_counts_arraylike(values, dropna):
"""
Parameters
----------
values : arraylike
dropna : boolean
Returns
-------
(uniques, counts)
"""
values = _ensure_arraylike(values)
original = values
values, dtype, ndtype = _ensure_data(values)
if needs_i8_conversion(dtype):
# i8
keys, counts = htable.value_count_int64(values, dropna)
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
else:
# ndarray like
# TODO: handle uint8
f = getattr(htable, "value_count_{dtype}".format(dtype=ndtype))
keys, counts = f(values, dropna)
mask = isna(values)
if not dropna and mask.any():
if not isna(keys).any():
keys = np.insert(keys, 0, np.NaN)
counts = np.insert(counts, 0, mask.sum())
keys = _reconstruct_data(keys, original.dtype, original)
return keys, counts
def duplicated(values, keep="first"):
"""
Return boolean ndarray denoting duplicate values.
.. versionadded:: 0.19.0
Parameters
----------
values : ndarray-like
Array over which to check for duplicate values.
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray
"""
values, dtype, ndtype = _ensure_data(values)
f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype))
return f(values, keep=keep)
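# Illustrative sketch (not part of the upstream module): with the default
# keep='first', only repeated occurrences are flagged.
#
# >>> duplicated(np.array([1, 2, 1, 3]))
# array([False, False,  True, False])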
def mode(values, dropna=True):
"""
Returns the mode(s) of an array.
Parameters
----------
values : array-like
Array over which to check for duplicate values.
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
mode : Series
"""
from pandas import Series
values = _ensure_arraylike(values)
original = values
# categorical is a fast-path
if is_categorical_dtype(values):
if isinstance(values, Series):
return Series(values.values.mode(dropna=dropna), name=values.name)
return values.mode(dropna=dropna)
if dropna and is_datetimelike(values):
mask = values.isnull()
values = values[~mask]
values, dtype, ndtype = _ensure_data(values)
f = getattr(htable, "mode_{dtype}".format(dtype=ndtype))
result = f(values, dropna=dropna)
try:
result = np.sort(result)
except TypeError as e:
warn("Unable to sort modes: {error}".format(error=e))
result = _reconstruct_data(result, original.dtype, original)
return Series(result)
def rank(values, axis=0, method="average", na_option="keep", ascending=True, pct=False):
"""
Rank the values along a given axis.
Parameters
----------
values : array-like
Array whose values will be ranked. The number of dimensions in this
array must not exceed 2.
axis : int, default 0
Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
The method by which tiebreaks are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
The method by which NaNs are placed in the ranking.
- ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
          are ranked at the top
ascending : boolean, default True
Whether or not the elements should be ranked in ascending order.
pct : boolean, default False
        Whether or not to display the returned rankings in integer form
(e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
"""
if values.ndim == 1:
f, values = _get_data_algo(values, _rank1d_functions)
ranks = f(
values,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
elif values.ndim == 2:
f, values = _get_data_algo(values, _rank2d_functions)
ranks = f(
values,
axis=axis,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
else:
raise TypeError("Array with ndim > 2 are not supported.")
return ranks
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
"""
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : boolean array or None
array indicating which elements to exclude from checking
b_mask : boolean array or boolean or None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
b2 = np.broadcast_to(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
b2_mask = np.broadcast_to(b_mask, arr.shape)
else:
b2_mask = None
# For elements that are NaN, regardless of their value, we should
# ignore whether they overflow or not when doing the checked add.
if arr_mask is not None and b2_mask is not None:
not_nan = np.logical_not(arr_mask | b2_mask)
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
not_nan = np.logical_not(b2_mask)
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
# gh-14324: For each element in 'arr' and its corresponding element
# in 'b2', we check the sign of the element in 'b2'. If it is positive,
# we then check whether its sum with the element in 'arr' exceeds
# np.iinfo(np.int64).max. If so, we have an overflow error. If it
# it is negative, we then check whether its sum with the element in
# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
# error as well.
mask1 = b2 > 0
mask2 = b2 < 0
if not mask1.any():
to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any()
elif not mask2.any():
to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
else:
to_raise = (
((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any()
or (
(np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]
).any()
)
if to_raise:
raise OverflowError("Overflow in int64 addition")
return arr + b
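# Illustrative sketch (not part of the upstream module): how the overflow check
# above behaves near the int64 boundary. Values are chosen only for
# demonstration.
#
# >>> big = np.array([np.iinfo(np.int64).max - 1], dtype=np.int64)
# >>> checked_add_with_arr(big, 1)          # still representable
# array([9223372036854775807])
# >>> checked_add_with_arr(big, 2)          # would wrap around -> raises
# Traceback (most recent call last):
#     ...
# OverflowError: Overflow in int64 addition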
_rank1d_functions = {
"float64": algos.rank_1d_float64,
"int64": algos.rank_1d_int64,
"uint64": algos.rank_1d_uint64,
"object": algos.rank_1d_object,
}
_rank2d_functions = {
"float64": algos.rank_2d_float64,
"int64": algos.rank_2d_int64,
"uint64": algos.rank_2d_uint64,
"object": algos.rank_2d_object,
}
def quantile(x, q, interpolation_method="fraction"):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == "fraction":
score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1)
elif interpolation_method == "lower":
score = values[np.floor(idx)]
elif interpolation_method == "higher":
score = values[np.ceil(idx)]
else:
raise ValueError(
"interpolation_method can only be 'fraction' "
", 'lower' or 'higher'"
)
return score
if is_scalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return algos.arrmap_float64(q, _get_score)
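# Hypothetical usage sketch of ``quantile`` above, showing how the three
# ``interpolation_method`` values differ when the requested quantile falls
# between two data points. The demo function name and values are illustrative.
def _demo_quantile_interpolation(values=None):
    if values is None:
        values = np.array([1, 2, 3, 4])
    # The 0.25 quantile index is 0.75, i.e. between values[0] and values[1].
    fraction = quantile(values, 0.25)                               # 1.75
    lower = quantile(values, 0.25, interpolation_method="lower")    # 1
    higher = quantile(values, 0.25, interpolation_method="higher")  # 2
    return fraction, lower, higher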
# --------------- #
# select n #
# --------------- #
class SelectN:
def __init__(self, obj, n, keep):
self.obj = obj
self.n = n
self.keep = keep
if self.keep not in ("first", "last", "all"):
raise ValueError('keep must be either "first", "last" or "all"')
def nlargest(self):
return self.compute("nlargest")
def nsmallest(self):
return self.compute("nsmallest")
@staticmethod
def is_valid_dtype_n_method(dtype):
"""
Helper function to determine if dtype is valid for
nsmallest/nlargest methods
"""
return (
is_numeric_dtype(dtype) and not is_complex_dtype(dtype)
) or needs_i8_conversion(dtype)
class SelectNSeries(SelectN):
"""
Implement n largest/smallest for Series
Parameters
----------
obj : Series
n : int
keep : {'first', 'last'}, default 'first'
Returns
-------
nordered : Series
"""
def compute(self, method):
n = self.n
dtype = self.obj.dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError(
"Cannot use method '{method}' with "
"dtype {dtype}".format(method=method, dtype=dtype)
)
if n <= 0:
return self.obj[[]]
dropped = self.obj.dropna()
# slow method
if n >= len(self.obj):
reverse_it = self.keep == "last" or method == "nlargest"
ascending = method == "nsmallest"
slc = np.s_[::-1] if reverse_it else np.s_[:]
return dropped[slc].sort_values(ascending=ascending).head(n)
# fast method
arr, pandas_dtype, _ = _ensure_data(dropped.values)
if method == "nlargest":
arr = -arr
if is_integer_dtype(pandas_dtype):
# GH 21426: ensure reverse ordering at boundaries
arr -= 1
elif is_bool_dtype(pandas_dtype):
# GH 26154: ensure False is smaller than True
arr = 1 - (-arr)
if self.keep == "last":
arr = arr[::-1]
narr = len(arr)
n = min(n, narr)
kth_val = algos.kth_smallest(arr.copy(), n - 1)
ns, = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind="mergesort")]
if self.keep != "all":
inds = inds[:n]
if self.keep == "last":
# reverse indices
inds = narr - 1 - inds
return dropped.iloc[inds]
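# A small sketch in plain NumPy (not the pandas internals verbatim) of the
# "fast method" used in SelectNSeries.compute above: a partial selection finds
# the n-th smallest value, and a stable mergesort orders only the candidates
# at or below that boundary.
def _demo_partial_nsmallest_indices(values, n):
    kth_val = np.partition(values, n - 1)[n - 1]
    ns, = np.nonzero(values <= kth_val)
    return ns[values[ns].argsort(kind="mergesort")][:n]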
class SelectNFrame(SelectN):
"""
Implement n largest/smallest for DataFrame
Parameters
----------
obj : DataFrame
n : int
keep : {'first', 'last'}, default 'first'
columns : list or str
Returns
-------
nordered : DataFrame
"""
def __init__(self, obj, n, keep, columns):
super().__init__(obj, n, keep)
if not is_list_like(columns) or isinstance(columns, tuple):
columns = [columns]
columns = list(columns)
self.columns = columns
def compute(self, method):
from pandas import Int64Index
n = self.n
frame = self.obj
columns = self.columns
for column in columns:
dtype = frame[column].dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError(
(
"Column {column!r} has dtype {dtype}, cannot use method "
"{method!r} with this dtype"
).format(column=column, dtype=dtype, method=method)
)
def get_indexer(current_indexer, other_indexer):
"""Helper function to concat `current_indexer` and `other_indexer`
depending on `method`
"""
if method == "nsmallest":
return current_indexer.append(other_indexer)
else:
return other_indexer.append(current_indexer)
# Below we save and reset the index in case index contains duplicates
original_index = frame.index
cur_frame = frame = frame.reset_index(drop=True)
cur_n = n
indexer = Int64Index([])
for i, column in enumerate(columns):
# For each column we apply method to cur_frame[column].
# If it's the last column or if we have the number of
# results desired we are done.
# Otherwise there are duplicates of the largest/smallest
# value and we need to look at the rest of the columns
# to determine which of the rows with the largest/smallest
# value in the column to keep.
series = cur_frame[column]
is_last_column = len(columns) - 1 == i
values = getattr(series, method)(
cur_n, keep=self.keep if is_last_column else "all"
)
if is_last_column or len(values) <= cur_n:
indexer = get_indexer(indexer, values.index)
break
# Now find all values which are equal to
            # the (nsmallest: largest)/(nlargest: smallest)
# from our series.
border_value = values == values[values.index[-1]]
# Some of these values are among the top-n
# some aren't.
unsafe_values = values[border_value]
# These values are definitely among the top-n
safe_values = values[~border_value]
indexer = get_indexer(indexer, safe_values.index)
# Go on and separate the unsafe_values on the remaining
# columns.
cur_frame = cur_frame.loc[unsafe_values.index]
cur_n = n - len(indexer)
frame = frame.take(indexer)
# Restore the index on frame
frame.index = original_index.take(indexer)
# If there is only one column, the frame is already sorted.
if len(columns) == 1:
return frame
ascending = method == "nsmallest"
return frame.sort_values(columns, ascending=ascending, kind="mergesort")
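# Hypothetical usage sketch of the column-by-column tie breaking implemented
# in SelectNFrame.compute above (reached via DataFrame.nlargest); the frame
# contents are illustrative only.
def _demo_nlargest_tie_breaking():
    from pandas import DataFrame

    df = DataFrame({"a": [1, 10, 8, 10, -1], "b": [10, 20, 30, 40, 50]})
    # Both rows with a == 10 sit on the boundary for n=2, so column "b"
    # decides their order: the row with b == 40 comes before b == 20.
    return df.nlargest(2, columns=["a", "b"])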
# ---- #
# take #
# ---- #
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_object(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_object(arr, indexer, out, axis, fill_value, mask_info):
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(ensure_platform_int(indexer), axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
_take_1d_dict = {
("int8", "int8"): algos.take_1d_int8_int8,
("int8", "int32"): algos.take_1d_int8_int32,
("int8", "int64"): algos.take_1d_int8_int64,
("int8", "float64"): algos.take_1d_int8_float64,
("int16", "int16"): algos.take_1d_int16_int16,
("int16", "int32"): algos.take_1d_int16_int32,
("int16", "int64"): algos.take_1d_int16_int64,
("int16", "float64"): algos.take_1d_int16_float64,
("int32", "int32"): algos.take_1d_int32_int32,
("int32", "int64"): algos.take_1d_int32_int64,
("int32", "float64"): algos.take_1d_int32_float64,
("int64", "int64"): algos.take_1d_int64_int64,
("int64", "float64"): algos.take_1d_int64_float64,
("float32", "float32"): algos.take_1d_float32_float32,
("float32", "float64"): algos.take_1d_float32_float64,
("float64", "float64"): algos.take_1d_float64_float64,
("object", "object"): algos.take_1d_object_object,
("bool", "bool"): _view_wrapper(algos.take_1d_bool_bool, np.uint8, np.uint8),
("bool", "object"): _view_wrapper(algos.take_1d_bool_object, np.uint8, None),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
algos.take_1d_int64_int64, np.int64, np.int64, np.int64
),
}
_take_2d_axis0_dict = {
("int8", "int8"): algos.take_2d_axis0_int8_int8,
("int8", "int32"): algos.take_2d_axis0_int8_int32,
("int8", "int64"): algos.take_2d_axis0_int8_int64,
("int8", "float64"): algos.take_2d_axis0_int8_float64,
("int16", "int16"): algos.take_2d_axis0_int16_int16,
("int16", "int32"): algos.take_2d_axis0_int16_int32,
("int16", "int64"): algos.take_2d_axis0_int16_int64,
("int16", "float64"): algos.take_2d_axis0_int16_float64,
("int32", "int32"): algos.take_2d_axis0_int32_int32,
("int32", "int64"): algos.take_2d_axis0_int32_int64,
("int32", "float64"): algos.take_2d_axis0_int32_float64,
("int64", "int64"): algos.take_2d_axis0_int64_int64,
("int64", "float64"): algos.take_2d_axis0_int64_float64,
("float32", "float32"): algos.take_2d_axis0_float32_float32,
("float32", "float64"): algos.take_2d_axis0_float32_float64,
("float64", "float64"): algos.take_2d_axis0_float64_float64,
("object", "object"): algos.take_2d_axis0_object_object,
("bool", "bool"): _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8, np.uint8),
("bool", "object"): _view_wrapper(algos.take_2d_axis0_bool_object, np.uint8, None),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
algos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
_take_2d_axis1_dict = {
("int8", "int8"): algos.take_2d_axis1_int8_int8,
("int8", "int32"): algos.take_2d_axis1_int8_int32,
("int8", "int64"): algos.take_2d_axis1_int8_int64,
("int8", "float64"): algos.take_2d_axis1_int8_float64,
("int16", "int16"): algos.take_2d_axis1_int16_int16,
("int16", "int32"): algos.take_2d_axis1_int16_int32,
("int16", "int64"): algos.take_2d_axis1_int16_int64,
("int16", "float64"): algos.take_2d_axis1_int16_float64,
("int32", "int32"): algos.take_2d_axis1_int32_int32,
("int32", "int64"): algos.take_2d_axis1_int32_int64,
("int32", "float64"): algos.take_2d_axis1_int32_float64,
("int64", "int64"): algos.take_2d_axis1_int64_int64,
("int64", "float64"): algos.take_2d_axis1_int64_float64,
("float32", "float32"): algos.take_2d_axis1_float32_float32,
("float32", "float64"): algos.take_2d_axis1_float32_float64,
("float64", "float64"): algos.take_2d_axis1_float64_float64,
("object", "object"): algos.take_2d_axis1_object_object,
("bool", "bool"): _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8, np.uint8),
("bool", "object"): _view_wrapper(algos.take_2d_axis1_bool_object, np.uint8, None),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
algos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
_take_2d_multi_dict = {
("int8", "int8"): algos.take_2d_multi_int8_int8,
("int8", "int32"): algos.take_2d_multi_int8_int32,
("int8", "int64"): algos.take_2d_multi_int8_int64,
("int8", "float64"): algos.take_2d_multi_int8_float64,
("int16", "int16"): algos.take_2d_multi_int16_int16,
("int16", "int32"): algos.take_2d_multi_int16_int32,
("int16", "int64"): algos.take_2d_multi_int16_int64,
("int16", "float64"): algos.take_2d_multi_int16_float64,
("int32", "int32"): algos.take_2d_multi_int32_int32,
("int32", "int64"): algos.take_2d_multi_int32_int64,
("int32", "float64"): algos.take_2d_multi_int32_float64,
("int64", "int64"): algos.take_2d_multi_int64_int64,
("int64", "float64"): algos.take_2d_multi_int64_float64,
("float32", "float32"): algos.take_2d_multi_float32_float32,
("float32", "float64"): algos.take_2d_multi_float32_float64,
("float64", "float64"): algos.take_2d_multi_float64_float64,
("object", "object"): algos.take_2d_multi_object_object,
("bool", "bool"): _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8, np.uint8),
("bool", "object"): _view_wrapper(algos.take_2d_multi_bool_object, np.uint8, None),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
algos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
if ndim <= 2:
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
def func(arr, indexer, out, fill_value=np.nan):
indexer = ensure_int64(indexer)
_take_nd_object(
arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info
)
return func
def take(arr, indices, axis=0, allow_fill=False, fill_value=None):
"""
Take elements from an array.
.. versionadded:: 0.23.0
Parameters
----------
arr : sequence
Non array-likes (sequences without a dtype) are coerced
to an ndarray.
indices : sequence of integers
Indices to be taken.
axis : int, default 0
The axis over which to select values.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to :func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
          negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type (``self.dtype.na_value``) is used.
For multi-dimensional `arr`, each *element* is filled with
`fill_value`.
Returns
-------
ndarray or ExtensionArray
Same type as the input.
Raises
------
IndexError
When `indices` is out of bounds for the array.
ValueError
When the indexer contains negative values other than ``-1``
and `allow_fill` is True.
Notes
-----
When `allow_fill` is False, `indices` may be whatever dimensionality
is accepted by NumPy for `arr`.
When `allow_fill` is True, `indices` should be 1-D.
See Also
--------
numpy.take
Examples
--------
>>> from pandas.api.extensions import take
With the default ``allow_fill=False``, negative numbers indicate
positional indices from the right.
>>> take(np.array([10, 20, 30]), [0, 0, -1])
array([10, 10, 30])
Setting ``allow_fill=True`` will place `fill_value` in those positions.
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
array([10., 10., nan])
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
... fill_value=-10)
array([ 10, 10, -10])
"""
if not is_array_like(arr):
arr = np.asarray(arr)
indices = np.asarray(indices, dtype=np.intp)
if allow_fill:
# Pandas style, -1 means NA
validate_indices(indices, arr.shape[axis])
result = take_1d(
arr, indices, axis=axis, allow_fill=True, fill_value=fill_value
)
else:
# NumPy style
result = arr.take(indices, axis=axis)
return result
def take_nd(
arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, allow_fill=True
):
"""
Specialized Cython take which sets NaN values in one pass
This dispatches to ``take`` defined on ExtensionArrays. It does not
currently dispatch to ``SparseArray.take`` for sparse ``arr``.
Parameters
----------
arr : array-like
Input array.
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
        indices are filled with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
_maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
(indexer != -1, (indexer != -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
Returns
-------
subarray : array-like
May be the same type as the input, or cast to an ndarray.
"""
# TODO(EA): Remove these if / elifs as datetimeTZ, interval, become EAs
# dispatch to internal type takes
if is_extension_array_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
elif is_datetime64tz_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
elif is_interval_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
if is_sparse(arr):
arr = arr.to_dense()
elif isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr.values
arr = np.asarray(arr)
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = ensure_int64(indexer, copy=False)
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError("Incompatible type for fill_value")
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
flip_order = False
if arr.ndim == 2:
if arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._data.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order="F")
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(
arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
take_1d = take_nd
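# Hypothetical usage sketch of ``take_nd`` above: with the default
# ``allow_fill=True`` a -1 entry in the indexer marks a missing value, so an
# int64 input is promoted to float64 and the missing slot becomes NaN. The
# demo name and values are illustrative only.
def _demo_take_nd_fill():
    arr = np.array([10, 20, 30], dtype=np.int64)
    indexer = np.array([0, 2, -1], dtype=np.int64)
    # Expected result: array([10., 30., nan]) with dtype float64.
    return take_nd(arr, indexer)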
def take_2d_multi(
arr, indexer, out=None, fill_value=np.nan, mask_info=None, allow_fill=True
):
"""
Specialized Cython take which sets NaN values in one pass
"""
if indexer is None or (indexer[0] is None and indexer[1] is None):
row_idx = np.arange(arr.shape[0], dtype=np.int64)
col_idx = np.arange(arr.shape[1], dtype=np.int64)
indexer = row_idx, col_idx
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
row_idx, col_idx = indexer
if row_idx is None:
row_idx = np.arange(arr.shape[0], dtype=np.int64)
else:
row_idx = ensure_int64(row_idx)
if col_idx is None:
col_idx = np.arange(arr.shape[1], dtype=np.int64)
else:
col_idx = ensure_int64(col_idx)
indexer = row_idx, col_idx
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if row_needs or col_needs:
if out is not None and out.dtype != dtype:
raise TypeError("Incompatible type for fill_value")
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is None:
def func(arr, indexer, out, fill_value=np.nan):
_take_2d_multi_object(
arr, indexer, out, fill_value=fill_value, mask_info=mask_info
)
func(arr, indexer, out=out, fill_value=fill_value)
return out
# ------------ #
# searchsorted #
# ------------ #
def searchsorted(arr, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
.. versionadded:: 0.25.0
    Find the indices into a sorted array `arr` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `arr` would be preserved.
Assuming that `arr` is sorted:
====== ================================
`side` returned index `i` satisfies
====== ================================
    left   ``arr[i-1] < value <= arr[i]``
    right  ``arr[i-1] <= value < arr[i]``
====== ================================
Parameters
----------
    arr : array-like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
value : array_like
Values to insert into `arr`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
        index, return either 0 or N (where N is the length of `arr`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
Returns
-------
array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted : Similar method from NumPy.
"""
if sorter is not None:
sorter = ensure_platform_int(sorter)
if (
isinstance(arr, np.ndarray)
and is_integer_dtype(arr)
and (is_integer(value) or is_integer_dtype(value))
):
from .arrays.array_ import array
# if `arr` and `value` have different dtypes, `arr` would be
# recast by numpy, causing a slow search.
# Before searching below, we therefore try to give `value` the
# same dtype as `arr`, while guarding against integer overflows.
iinfo = np.iinfo(arr.dtype.type)
value_arr = np.array([value]) if is_scalar(value) else np.array(value)
if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all():
# value within bounds, so no overflow, so can convert value dtype
# to dtype of arr
dtype = arr.dtype
else:
dtype = value_arr.dtype
if is_scalar(value):
value = dtype.type(value)
else:
value = array(value, dtype=dtype)
elif not (
is_object_dtype(arr) or is_numeric_dtype(arr) or is_categorical_dtype(arr)
):
from pandas.core.series import Series
# E.g. if `arr` is an array with dtype='datetime64[ns]'
# and `value` is a pd.Timestamp, we may need to convert value
value_ser = Series(value)._values
value = value_ser[0] if is_scalar(value) else value_ser
result = arr.searchsorted(value, side=side, sorter=sorter)
return result
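# Hypothetical usage sketch of the module-level ``searchsorted`` above:
# integer inputs take the fast path that casts ``value`` to the dtype of
# ``arr`` before delegating to NumPy's searchsorted. Names are illustrative.
def _demo_searchsorted_sides():
    arr = np.array([1, 2, 2, 3], dtype=np.int64)
    left = searchsorted(arr, 2, side="left")    # 1: first slot keeping order
    right = searchsorted(arr, 2, side="right")  # 3: slot after the last 2
    return left, right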
# ---- #
# diff #
# ---- #
_diff_special = {
"float64": algos.diff_2d_float64,
"float32": algos.diff_2d_float32,
"int64": algos.diff_2d_int64,
"int32": algos.diff_2d_int32,
"int16": algos.diff_2d_int16,
"int8": algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
"""
    Compute the difference of each element with the element ``n`` positions
    before it along ``axis``, analogous to ``s - s.shift(n)``.
Parameters
----------
arr : ndarray
n : int
number of periods
axis : int
axis to shift on
Returns
-------
shifted
"""
n = int(n)
na = np.nan
dtype = arr.dtype
is_timedelta = False
if needs_i8_conversion(arr):
dtype = np.float64
arr = arr.view("i8")
na = iNaT
is_timedelta = True
elif is_bool_dtype(dtype):
dtype = np.object_
elif is_integer_dtype(dtype):
dtype = np.float64
dtype = np.dtype(dtype)
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * arr.ndim
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
out_arr[tuple(na_indexer)] = na
if arr.ndim == 2 and arr.dtype.name in _diff_special:
f = _diff_special[arr.dtype.name]
f(arr, out_arr, n, axis)
else:
res_indexer = [slice(None)] * arr.ndim
res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
res_indexer = tuple(res_indexer)
lag_indexer = [slice(None)] * arr.ndim
lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
if is_timedelta:
res = arr[res_indexer]
lag = arr[lag_indexer]
mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
if mask.any():
res = res.copy()
res[mask] = 0
lag = lag.copy()
lag[mask] = 0
result = res - lag
result[mask] = na
out_arr[res_indexer] = result
else:
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
if is_timedelta:
from pandas import TimedeltaIndex
out_arr = (
TimedeltaIndex(out_arr.ravel().astype("int64"))
.asi8.reshape(out_arr.shape)
.astype("timedelta64[ns]")
)
return out_arr
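# Hypothetical usage sketch of ``diff`` above: an integer input is promoted
# to float64 so the first ``n`` positions can hold NaN. The demo name and
# values are illustrative only.
def _demo_diff_basic():
    arr = np.array([1, 3, 6, 10], dtype=np.int64)
    # Expected result: array([nan, 2., 3., 4.])
    return diff(arr, 1)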
| apache-2.0 |
stylianos-kampakis/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
schets/scikit-learn | sklearn/grid_search.py | 4 | 34455 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import _check_cv as check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# get complete grid and yield from it
param_grid = list(ParameterGrid(self.param_distributions))
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
    # By deriving a namedtuple subclass just to introduce the __repr__ method
    # we would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes (see the short illustration after this
    # class). Furthermore we don't need any additional slot in the subclass,
    # so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
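# A minimal illustration (not used by scikit-learn) of the memory note in the
# comments above: subclassing a namedtuple without ``__slots__ = ()``
# reintroduces a per-instance ``__dict__``, which _CVScoreTuple avoids. All
# names below are local to this demo function.
def _demo_namedtuple_slots():
    Base = namedtuple('Base', ('a', 'b'))

    class WithDict(Base):
        pass

    class WithoutDict(Base):
        __slots__ = ()

    # Expected: (True, False)
    return (hasattr(WithDict(1, 2), '__dict__'),
            hasattr(WithoutDict(1, 2), '__dict__'))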
class ChangedBehaviorWarning(UserWarning):
pass
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of quadruplets: score, n_test_samples, time, parameters
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
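# A minimal sketch (not part of scikit-learn) of the score aggregation in
# ``_fit`` above: with ``iid=True`` each fold score is weighted by its number
# of test samples, otherwise all folds count equally. Names and the example
# fold scores/sizes are illustrative only.
def _demo_iid_score_aggregation(fold_scores=(0.9, 0.6), fold_sizes=(90, 10),
                                iid=True):
    if iid:
        weighted = sum(s * n for s, n in zip(fold_scores, fold_sizes))
        return weighted / float(sum(fold_sizes))        # 0.87 for the defaults
    return sum(fold_scores) / float(len(fold_scores))   # 0.75 if iid=False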
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
degree=..., gamma=..., kernel='rbf', max_iter=-1,
probability=False, random_state=None, shrinking=True,
tol=..., verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
-----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, loss_func=None,
score_func=None, fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict are optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameter names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
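# A minimal usage sketch for RandomizedSearchCV (parameter choices below are
# illustrative, not a recommended configuration):
#
#     from scipy.stats import expon
#     from sklearn import datasets, grid_search, svm
#     iris = datasets.load_iris()
#     param_dist = {'kernel': ['linear', 'rbf'], 'C': expon(scale=10)}
#     search = grid_search.RandomizedSearchCV(svm.SVC(), param_dist, n_iter=5)
#     search = search.fit(iris.data, iris.target)
#     print(search.best_params_)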
| bsd-3-clause |
expectocode/telegram-analysis | activehours.py | 2 | 4982 | #!/usr/bin/env python3
"""
A program to plot the activity of a chat over 24 hours
"""
import argparse
from json import loads
from datetime import date,timedelta,datetime
from os import path
from collections import defaultdict
import matplotlib.pyplot as plt
from sys import maxsize
def extract_info(event):
text_time = datetime.fromtimestamp(event['date']).hour
text_date = date.fromtimestamp(event['date'])
text_length = len(event['text'])
return text_date, text_time, text_length
def make_ddict_in_range(json_file,start,end):
"""
return a defaultdict(int) mapping each hour of the day to the total message length (in characters) within a date range
"""
events = (loads(line) for line in json_file)
#generator, so whole file is not put in mem
msg_infos = (extract_info(event) for event in events if 'text' in event)
msg_infos = ((date,time,length) for (date,time,length) in msg_infos if date >= start and date <= end)
counter = defaultdict(int)
#a dict with hours as keys and frequency as values
for date_text,time_text,length in msg_infos:
counter[time_text] += length
return counter
def parse_args():
parser = argparse.ArgumentParser(
description="Visualise the most active times of day in a Telegram chat")
required = parser.add_argument_group('required arguments')
#https://stackoverflow.com/questions/24180527/argparse-required-arguments-listed-under-optional-arguments
required.add_argument(
'-f', '--file',
help='paths to the json file (chat log) to analyse.',
required = True
)
parser.add_argument(
'-o', '--output-folder',
help='the folder to save the activity graph image in.'
'Using this option will make the graph not display on screen.')
#parser.add_argument(
# '-b', '--bin-size',
# help='the number of days to group together as one datapoint. '
# 'Higher number is more smooth graph, lower number is more spiky. '
# 'Default 3.',
# type=int,default=3)
# #and negative bin sizes are = 1
parser.add_argument(
'-s','--figure-size',
help='the size of the figure shown or saved (X and Y size).'
'Choose an appropriate value for your screen size. Default 14 8.',
nargs=2,type=int,default=[14,8]
)
parser.add_argument(
'-d','--date-range',
help='the range of dates you want to look at data between. '
'Must be in format YYYY-MM-DD YYYY-MM-DD with the first date '
'the start of the range, and the second the end. Example: '
"-d '2017-11-20 2017-05-15'. Make sure you don't put a day "
'that is too high for the month eg 30th February.',
default="1000-01-01 4017-01-01"
#hopefully no chatlogs contain these dates :p
)
return parser.parse_args()
def save_figure(folder,filename):
if len(filename) > 200:
#file name likely to be so long as to cause issues
figname = input(
"This graph is going to have a very long file name. Please enter a custom name(no need to add an extension): ")
else:
figname = "Active hours in {}".format(filename)
plt.savefig("{}/{}.png".format(folder, figname))
def annotate_figure(filename):
plt.title("Active hours in {}".format(filename))
plt.ylabel("Activity level (chars)", size=14)
plt.xlabel("Hour of the day", size=14)
#sidenote: hours are in local time, since datetime.fromtimestamp uses the local timezone
plt.gca().set_xlim([0,24])
plt.xticks(([x+0.5 for x in range(24)]),range(24))
#if binsize > 1:
# plt.ylabel("Activity level (chars per {} days)".format(binsize), size=14)
#else:
# plt.ylabel("Activity level (chars per day)", size=14)
def get_dates(arg_dates):
if " " not in arg_dates:
print("You must put a space between start and end dates")
exit()
daterange = arg_dates.split()
start_date = datetime.strptime(daterange[0], "%Y-%m-%d").date()
end_date = datetime.strptime(daterange[1], "%Y-%m-%d").date()
return (start_date,end_date)
def main():
"""
main function
"""
args = parse_args()
filepath = args.file
savefolder = args.output_folder
figure_size = args.figure_size
start_date,end_date = get_dates(args.date_range)
filename = path.splitext(path.split(filepath)[-1])[0]
plt.figure(figsize=figure_size)
with open(filepath, 'r') as jsonfile:
chat_counter = make_ddict_in_range(
jsonfile,start_date,end_date)
plt.bar(*zip(*chat_counter.items()))
annotate_figure(filename)
if savefolder is not None:
#if there is a given folder to save the figure in, save it there
save_figure(savefolder,filename)
else:
#if a save folder was not specified, just open a window to display graph
plt.show()
if __name__ == "__main__":
main()
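# Example invocation (file and folder names are hypothetical):
#
#     python3 activehours.py -f chatlog.jsonl -d '2017-01-01 2017-06-30' -o graphs
#
# which plots total characters sent per hour of the day for the first half of
# 2017 and saves the figure into the 'graphs' folder instead of displaying it.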
| mit |
BinRoot/TensorFlow-Book | ch07_autoencoder/main_imgs.py | 1 | 1819 | # https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
import cPickle
import numpy as np
from autoencoder import Autoencoder
#
# def grayscale(x):
# gray = np.zeros(len(x)/3)
# for i in range(len(x)/3):
# gray[i] = (x[i] + x[2*i] + x[3*i]) / 3
def grayscale(a):
return a.reshape(a.shape[0], 3, 32, 32).mean(1).reshape(a.shape[0], -1)
def unpickle(file):
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
names = unpickle('./cifar-10-batches-py/batches.meta')['label_names']
data, labels = [], []
for i in range(1, 6):
filename = './cifar-10-batches-py/data_batch_' + str(i)
batch_data = unpickle(filename)
if len(data) > 0:
data = np.vstack((data, batch_data['data']))
labels = np.vstack((labels, batch_data['labels']))
else:
data = batch_data['data']
labels = batch_data['labels']
data = grayscale(data)
x = np.matrix(data)
y = np.array(labels)
horse_indices = np.where(y == 7)[0]
horse_x = x[horse_indices]
print(np.shape(horse_x)) # (5000, 1024) after grayscale: 32*32 features per image
input_dim = np.shape(horse_x)[1]
hidden_dim = 100
ae = Autoencoder(input_dim, hidden_dim)
ae.train(horse_x)
test_data = unpickle('./cifar-10-batches-py/test_batch')
test_x = grayscale(test_data['data'])
test_labels = np.array(test_data['labels'])
encoding = ae.classify(test_x, test_labels)
encoding = np.matrix(encoding)
from matplotlib import pyplot as plt
# encoding = np.matrix(np.random.choice([0, 1], size=(hidden_dim,)))
original_img = np.reshape(test_x[7,:], (32,32))
plt.imshow(original_img, cmap='Greys_r')
plt.show()
print(np.size(encoding))
while(True):
img = ae.decode(encoding)
plt.imshow(img, cmap='Greys_r')
plt.show()
rand_idx = np.random.randint(np.size(encoding))
encoding[0,rand_idx] = np.random.randint(2)
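# Note on the loop above (the decoder's output shape is assumed from the
# plt.imshow call): each iteration decodes the current 100-dimensional
# encoding into a greyscale image, displays it, then assigns a random 0/1
# value to one randomly chosen entry of the encoding, so successive images
# show how small changes in the code move the reconstruction around.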
| mit |
PatrickChrist/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
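# A short recap of the closed form implemented above (X, y and X_new below
# are placeholders, not variables defined in this module): fit solves
# (K + alpha * I) * dual_coef_ = y for the training kernel matrix K, and
# predict returns the product of K(X_new, X_fit_) with dual_coef_:
#
#     clf = KernelRidge(kernel='rbf', gamma=0.1, alpha=1.0).fit(X, y)
#     y_new = clf.predict(X_new)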
| bsd-3-clause |
dmigo/incubator-superset | superset/dataframe.py | 1 | 5471 | # -*- coding: utf-8 -*-
# pylint: disable=C,R,W
""" Superset wrapper around pandas.DataFrame.
TODO(bkyryliuk): add support for the conventions like: *_dim or dim_*
dimensions, *_ts, ts_*, ds_*, *_ds - datetime, etc.
TODO(bkyryliuk): recognize integer encoded enums.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import date, datetime
import numpy as np
import pandas as pd
from pandas.core.common import _maybe_box_datetimelike
from pandas.core.dtypes.dtypes import ExtensionDtype
from past.builtins import basestring
from superset.utils import JS_MAX_INTEGER
INFER_COL_TYPES_THRESHOLD = 95
INFER_COL_TYPES_SAMPLE_SIZE = 100
class SupersetDataFrame(object):
# Mapping numpy dtype.char to generic database types
type_map = {
'b': 'BOOL', # boolean
'i': 'INT', # (signed) integer
'u': 'INT', # unsigned integer
'l': 'INT', # 64bit integer
'f': 'FLOAT', # floating-point
'c': 'FLOAT', # complex-floating point
'm': None, # timedelta
'M': 'DATETIME', # datetime
'O': 'OBJECT', # (Python) objects
'S': 'BYTE', # (byte-)string
'U': 'STRING', # Unicode
'V': None, # raw data (void)
}
def __init__(self, df):
self.__df = df.where((pd.notnull(df)), None)
@property
def size(self):
return len(self.__df.index)
@property
def data(self):
# work around for https://github.com/pandas-dev/pandas/issues/18372
data = [dict((k, _maybe_box_datetimelike(v))
for k, v in zip(self.__df.columns, np.atleast_1d(row)))
for row in self.__df.values]
for d in data:
for k, v in list(d.items()):
# if an int is too big for JavaScript to handle
# convert it to a string
if isinstance(v, int):
if abs(v) > JS_MAX_INTEGER:
d[k] = str(v)
return data
@classmethod
def db_type(cls, dtype):
"""Given a numpy dtype, Returns a generic database type"""
if isinstance(dtype, ExtensionDtype):
return cls.type_map.get(dtype.kind)
return cls.type_map.get(dtype.char)
@classmethod
def datetime_conversion_rate(cls, data_series):
success = 0
total = 0
for value in data_series:
total += 1
try:
pd.to_datetime(value)
success += 1
except Exception:
continue
return 100 * success / total
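# Worked example (illustrative values): for a sampled column
# ['2018-01-01', 'hello', '2018-02-03'] two of the three values parse, so the
# rate is 100 * 2 / 3 ~= 66.7, below INFER_COL_TYPES_THRESHOLD (95), and the
# column stays STRING rather than being promoted to a date.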
@classmethod
def is_date(cls, dtype):
if dtype.name:
return dtype.name.startswith('datetime')
@classmethod
def is_dimension(cls, dtype, column_name):
if cls.is_id(column_name):
return False
return dtype.name in ('object', 'bool')
@classmethod
def is_id(cls, column_name):
return column_name.startswith('id') or column_name.endswith('id')
@classmethod
def agg_func(cls, dtype, column_name):
# consider checking for key substring too.
if cls.is_id(column_name):
return 'count_distinct'
if (issubclass(dtype.type, np.generic) and
np.issubdtype(dtype, np.number)):
return 'sum'
return None
@property
def columns(self):
"""Provides metadata about columns for data visualization.
:return: dict, with the fields name, type, is_date, is_dim and agg.
"""
if self.__df.empty:
return None
columns = []
sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.__df.index))
sample = self.__df
if sample_size:
sample = self.__df.sample(sample_size)
for col in self.__df.dtypes.keys():
col_db_type = self.db_type(self.__df.dtypes[col])
column = {
'name': col,
'agg': self.agg_func(self.__df.dtypes[col], col),
'type': col_db_type,
'is_date': self.is_date(self.__df.dtypes[col]),
'is_dim': self.is_dimension(self.__df.dtypes[col], col),
}
if column['type'] in ('OBJECT', None):
v = sample[col].iloc[0] if not sample[col].empty else None
if isinstance(v, basestring):
column['type'] = 'STRING'
elif isinstance(v, int):
column['type'] = 'INT'
elif isinstance(v, float):
column['type'] = 'FLOAT'
elif isinstance(v, (datetime, date)):
column['type'] = 'DATETIME'
column['is_date'] = True
column['is_dim'] = False
# check if encoded datetime
if (
column['type'] == 'STRING' and
self.datetime_conversion_rate(sample[col]) >
INFER_COL_TYPES_THRESHOLD):
column.update({
'is_date': True,
'is_dim': False,
'agg': None,
})
# 'agg' is optional attribute
if not column['agg']:
column.pop('agg', None)
columns.append(column)
return columns
| apache-2.0 |
Scaravex/clue-hackathon | src/clusters.py | 2 | 2247 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 19 15:45:16 2017
@author: mskara
"""
##labelling
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA as sklearnPCA
from sklearn.cluster import KMeans
from clustering.preprocess_users import users_data
def cluster_model(users_data, num_cluster=3):
array_users = users_data.values
X = array_users[:, 1:17]
X_std = StandardScaler().fit_transform(X)
sklearn_pca = sklearnPCA(n_components = 3)
Y_sklearn = sklearn_pca.fit_transform(X_std)
eigenValues = sklearn_pca.explained_variance_ratio_
loadings = sklearn_pca.components_
mu = np.mean(X, axis=0)
nComp = 2
Xhat = np.dot(sklearn_pca.transform(X)[:,:nComp], sklearn_pca.components_[:nComp,:])
Xhat = mu + Xhat
Xhat = pd.DataFrame(Xhat)
# X = PCA on previous data
X = Xhat.iloc[:, 0:2]  # first two columns of the reconstruction (Xhat has integer column labels)
k = num_cluster # Define the number of clusters in which we want to partion the data
kmeans = KMeans(n_clusters = k) # Run the algorithm kmeans
kmeans.fit(X);
##sklearn.preprocessing.StandardScaler
centroids = kmeans.cluster_centers_ # Get centroid's coordinates for each cluster
labels = kmeans.labels_ # Get labels assigned to each data
final_labels = users_data[['user_id']].copy()
final_labels['labels'] = labels
return final_labels
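# Reading of the pipeline above (a descriptive note, not extra functionality):
# PCA is fitted on the standardised user features, the data is reconstructed
# from the first two components, k-means is run on two columns of that
# reconstruction, and the function returns a two-column DataFrame mapping
# each user_id to a cluster label in 0..num_cluster-1.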
final_labels = cluster_model(users_data, num_cluster=3)
# labels1 = df_active['day_in_cycle']
# merged = df_active.join(info_df.set_index(['user_id']), on = ['user_id'], how = 'right', lsuffix='_x')
# labels2 = merged['labels']
'''
colors = ['r.', 'g.','b.'] # Define two colors for the plot below
plt.figure()
for i in range(len(X)):
plt.plot(X[i,0], X[i,1], colors[labels[i]], markersize = 30)
plt.scatter(centroids[:,0],centroids[:,1], marker = "x", s = 300, linewidths = 5) # Plot centroids
plt.show()
info_df['labels'] = labels
labels = df_a['day_in_cycle']
merged = df_a.join(info_df.set_index(['user_id']), on = ['user_id'], how = 'right', lsuffix='_x')
labels = merged['labels']
'''
| apache-2.0 |
gianlucacorrado/RNAcommender | rnacommender/rnafeatures.py | 1 | 6638 | #!/usr/bin/env python
"""Compute the RNA features."""
from __future__ import print_function
import argparse
import sys
from eden.converter.fasta import fasta_to_sequence
from eden.converter.rna.rnaplfold import rnaplfold_to_eden
from eden.graph import Vectorizer
from eden.util import vectorize as eden_vectorize
import pandas as pd
from rnacommender import fasta_utils
__author__ = "Gianluca Corrado"
__copyright__ = "Copyright 2016, Gianluca Corrado"
__license__ = "MIT"
__maintainer__ = "Gianluca Corrado"
__email__ = "[email protected]"
__status__ = "Production"
class RNAVectorizer():
"""Compute the RNA features."""
def __init__(self, fasta, output, window_size=150, max_bp_span=40,
avg_bp_prob_cutoff=0.4, complexity=2, nbits=10,
njobs=-1, verbose=True):
"""
Constructor.
Parameters
----------
fasta : str
Fasta file containing the RNA sequences.
output : str
Name of the output file. The output file is an HDF5 containing a
pandas DataFrame, in which the columns are the RNA names and the
rows are the EDeN features.
window_size : int (default : 150)
Window size of RNAplfold. Average the pair
probabilities over windows of given size.
max_bp_span : int (default : 40)
Maximum allowed separation of a base pair to span.
I.e. no pairs (i,j) with j-i > span will be allowed.
avg_bp_prob_cutoff : float (default : 0.4)
Report only base pairs with an average probability > cutoff.
complexity : int (default : 2)
Complexity of the features extracted. Equivalent to
setting the EDeN parameters d = r = complexity.
nbits : int (default : 10)
Number of bits that defines the feature space size:
|feature space|=2^nbits.
njobs : int (default : -1)
Number of parallel jobs (default: all CPUs).
verbose : bool (default : True)
Print information to STDOUT.
"""
self.fasta = fasta
self.output = output
self.window_size = window_size
self.max_bp_span = max_bp_span
self.avg_bp_prob_cutoff = avg_bp_prob_cutoff
self.complexity = complexity
self.nbits = nbits
self.njobs = njobs
self.verbose = verbose
def _fold_sequences(self):
"""Fold the RNA sequences using RNAplfold."""
if self.verbose:
print("Folding sequences using RNAplfold -W %i -L %i -c %f \
--noLP..." % (self.window_size, self.max_bp_span,
self.avg_bp_prob_cutoff), end=' ')
sys.stdout.flush()
seqs = fasta_to_sequence(self.fasta)
graphs = rnaplfold_to_eden(seqs,
window_size=self.window_size,
max_bp_span=self.max_bp_span,
avg_bp_prob_cutoff=self.avg_bp_prob_cutoff,
max_num_edges=1)
if self.verbose:
print("Done.\n")
sys.stdout.flush()
return graphs
def _vectorize_graphs(self, graphs):
"""Vectorize the RNAplfold graphs using EDeN."""
if self.verbose:
print("Vectorizing (complexity: %i, hashing: %i bits)..." %
(self.complexity, self.nbits), end=' ')
sys.stdout.flush()
vec = Vectorizer(complexity=self.complexity, nbits=self.nbits)
x_sparse = eden_vectorize(graphs, vectorizer=vec, n_jobs=self.njobs)
if self.verbose:
print("Done.\n")
sys.stdout.flush()
return x_sparse.todense()
def vectorize(self):
"""Produce the RNAfeatures."""
names = fasta_utils.seq_names(self.fasta)
graphs = self._fold_sequences()
x = self._vectorize_graphs(graphs)
df = pd.DataFrame(x.T[1:], columns=names)
store = pd.io.pytables.HDFStore(self.output)
store['features'] = df
store.close()
if self.verbose:
print("Done.\n")
print("RNA features saved to %s" % self.output)
sys.stdout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('fasta', metavar='fasta', type=str,
help="""Fasta file containing the RNA sequences.""")
parser.add_argument('output', metavar='output', type=str,
help="""File name of the HDF Store to save the RNA \
features.""")
# RNAplfold parameters
parser.add_argument('--window-size', metavar='window_size', type=int,
default=150, help="""Window size of RNAplfold.""")
parser.add_argument('--max-bp-span', metavar='max_bp_span', type=int,
default=40, help="""Maximum allowed separation of a \
base pair to span.""")
parser.add_argument('--avg-bp-prob-cutoff', metavar='avg_bp_prob_cutoff',
type=float, default=0.4, help="""Report only base \
pairs with an average probability > cutoff.""")
# EDeN parameters
parser.add_argument('--complexity', metavar='complexity', type=int,
default=2, help="""Complexity of the features \
extracted.""")
parser.add_argument('--nbits', metavar='nbits', type=int, default=10,
help="""Number of bits that defines the feature space \
size: |feature space|=2^nbits.""")
# Other paramentes
parser.add_argument('--njobs', metavar='njobs', type=int, default=-1,
help="""Number of parallel jobs (-1 means all \
CPUs).""")
parser.add_argument('--quiet', dest='quiet', action='store_true',
default=False, help="""Do not print information at \
STDOUT.""")
args = parser.parse_args()
v = RNAVectorizer(fasta=args.fasta,
output=args.output,
window_size=args.window_size,
max_bp_span=args.max_bp_span,
avg_bp_prob_cutoff=args.avg_bp_prob_cutoff,
complexity=args.complexity,
nbits=args.nbits,
njobs=args.njobs,
verbose=(not args.quiet))
v.vectorize()
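# Programmatic usage sketch (file names here are hypothetical):
#
#     vec = RNAVectorizer(fasta='transcripts.fa', output='rna_features.h5',
#                         njobs=4, verbose=False)
#     vec.vectorize()
#     feats = pd.read_hdf('rna_features.h5', 'features')  # one column per RNA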
| mit |
OpenWeavers/openanalysis | doc/conf.py | 1 | 6129 | # -*- coding: utf-8 -*-
#
# OpenAnalysis documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 19 12:44:16 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import sys
import os
sys.path.insert(0, os.path.abspath('../Python/'))
extensions = ['nbsphinx',
'sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.githubpages'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'openanalysis'
copyright = u'2017, OpenWeavers'
author = u'OpenWeavers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0-rc'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
autodoc_mock_imports = ['matplotlib', 'networkx', 'gi', 'numpy', 'mpl_toolkits','_tkinter']
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_logo = 'res/logo192.png'
html_favicon = 'res/icon.ico'
applehelp_icon = 'res/logo16.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenAnalysisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
'fontpkg': r'''\setmainfont{DejaVu Serif}
\setsansfont{DejaVu Sans}
\setmonofont{DejaVu Sans Mono}
''',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
'preamble': r"""\usepackage{atbegshi} % http://ctan.org/pkg/atbegshi
\AtBeginDocument{\AtBeginShipoutNext{\AtBeginShipoutDiscard}}
\documentclass{book}
\usepackage[titles]{tocloft}
\cftsetpnumwidth {1.25cm}\cftsetrmarg{1.5cm}
\setlength{\cftchapnumwidth}{0.75cm}
\setlength{\cftsecindent}{\cftchapnumwidth}
\setlength{\cftsecnumwidth}{1.25cm}
\usepackage[draft]{minted}\fvset{breaklines=true}
\addto\captionsenglish{\renewcommand{\contentsname}{Table of contents}}
""",
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
# Remove blank pages in pdf
'classoptions': ',openany,oneside',
'fncychap': r'\usepackage[Bjornstrup]{fncychap}',
'printindex': r'\footnotesize\raggedright\printindex'
}
# nbsphinx_execute = 'always'
# nbsphinx_execute_arguments = ['--InlineBackend.figure_formats={"svg", "pdf"}']
# nbsphinx_allow_errors = True
latex_logo = 'res/logo192.png'
latex_show_urls = 'footnote'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'openanalysis.tex', u'openanalysis Documentation',
u'OpenWeavers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'openanalysis', u'openanalysis Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'openanalysis', u'openanalysis Documentation',
author, 'openanalysis', 'One line description of project.',
'Miscellaneous'),
]
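# Build sketch (assuming this conf.py sits in the repository's doc/ directory;
# run from the repository root):
#
#     sphinx-build -b html doc doc/_build/html
#
# Note that the \setmainfont lines in 'fontpkg' above rely on fontspec, so the
# LaTeX output is expected to be compiled with a Unicode engine such as
# xelatex or lualatex.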
| gpl-3.0 |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/scipy/stats/stats.py | 8 | 179053 | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
zmap
zscore
iqr
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
# Scipy imports.
from scipy._lib.six import callable, string_types, xrange
from scipy._lib._version import NumpyVersion
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import _find_repeats, linregress, theilslopes
from ._stats import _kendall_condis
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'iqr', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata',
'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
"""
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
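# Quick numeric check (values are illustrative): the geometric mean of [1, 4]
# is sqrt(1 * 4) = 2, whereas the arithmetic mean is 2.5; for any positive
# data gmean(a) <= np.mean(a) (AM-GM inequality).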
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0): # Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return np.array([]), np.array([])
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
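    Examples
    --------
    A short illustration with values chosen so that the results are exact:
    >>> from scipy import stats
    >>> stats.moment([1, 2, 3, 4, 5], moment=1)
    0.0
    >>> stats.moment([1, 2, 3, 4, 5], moment=2)
    2.0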
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
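        # For example, moment=5 reduces to the exponent sequence 5 -> 2, so
        # starting from the squared deviations the loop below squares once and
        # then multiplies by the deviations once to reach the fifth power.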
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
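    Examples
    --------
    An illustrative sample with mean 5 and (biased) standard deviation 2:
    >>> from scipy import stats
    >>> stats.variation([2, 4, 4, 4, 5, 5, 7, 9])
    0.4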
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
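    Examples
    --------
    A symmetric sample has zero skewness:
    >>> from scipy import stats
    >>> stats.skew([1, 2, 3, 4, 5])
    0.0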
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
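    Examples
    --------
    Illustrative values; for constant data the documented limiting values are
    returned:
    >>> from scipy import stats
    >>> stats.kurtosis([1, 2, 3, 4, 5], fisher=False)
    1.7
    >>> stats.kurtosis([1, 1, 1, 1])
    -3.0
    >>> stats.kurtosis([1, 1, 1, 1], fisher=False)
    0.0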
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
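    Examples
    --------
    As noted above, at least eight observations are required:
    >>> from scipy import stats
    >>> stats.skewtest(np.arange(7))
    Traceback (most recent call last):
        ...
    ValueError: skewtest is not valid with less than 8 samples; 7 samples were given.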
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
    This function will become obsolete in the future.
    For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality
    that `scoreatpercentile` provides, and it is significantly faster.
    Users with numpy >= 1.9 are therefore encouraged to use `numpy.percentile`
    instead.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
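        # e.g. per=50 on 100 sorted values gives idx = 49.5, so elements 49
        # and 50 each receive weight 0.5 and the result is their average
        # (the 49.5 returned in the `scoreatpercentile` doctest above).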
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
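        # Average the sorted-position ranks of every entry equal to `score`
        # (appending the score first if it is not already in `a`) and express
        # that mean rank as a percentage of the original sample size.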
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range x is given by
    bins[x] <= range_x < bins[x+1] where x = 0,N and N is the
length of the `bins` array. The last range is given by
bins[N] <= range_N < infinity. Values less than bins[0] are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
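    Examples
    --------
    An illustrative call with three bins, [0, 2), [2, 4) and [4, inf):
    >>> from scipy import stats
    >>> stats.histogram2([1, 2, 3, 3, 5], [0, 2, 4])
    array([1, 3, 1])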
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
@np.deprecate(message=("scipy.stats.histogram is deprecated in scipy 0.17.0; "
"use np.histogram instead"))
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
# _histogram is used in relfreq/cumfreq, so need to keep it
res = _histogram(a, numbins=numbins, defaultlimits=defaultlimits,
weights=weights, printextras=printextras)
return res
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
    This histogram is based on numpy's histogram but has a larger range by
    default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
return np.array(arrays)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
    The default value for `ddof` is different from the default (0) used by other
    ddof-containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
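# The 'normal' entry equals 2*sqrt(2)*erfinv(0.5) ~= 1.349, the IQR of a
# standard normal distribution, so dividing an IQR by it yields a robust
# estimate of the standard deviation (see the `iqr` docstring below).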
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
interpolation='linear', keepdims=False):
"""
Compute the interquartile range of the data along the specified
axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
    rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
'raw' : No scaling, just return the raw IQR.
'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`.
The default is 'raw'. Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate'
returns nan, 'raise' throws an error, 'omit' performs the
calculations ignoring nan values. Default is 'propagate'.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`:
* 'linear' : `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower' : `i`.
* 'higher' : `j`.
* 'nearest' : `i` or `j` whichever is nearest.
* 'midpoint' : `(i + j) / 2`.
Default is 'linear'.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, string_types):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = _iqr_nanpercentile
else:
percentile_func = _iqr_percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims, contains_nan=contains_nan)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around older versions of `numpy`.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
# I see no way to avoid the version check to ensure that the corrected
# NaN behavior has been implemented except to call `percentile` on a
# small array.
msg = "Keyword nan_policy='propagate' not correctly supported for " \
"numpy versions < 1.10.x. The default behavior of " \
"`numpy.percentile` will be used."
warnings.warn(msg, RuntimeWarning)
try:
# For older versions of numpy, there are two things that can cause a
# problem here: missing keywords and non-scalar axis. The former can be
# partially handled with a warning, the latter can be handled fully by
# hacking in an implementation similar to numpy's function for
# providing multi-axis functionality
# (`numpy.lib.function_base._ureduce` for the curious).
result = np.percentile(x, q, axis=axis, keepdims=keepdims,
interpolation=interpolation)
except TypeError:
if interpolation != 'linear' or keepdims:
            # At time of writing, this means np.__version__ < 1.9.0
warnings.warn("Keywords interpolation and keepdims not supported "
"for your version of numpy", RuntimeWarning)
try:
# Special processing if axis is an iterable
original_size = len(axis)
except TypeError:
# Axis is a scalar at this point
pass
else:
axis = np.unique(np.asarray(axis) % x.ndim)
if original_size > axis.size:
# mimic numpy if axes are duplicated
raise ValueError("duplicate value in axis")
if axis.size == x.ndim:
# axis includes all axes: revert to None
axis = None
elif axis.size == 1:
# no rolling necessary
axis = axis[0]
else:
# roll multiple axes to the end and flatten that part out
for ax in axis[::-1]:
x = np.rollaxis(x, ax, x.ndim)
x = x.reshape(x.shape[:-axis.size] +
(np.prod(x.shape[-axis.size:]),))
axis = -1
result = np.percentile(x, q, axis=axis)
return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around the following:
1. A bug in `np.nanpercentile` that was around until numpy version
1.11.0.
2. A bug in `np.percentile` NaN handling that was fixed in numpy
version 1.10.0.
3. The non-existence of `np.nanpercentile` before numpy version
1.9.0.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if hasattr(np, 'nanpercentile'):
        # np.nanpercentile exists, i.e. at time of writing np.__version__ >= 1.9.0
result = np.nanpercentile(x, q, axis=axis,
interpolation=interpolation, keepdims=keepdims)
        # For a non-scalar result, older versions of nanpercentile do not
        # roll the reduced axis into place properly.
# I see no way of avoiding the version test since dimensions may just
# happen to match in the data.
if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
axis = np.asarray(axis)
if axis.size == 1:
# If only one axis specified, reduction happens along that dimension
if axis.ndim == 0:
axis = axis[None]
result = np.rollaxis(result, axis[0])
else:
                # If multiple axes, reduced dimension is last
result = np.rollaxis(result, -1)
else:
msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
"versions < 1.9.x. The default behavior of numpy.percentile " \
"will be used."
warnings.warn(msg, RuntimeWarning)
result = _iqr_percentile(x, q, axis=axis)
return result
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
    The output array contains only those elements of the input array `a`
    that satisfy the conditions ::
        mean(c) - std(c)*low < c < mean(c) + std(c)*high
    where ``c`` refers to the clipped output itself.
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
        Lower threshold value used for clipping.
upper : float
        Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
    conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
        Proportion (in range 0-1) of total data set to trim off each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
    # np.partition is preferred but it only exists in numpy 1.8.0 and higher;
    # for older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
    (i.e., conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
    elif tail.lower() == 'left':
        lowercut = int(proportiontocut * nobs)
        uppercut = nobs
    else:
        raise ValueError("`tail` must be 'left' or 'right'")
    # np.partition is preferred but it only exists in numpy 1.8.0 and higher;
    # for older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
return atmp[lowercut:uppercut]
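# Illustrative usage sketch (added for exposition; `_trim1_example` is a
# hypothetical helper, not part of the scipy API, and is never called here).
# It shows how `trim1` drops a proportion from a single tail of the data.
def _trim1_example():
    a = np.arange(20)
    # Cutting 10% from the right keeps the 18 smallest values (order undefined).
    right = trim1(a, 0.1, tail='right')   # right.shape == (18,)
    # Cutting 25% from the left keeps the 15 largest values.
    left = trim1(a, 0.25, tail='left')    # left.shape == (15,)
    return right.shape, left.shape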
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
    # np.partition is preferred but it only exists in numpy 1.8.0 and higher;
    # for older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
    Here are some data [3]_ on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
    # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
    # to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed, and not necessarily zero-mean.
Like other correlation coefficients, this one varies between -1 and +1
with 0 implying no correlation. Correlations of -1 or +1 imply an exact
linear relationship. Positive correlations imply that as x increases, so
does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
r : float
Pearson's correlation coefficient
p-value : float
2-tailed p-value
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
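# Illustrative usage sketch (added for exposition; `_pearsonr_example` is a
# hypothetical helper, not part of the scipy API, and is never called here).
# An exactly increasing linear relationship gives r = +1, an exactly
# decreasing one gives r = -1; in both cases the returned p-value is 0.
def _pearsonr_example():
    x = np.arange(10.0)
    r_pos, p_pos = pearsonr(x, 2.0 * x + 1.0)   # r_pos is 1.0, p_pos is 0.0
    r_neg, p_neg = pearsonr(x, -x)              # r_neg is -1.0, p_neg is 0.0
    return (r_pos, p_pos), (r_neg, p_neg)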
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
        This is the prior odds ratio and not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). The correlation matrix is square,
        with length equal to the total number of variables (columns or rows)
        in `a` and `b` combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
        that two sets of data are uncorrelated; it has the same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
contains_nan, nan_policy = _contains_nan(b, nan_policy)
if contains_nan and nan_policy == 'omit':
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt(((n-2)/((rs+1.0)*(1.0-rs))).clip(0))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
    `pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
    It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
        r_{pb} = \frac{\overline{Y_{1}} -
                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
    groups. Thus, an independent groups t-test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
       no. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
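# Illustrative sketch (added for exposition; `_pointbiserialr_example` is a
# hypothetical helper, not part of the scipy API, and is never called here).
# It numerically checks the Notes formula t = sqrt(N-2) * r_pb / sqrt(1-r_pb**2)
# by recomputing the two-sided p-value from that t statistic.
def _pointbiserialr_example():
    x = np.array([0, 0, 0, 1, 1, 1, 1])
    y = np.arange(7)
    r, p = pointbiserialr(x, y)
    t = np.sqrt(len(x) - 2) * r / np.sqrt(1.0 - r ** 2)
    p_from_t = 2 * distributions.t.sf(np.abs(t), len(x) - 2)
    return p, p_from_t   # the two p-values agree (up to rounding)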
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the tau-b version of Kendall's tau which
accounts for ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Whether to use lexsort or quicksort as the sorting method for the
initial sort of the inputs. Default is lexsort (True), for which
`kendalltau` is of complexity O(n log(n)). If False, the complexity is
O(n^2), but with a smaller pre-factor (so quicksort may be faster for
small arrays).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
The definition of Kendall's tau that is used is::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
No. 314, Part 1, pp. 436-439, 1966.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.24821309157521476
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
    if initial_lexsort is not None:  # deprecated keyword, scheduled for removal
        warnings.warn('"initial_lexsort" is deprecated and has no effect.')
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
return (cnt * (cnt - 1) // 2).sum()
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
con, dis = _kendall_condis(x, y) # concordant & discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.where(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie = count_rank_tie(x) - ntie # ties only in x
ytie = count_rank_tie(y) - ntie # ties only in y
if con + dis + xtie == 0 or con + dis + ytie == 0:
return KendalltauResult(np.nan, np.nan)
tau = (con - dis) / np.sqrt(con + dis + xtie) / np.sqrt(con + dis + ytie)
# what follows reproduces the ending of Gary Strangman's original
# stats.kendalltau() in SciPy
svar = (4.0 * size + 10.0) / (9.0 * size * (size - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return KendalltauResult(tau, prob)
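# Illustrative sketch (added for exposition; `_kendalltau_example` is a
# hypothetical helper, not part of the scipy API, and is never called here).
# It recomputes tau-b by brute force over all pairs, following the Notes
# formula tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U)).
def _kendalltau_example():
    x = np.array([12, 2, 1, 12, 2])
    y = np.array([1, 4, 7, 1, 0])
    P = Q = T = U = 0
    for i in range(len(x)):
        for j in range(i + 1, len(x)):
            dx, dy = x[i] - x[j], y[i] - y[j]
            if dx * dy > 0:
                P += 1                     # concordant pair
            elif dx * dy < 0:
                Q += 1                     # discordant pair
            elif dx == 0 and dy != 0:
                T += 1                     # tie only in x
            elif dy == 0 and dx != 0:
                U += 1                     # tie only in y
            # joint ties (dx == 0 and dy == 0) count in neither T nor U
    tau_brute = (P - Q) / np.sqrt((P + Q + T) * (P + Q + U))
    return tau_brute, kendalltau(x, y).correlation   # both approx -0.4714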
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
        expected value in null hypothesis; if array_like then it must have the
        same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
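# Illustrative sketch (added for exposition; `_ttest_denom_example` is a
# hypothetical helper, not part of the scipy API, and is never called here).
# It contrasts the pooled-variance denominator with the Welch-Satterthwaite
# one: the pooled df is always n1 + n2 - 2, while the Welch df is data
# dependent and typically smaller and non-integer when the variances differ.
def _ttest_denom_example():
    v1, n1 = 4.0, 30   # sample variance and size of group 1 (made-up numbers)
    v2, n2 = 9.0, 20   # sample variance and size of group 2 (made-up numbers)
    df_pooled, denom_pooled = _equal_var_ttest_denom(v1, n1, v2, n2)
    df_welch, denom_welch = _unequal_var_ttest_denom(v1, n1, v2, n2)
    return (df_pooled, denom_pooled), (df_welch, denom_welch)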
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
        The mean(s) of sample 2.
    std2 : array_like
        The standard deviation(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
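# Illustrative sketch (added for exposition; `_ttest_ind_from_stats_example`
# is a hypothetical helper, not part of the scipy API, and is never called
# here). Feeding `ttest_ind_from_stats` the summary statistics (mean, ddof=1
# standard deviation, size) of two samples should reproduce `ttest_ind`.
def _ttest_ind_from_stats_example():
    a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    b = np.array([2.0, 4.0, 6.0, 8.0, 10.0])
    from_stats = ttest_ind_from_stats(
        a.mean(), a.std(ddof=1), a.size,
        b.mean(), b.std(ddof=1), b.size)
    from_data = ttest_ind(a, b)
    # statistic and pvalue agree up to floating-point rounding
    return from_stats, from_data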
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of *two independent* samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
    We can use this test if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
    Examples for its use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D, pval_two)
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
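# Illustrative sketch (added for exposition; `_count_example` is a
# hypothetical helper, not part of the scipy API, and is never called here).
# For a plain ndarray `_count` simply reports the size along `axis`; for a
# masked array it defers to the `count` method, so masked entries are skipped.
def _count_example():
    plain = np.zeros((3, 4))
    masked = ma.masked_invalid(np.array([1.0, np.nan, 3.0]))
    return _count(plain, axis=0), _count(plain), _count(masked)
    # -> (3, 12, 2)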
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
    Calculates a one-way chi-squared test.
    The chi-squared test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
    This is the two-sided test; one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d absolute not signed distance
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
prob = 1.0
return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
"""
Tie correction factor for ties in the Mann-Whitney U and
Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : None (deprecated), 'less', 'two-sided', or 'greater'
Whether to get the p-value for the one-sided hypothesis ('less'
or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to None, which results in a p-value half the size of
the 'two-sided' p-value and a different U statistic. The
default behavior is not the same as using 'less' or 'greater':
it only exists for backward compatibility and is deprecated.
Returns
-------
statistic : float
The Mann-Whitney U statistic, equal to min(U for x, U for y) if
`alternative` is equal to None (deprecated; exists for backward
compatibility), and U for y otherwise.
pvalue : float
p-value assuming an asymptotic normal distribution. One-sided or
two-sided, depending on the choice of `alternative`.
Notes
-----
    Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
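    Examples
    --------
    A minimal illustration (the sample values below are arbitrary); passing
    `alternative` explicitly avoids the deprecated default behaviour:
    >>> from scipy import stats
    >>> x = [1, 4, 2, 5, 3, 6, 4, 5]  # arbitrary sample data
    >>> y = [8, 6, 7, 9, 10, 7, 8, 9]
    >>> u, p = stats.mannwhitneyu(x, y, alternative='two-sided')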
"""
if alternative is None:
warnings.warn("Calling `mannwhitneyu` without specifying "
"`alternative` is deprecated.", DeprecationWarning)
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative is None or alternative == 'two-sided':
bigu = max(u1, u2)
elif alternative == 'less':
bigu = u1
elif alternative == 'greater':
bigu = u2
else:
raise ValueError("alternative should be None, 'less', 'greater' "
"or 'two-sided'")
z = (bigu - meanrank) / sd
if alternative is None:
# This behavior, equal to half the size of the two-sided
# p-value, is deprecated.
p = distributions.norm.sf(abs(z))
elif alternative == 'two-sided':
p = 2 * distributions.norm.sf(abs(z))
else:
p = distributions.norm.sf(z)
u = u2
# This behavior is deprecated.
if alternative is None:
u = min(u1, u2)
return MannwhitneyuResult(u, p)
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
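    Examples
    --------
    A minimal illustration (the sample values below are arbitrary):
    >>> from scipy import stats
    >>> x = [1.83, 0.50, 1.62, 2.48, 1.68, 1.88, 1.55, 3.06, 1.30]
    >>> y = [0.878, 0.647, 0.598, 2.05, 1.06, 1.29, 1.06, 3.14, 1.29]
    >>> statistic, pvalue = stats.ranksums(x, y)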
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
for a in args:
a = ma.masked_invalid(a)
return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
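    Examples
    --------
    A minimal illustration with three repeated measurements per subject
    (the values below are arbitrary):
    >>> from scipy import stats
    >>> m1 = [7.0, 9.9, 8.5, 5.1, 10.3]  # arbitrary sample data
    >>> m2 = [5.3, 5.7, 4.7, 3.5, 7.7]
    >>> m3 = [4.9, 7.6, 5.5, 2.8, 8.4]
    >>> statistic, pvalue = stats.friedmanchisquare(m1, m2, m3)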
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
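    Examples
    --------
    A minimal illustration (the p-values below are arbitrary):
    >>> from scipy import stats
    >>> statistic, pvalue = stats.combine_pvalues([0.01, 0.2, 0.3],
    ...                                           method='fisher')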
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
raise ValueError(
"Invalid method '%s'. Options are 'fisher' or 'stouffer'", method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
x will be clipped to be no greater than 1.0 .
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
        Degrees of freedom of the Restricted model.
    dfden : int
        Degrees of freedom associated with the Full model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([ 2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([ 4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
fastsort : ndarray of type int
sorted indices into the original array
"""
# TODO: the wording in the docstring is nonsense.
it = np.argsort(a)
as_ = a[it]
return as_, it
def rankdata(a, method='average'):
"""
rankdata(a, method='average')
Assign ranks to data, dealing with ties appropriately.
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked. The array is first flattened.
method : str, optional
The method used to assign ranks to tied elements.
The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
'average':
The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
'min':
The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
'max':
The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
'dense':
Like 'min', but the rank of the next highest element is assigned
the rank immediately after those assigned to the tied elements.
'ordinal':
All values are given a distinct rank, corresponding to the order
that the values occur in `a`.
The default is 'average'.
Returns
-------
ranks : ndarray
An array of length equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
| gpl-3.0 |
fzalkow/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 7 | 7404 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import pylab as pl
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name,
train, test,
coverages, xgrid, ygrid):
"""
create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
points = dict(test=test, train=train)
    for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=["bradypus_variegatus_0",
"microryzomys_minutus_0"]):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
pl.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
pl.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
pl.xticks([])
pl.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
pl.contourf(X, Y, Z, levels=levels, cmap=pl.cm.Reds)
pl.colorbar(format='%.2f')
# scatter training/testing points
pl.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
pl.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
pl.legend()
pl.title(species.name)
pl.axis('equal')
# Compute AUC w.r.t. background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
pl.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
pl.show()
| bsd-3-clause |
Winand/pandas | pandas/tests/frame/test_apply.py | 7 | 25966 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast(self):
broadcasted = self.frame.apply(np.mean, broadcast=True)
agged = self.frame.apply(np.mean)
for col, ts in compat.iteritems(broadcasted):
assert (ts == agged[col]).all()
broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)
agged = self.frame.apply(np.mean, axis=1)
for idx in broadcasted.index:
assert (broadcasted.xs(idx) == agged[idx]).all()
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = self.mixed_frame._apply_standard(np.mean, 0,
ignore_failures=True)
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), broadcast=True)
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = DataFrame(np.tile(self.frame.index,
(len(self.frame.columns), 1)).T,
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
def test_apply_multi_index(self):
s = DataFrame([[1, 2], [3, 4], [5, 6]])
s.index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s.columns = ['col1', 'col2']
res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
assert isinstance(res.index, MultiIndex)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = pd.Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = pd.Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, reduce=True)
reduce_false = df.apply(fn, reduce=False)
reduce_none = df.apply(fn, reduce=None)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
# See gh-12244
def test_apply_non_numpy_dtype(self):
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def zip_frames(*frames):
"""
take a list of frames, zip the columns together for each
assume that these all have the first frame columns
return a new frame
"""
columns = frames[0].columns
zipped = [f[c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
class TestDataFrameAggregate(TestData):
_multiprocess_can_split_ = True
def test_agg_transform(self):
with np.errstate(all='ignore'):
f_sqrt = np.sqrt(self.frame)
f_abs = np.abs(self.frame)
# ufunc
result = self.frame.transform(np.sqrt)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt])
expected = f_sqrt.copy()
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt])
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
expected = zip_frames(f_sqrt, f_abs)
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt', 'absolute']])
result = self.frame.apply([np.sqrt, np.abs])
assert_frame_equal(result, expected)
result = self.frame.transform(['sqrt', np.abs])
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'])
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']})
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
with tm.assert_produces_warning(FutureWarning):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
def test_agg_reduce(self):
# all reducers
expected = zip_frames(self.frame.mean().to_frame(),
self.frame.max().to_frame(),
self.frame.sum().to_frame()).T
expected.index = ['mean', 'max', 'sum']
result = self.frame.agg(['mean', 'max', 'sum'])
assert_frame_equal(result, expected)
# dict input with scalars
result = self.frame.agg({'A': 'mean', 'B': 'sum'})
expected = Series([self.frame.A.mean(), self.frame.B.sum()],
index=['A', 'B'])
assert_series_equal(result.reindex_like(expected), expected)
# dict input with lists
result = self.frame.agg({'A': ['mean'], 'B': ['sum']})
expected = DataFrame({'A': Series([self.frame.A.mean()],
index=['mean']),
'B': Series([self.frame.B.sum()],
index=['sum'])})
assert_frame_equal(result.reindex_like(expected), expected)
# dict input with lists with multiple
result = self.frame.agg({'A': ['mean', 'sum'],
'B': ['sum', 'max']})
expected = DataFrame({'A': Series([self.frame.A.mean(),
self.frame.A.sum()],
index=['mean', 'sum']),
'B': Series([self.frame.B.sum(),
self.frame.B.max()],
index=['sum', 'max'])})
assert_frame_equal(result.reindex_like(expected), expected)
def test_nuiscance_columns(self):
# GH 15015
df = DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
result = df.agg('min')
expected = Series([1, 1., 'bar', pd.Timestamp('20130101')],
index=df.columns)
assert_series_equal(result, expected)
result = df.agg(['min'])
expected = DataFrame([[1, 1., 'bar', pd.Timestamp('20130101')]],
index=['min'], columns=df.columns)
assert_frame_equal(result, expected)
result = df.agg('sum')
expected = Series([6, 6., 'foobarbaz'],
index=['A', 'B', 'C'])
assert_series_equal(result, expected)
result = df.agg(['sum'])
expected = DataFrame([[6, 6., 'foobarbaz']],
index=['sum'], columns=['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_non_callable_aggregates(self):
# GH 16405
# 'size' is a property of frame/series
# validate that this is working
df = DataFrame({'A': [None, 2, 3],
'B': [1.0, np.nan, 3.0],
'C': ['foo', None, 'bar']})
# Function aggregate
result = df.agg({'A': 'count'})
expected = pd.Series({'A': 2})
assert_series_equal(result, expected)
# Non-function aggregate
result = df.agg({'A': 'size'})
expected = pd.Series({'A': 3})
assert_series_equal(result, expected)
# Mix function and non-function aggs
result1 = df.agg(['count', 'size'])
result2 = df.agg({'A': ['count', 'size'],
'B': ['count', 'size'],
'C': ['count', 'size']})
expected = pd.DataFrame({'A': {'count': 2, 'size': 3},
'B': {'count': 2, 'size': 3},
'C': {'count': 2, 'size': 3}})
assert_frame_equal(result1, result2, check_like=True)
assert_frame_equal(result2, expected, check_like=True)
# Just functional string arg is same as calling df.arg()
result = df.agg('count')
expected = df.count()
assert_series_equal(result, expected)
# Just a string attribute arg same as calling df.arg
result = df.agg('size')
expected = df.size
assert result == expected
| bsd-3-clause |
tjlane/pypad | pypad/mask.py | 1 | 29342 |
# THIS FILE IS PART OF PyPad, AND IS GOVERNED BY A PERMISSIBILITY LICENSE
# GOVERNING ITS USE AND DISTRIBUTION. YOU SHOULD HAVE RECEIVED A COPY OF THIS
# LICENSE WITH THE SOFTWARE; IF NOT PROVIDED, WRITE TO <[email protected]>.
#
# AUTHORS:
# TJ Lane <[email protected]>
# Jonas Sellberg <[email protected]>
#
# Apr 30, 2013
"""
mask.py
Provides a "mask" object for CSPads.
"""
import numpy as np
import h5py
import matplotlib.pyplot as plt
import matplotlib.colors as col
from matplotlib.widgets import Button
from matplotlib.path import Path
from pypad import utils
from pypad import read
from pypad.plot import ToggleButton
class PadMask(object):
"""
    A mask for a CSPad object.
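    Examples
    --------
    A minimal, illustrative workflow (the output file name is a placeholder):
    >>> mask = PadMask()
    >>> mask.mask_borders(num_pixels=2)
    >>> mask.mask_nonbonded()
    >>> mask.save('example_mask', fmt='pypad')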
"""
def __init__(self):
"""
Initialize a CSPad mask object.
"""
self._masks = {}
self._masks['base'] = self._blank_mask()
return
@property
def mask(self):
m = np.product( np.array(list(self._masks.values())), axis=0 )
assert m.shape == (4,16,185,194)
return m
@property
def mask2d(self):
return utils.flatten_2x1s( self.mask )
@property
def inverted(self):
"""
Invert the mask. Usually "True" is a good pixel, and "False" is a bad
one, but this flips them.
Returns
-------
"""
return np.logical_not(self.mask)
@property
def num_masked(self):
"""
Returns the number of masked pixels
"""
inv_mask = 1.0 - self.mask
return np.sum(inv_mask)
@property
def types_applied(self):
return list(self._masks.keys())
def remove_mask(self, mask_name):
"""
Remove a mask that has been applied.
"""
if mask_name == 'base': # this one is special
self._masks['base'] = self._blank_mask()
elif not mask_name in list(self._masks.keys()):
raise KeyError('Mask: %s not applied' % mask_name)
else:
x = self._masks.pop(mask_name)
print("Removed mask: %s" % mask_name)
return
def _inject_mask(self, mask_name, mask, override_previous=False):
"""
Add a new kind of mask to this mask object. Provides some typechecking.
All this really does is deposit `mask` into dict self._masks with key
`mask_name`.
"""
assert type(mask_name) == str
assert mask.shape == (4, 16, 185, 194)
if not mask.dtype == np.bool:
mask = mask.astype(np.bool)
if (not mask_name in list(self._masks.keys())) or override_previous:
self._masks[mask_name] = mask
else:
raise KeyError('Mask object already has `%s` mask.' % mask_name)
return
def _check_image(self, image):
"""
Sanity check on `image`.
"""
if not image.shape == (4, 16, 185, 194):
raise ValueError('`image` must be shape (4, 16, 185, 194), got '
'%s' % str(image.shape))
return
def _blank_mask(self):
"""
Utility function that just returns a blank mask.
"""
return np.ones((4, 16, 185, 194), dtype=np.int32)
# ----------
# below we provide many methods of the form PadMask.mask_*(), which allow
# one to mask pixels via various criteria (indicated by the *)
# to add a new kind of mask, make a new method here. Follow mask_threshold
# as a template
def mask_pixel(self, quad, asic, x, y):
"""
Mask a single pixel, or series of pixels. To do the latter, pass arrays
as the arguments (even though the below says int).
Parameters
----------
quad : int
[0,1,2,3], denoting the quad.
asic : int
            Int in [0,15], denoting the asic.
        x : int
            Int in [0,184], denoting x position.
        y : int
            Int in [0,193], denoting y position.
"""
self._masks['base'][quad, asic, x, y] = 0
return
def unmask_pixel(self, quad, asic, x, y):
"""
        Unmask a single pixel, or series of pixels. To do the latter, pass arrays
as the arguments (even though the below says int).
Parameters
----------
quad : int
[0,1,2,3], denoting the quad.
asic : int
            Int in [0,15], denoting the asic.
        x : int
            Int in [0,184], denoting x position.
        y : int
            Int in [0,193], denoting y position.
"""
self._masks['base'][quad, asic, x, y] = 1
return
def mask_threshold(self, image, upper=None, lower=None):
"""
Mask pixels by threshold values, in ADU. You must supply either `upper`
        or `lower`, but only one is required.
Parameters
----------
image : np.ndarray
            A shape (4, 16, 185, 194) array describing a CSPad image.
upper : int
Values greater than this are masked.
lower : int
Values lower than this are masked.
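        Examples
        --------
        A hedged sketch (``img`` is assumed to be an existing intensity
        array of shape (4, 16, 185, 194)):
        >>> mask = PadMask()
        >>> mask.mask_threshold(img, upper=10000, lower=0)  # img: placeholder array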
"""
print("Masking pixels outside of [%s,%s]" % (str(lower), str(upper)))
if (upper == None) and (lower == None):
raise ValueError('Either `upper` or `lower` (or both) must be specified')
if upper and lower:
if upper <= lower:
raise ValueError('Must have: `upper` > `lower` to threshold')
self._check_image(image)
m = self._blank_mask()
ind = (image > upper) + (image < lower)
m[ind] = 0
self._inject_mask('threshold', m, override_previous=True)
return
def mask_nonbonded(self, nearest_neighbours=True):
"""
Mask pixels on the CSPad that were never bonded.
Optional Parameters
-------------------
nearest_neighbours : bool
            Also mask four of their nearest neighbours, which give anomalous
responses.
"""
print("Masking nonbonded pixels")
m = self._blank_mask()
for i in range(4):
for j in range(16):
for p in range(0, 185, 10):
m[i,j,p,p] = 0
if nearest_neighbours:
if p == 0:
m[i,j,p+1,p] = 0
m[i,j,p,p+1] = 0
else:
m[i,j,p-1:p+2,p] = 0
m[i,j,p,p-1:p+2] = 0
self._inject_mask('nonbonded', m, override_previous=True)
return
def mask_borders(self, num_pixels=1):
"""
Mask the border of each ASIC, to a width of `num_pixels`.
Parameters
----------
num_pixels : int
The size of the border region to mask.
"""
print("Masking %d pixels around the border of each 2x1" % num_pixels)
n = int(num_pixels)
m = self._blank_mask()
if (num_pixels < 0) or (num_pixels > 194):
raise ValueError('`num_pixels` must be >0, <194')
for i in range(4):
for j in range(16):
# mask along the y-dim
m[i,j,:,0:n] = np.bool(False)
m[i,j,:,194-n:194] = np.bool(False)
# mask along the x-dim
m[i,j,0:n,:] = np.bool(False)
m[i,j,185-n:185,:] = np.bool(False)
# # mask a bar along y in the middle of the 2x1
# m[i,j,:,194-n:194+n] = np.bool(False)
self._inject_mask('border', m, override_previous=True)
return
def mask_row13(self):
print("Masking row 13")
#raise NotImplementedError()
print("Warning: row 13 masking is untested, tell the dev team if you need it.")
        # this is for masking out row13 of the CSPAD. Best-effort port of the
        # legacy 2d code (which flagged column 181 of each ASIC, stepping by
        # the 194-pixel ASIC width) onto the (4, 16, 185, 194) representation.
        m = self._blank_mask()
        m[:, :, :, 181] = 0
        self._inject_mask('row13', m, override_previous=True)
        return
# ----------
def merge(self, *args):
"""
Merge two or more masks, masking with an OR operator for masked pixels.
"""
for mask in args:
for mtype in list(mask._masks.keys()):
if mtype in list(self._masks.keys()):
self._masks[mtype] = np.logical_not( np.logical_or(self._masks[mtype],
mask._masks[mtype]) )
else:
self._masks[mtype] = mask._masks[mtype]
return
def save(self, filename, fmt='pypad'):
"""
Save the PadMask object to one of many possible formats:
-- pypad : An hdf5 format that includes all metadata associated with the
mask. Not read by other software (suffix: .mask).
-- cheetah : Stores the mask as a two-dimensional array in an HDF5 format.
Easily read into Cheetah and Thor (suffix: .h5).
Parameters
----------
filename : str
The name of the file to write. This function will append an
appropriate suffix if none is provided.
fmt : str, {'pypad', 'cheetah', 'thor'}
The format to save in. See above for documentation.
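        Examples
        --------
        A hedged sketch of typical use (file names are illustrative):
        >>> mask = PadMask()
        >>> mask.mask_borders(1)
        >>> mask.save('my_run', fmt='pypad')     # writes my_run.mask
        >>> mask.save('my_run', fmt='cheetah')   # writes my_run.h5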
"""
if fmt == 'pypad':
if not filename.endswith('.mask'):
filename += '.mask'
f = h5py.File(filename, 'w')
for k in list(self._masks.keys()):
f['/' + k] = self._masks[k]
f.close()
elif fmt == 'thor':
if not filename.endswith('.h5'):
filename += '.h5'
# super ghetto, but it was the easy way out. sorry.
# this converts first to cheetah 2d format, then
# to Thor 1d format. I used it b/c I could be sure it
# worked just by c/p code...
itx = self.mask2d
if not itx.shape == (1480, 1552):
raise ValueError('`itx` argument array incorrect shape! Must be:'
' (1480, 1552), got %s.' % str(itx.shape))
flat_itx = np.zeros(1480 * 1552, dtype=itx.dtype)
for q in range(4):
for twoXone in range(8):
# extract the cheetah itx
x_start = 388 * q
x_stop = 388 * (q+1)
y_start = 185 * twoXone
y_stop = 185 * (twoXone + 1)
# each sec is a ASIC, both belong to the same 2x1
sec1, sec2 = np.hsplit(itx[y_start:y_stop,x_start:x_stop], 2)
# determine the positions of the flat array to put intens data in
n_ASIC_pixels = 185 * 194
flat_start = (q * 8 + twoXone) * (n_ASIC_pixels * 2) # 2x1 index X px in 2x1
# inject them into the thor array
flat_itx[flat_start:flat_start+n_ASIC_pixels] = sec1.flatten()
flat_itx[flat_start+n_ASIC_pixels:flat_start+n_ASIC_pixels*2] = sec2.flatten()
f = h5py.File(filename, 'w')
f['/mask'] = flat_itx
f.close()
elif fmt in ['cheetah', 'twod']:
if not filename.endswith('.h5'):
filename += '.h5'
f = h5py.File(filename, 'w')
f['/data/data'] = self.mask2d
f.close()
else:
raise IOError('Unrecognized format for PadMask: %s. Should be one of'
' {"pypad", "thor", "cheetah", "twod"}' % fmt)
print("Wrote: %s" % filename)
return
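    # Illustrative round trip for save()/load() (the file name is
    # hypothetical, not from the original docs):
    #
    #     mask.save("run42", fmt="pypad")       # writes run42.mask (HDF5)
    #     same = PadMask.load("run42.mask")     # masks recovered key-by-key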
@classmethod
def load(cls, filename):
"""
Load a saved mask. Can be one of many formats:
-- pypad .mask
-- cheetah .h5
Parameters
----------
filename : str
The name of the file to read.
"""
m = cls()
if filename.endswith('.mask'):
f = h5py.File(filename, 'r')
for k in f:
m._masks[k] = np.array(f[k])
f.close()
elif filename.endswith('.h5'):
try:
f = h5py.File(filename, 'r')
d = np.array( f['/data/data'] )
assert d.shape == (1480, 1552)
f.close()
            except Exception:
raise IOError('Cannot read data inside: %s. Either data is '
'corrupt or not in cheetah format [in /data/data'
' and shape (1480, 1552)]' % filename)
m._masks['cheetah'] = np.array( read.enforce_raw_img_shape(d) )
else:
raise IOError('Can only read files with {.mask, .h5} format -- got: %s' % filename)
return m
class MaskGUI(object):
def __init__(self, raw_image, mask=None, filename='my_mask', fmt='pypad'):
"""
Instantiate an interactive masking session.
Parameters
----------
raw_image : np.ndarray
            A shape (4, 16, 185, 194) array containing a reference image that
the user will use to guide their masking.
mask : padmask.PadMask
A PadMask object to modify. If `None` (default), generate a new
mask.
filename : str
The name of the file to generate at the end of the session.
fmt : str
The file format of `filename` to write.
"""
self.print_gui_help()
self.filename = filename
self.file_fmt = fmt
if not raw_image.shape == (4, 16, 185, 194):
raise ValueError("`raw_image` must have shape: (4, 16, 185, 194)")
        if mask is None:
self.mask = PadMask()
elif isinstance(mask, PadMask):
self.mask = mask
else:
raise TypeError('`mask` argument must be a pypad.mask.PadMask object')
# inject a new mask type into our PadMask obj
m = self.mask._blank_mask()
        if 'manual' not in self.mask._masks:
self.mask._inject_mask('manual', m)
# deal with negative values
        if 'negatives' not in self.mask._masks:
self.mask._inject_mask('negatives', m.copy())
self.mask._masks['negatives'][raw_image <= 0.0] = 0
print("Masked: %d negative pixels" % np.sum(np.logical_not(self.mask._masks['negatives'])))
# we're going to plot the log of the image, so do that up front
self.raw_image_4d = raw_image
self.raw_image = utils.flatten_2x1s(raw_image)
self.log_image = self.raw_image.copy()
self.log_image[self.log_image < 0.0] = 0.0
self.log_image = np.log10(self.log_image + 1.0)
# populate an array containing the indices of all pixels in the image
mg = np.meshgrid( np.arange(self.raw_image.shape[0]),
np.arange(self.raw_image.shape[1]) )
self.points = np.vstack((mg[0].flatten(), mg[1].flatten())).T
# create a colormap with masked pixels clearly highlighted
self.palette = plt.cm.PuOr_r # reversed purple-orange -- base cm
self.palette.set_under(color='green')
# draw the main GUI, which is an image that can be interactively masked
plt.figure(figsize=(9,6))
self.ax = plt.subplot(111)
self.im = self.ax.imshow( (self.log_image * self.mask.mask2d) - 1e-10, cmap=self.palette,
origin='lower', interpolation='nearest', vmin=1e-10, aspect=1,
extent=[0, self.log_image.shape[0], 0, self.log_image.shape[1]] )
self.lc, = self.ax.plot((0,0),(0,0),'-+m', linewidth=1, markersize=8, markeredgewidth=1)
self.lm, = self.ax.plot((0,0),(0,0),'-+m', linewidth=1, markersize=8, markeredgewidth=1)
self.line_corner = (0,0)
self.xy = None
self.lines_xy = None
self.single_px = None # for masking single pixels
self.colorbar = plt.colorbar(self.im, pad=0.01)
self.colorbar.set_label(r'$\log_{10}$ Intensity')
cidb = plt.connect('button_press_event', self.on_click)
cidk = plt.connect('key_press_event', self.on_keypress)
cidm = plt.connect('motion_notify_event', self.on_move)
plt.xlim([0, self.log_image.shape[0]])
plt.ylim([0, self.log_image.shape[1]])
self.ax.get_xaxis().set_ticks([])
self.ax.get_yaxis().set_ticks([])
# add toggle buttons that allow the user to turn on and off std masks
# I used to have this in its own nice function, but MPL didn't like
# that for some reason... there is probably a better way, I just dont
# know the innerds of MPL enough --TJL
axcolor = 'lightgoldenrodyellow'
ax1 = plt.axes([0.04, 0.7, 0.12, 0.08])
self.b1 = ToggleButton(ax1, 'nonbonded', color=axcolor, hovercolor='0.975')
self.b1.on_turned_on(self.mask.mask_nonbonded)
self.b1.on_turned_off(self.mask.remove_mask, 'nonbonded')
self.b1.on_turned_on(self.update_image)
self.b1.on_turned_off(self.update_image)
ax2 = plt.axes([0.04, 0.6, 0.12, 0.08])
self.b2 = ToggleButton(ax2, 'row 13', color=axcolor, hovercolor='0.975')
self.b2.on_turned_on(self.mask.mask_row13)
self.b2.on_turned_off(self.mask.remove_mask, 'row13')
self.b2.on_turned_on(self.update_image)
self.b2.on_turned_off(self.update_image)
ax3 = plt.axes([0.04, 0.5, 0.12, 0.08])
self.b3 = ToggleButton(ax3, 'borders', color=axcolor, hovercolor='0.975')
self.b3.on_turned_on(self._set_borderwidth)
self.mask_border_cid = self.b3.on_turned_on(self.mask.mask_borders)
self.b3.on_turned_off(self.mask.remove_mask, 'border')
self.b3.on_turned_on(self.update_image)
self.b3.on_turned_off(self.update_image)
ax4 = plt.axes([0.04, 0.4, 0.12, 0.08])
self.b4 = ToggleButton(ax4, 'threshold', color=axcolor, hovercolor='0.975')
self.b4.on_turned_on(self._set_threshold)
self.mask_threshold_cid = self.b4.on_turned_on(self.mask.mask_threshold, self.raw_image_4d, None, None)
self.b4.on_turned_off(self.mask.remove_mask, 'threshold')
self.b4.on_turned_on(self.update_image)
self.b4.on_turned_off(self.update_image)
plt.show()
return
def _set_threshold(self):
print("\n --- Enter threshold values --- ")
self.lower_thld = float( input('Enter lower threshold: ') )
self.upper_thld = float( input('Enter upper threshold: ') )
self.b4.onstate_exargs[ self.mask_threshold_cid ] = (self.raw_image_4d, self.upper_thld, self.lower_thld)
return
def _set_borderwidth(self):
print("\n --- Enter the desired border width --- ")
raw_in = input('Size of border (in pixels) [1]: ')
if raw_in == '':
self.borderwidth = 1
else:
self.borderwidth = int( raw_in )
self.b3.onstate_exargs[ self.mask_border_cid ] = (self.borderwidth,)
return
def update_image(self):
self.im.set_data( (self.log_image * self.mask.mask2d) - 1e-10 )
return
def on_click(self, event):
# for WHATEVER reason, the imshow drawing is stretched incorrectly
# such that pixel positions for x/y are off by the ratio used below
# ... this is likely due to me not understanding MPL, hence this hack
# -- TJL
ratio = float(self.log_image.shape[0]) / float(self.log_image.shape[1])
x_coord = event.xdata / ratio
y_coord = event.ydata * ratio
# if a button that is *not* the left click is pressed
        if event.inaxes and (event.button != 1):
            # save the points for masking in pixel coordinates
            if self.xy is not None:
                self.xy = np.vstack(( self.xy, np.array([int(x_coord),
                                                         int(y_coord)]) ))
            else:
                self.xy = np.array([int(x_coord), int(y_coord)])
# save the points for drawing the lines in MPL coordinates
            if self.lines_xy is not None:
self.lines_xy = np.vstack(( self.lines_xy, np.array([int(event.xdata),
int(event.ydata)]) ))
else:
self.lines_xy = np.array([int(event.xdata), int(event.ydata)])
self.lc.set_data(self.lines_xy.T) # draws lines
self.line_corner = (int(event.xdata), int(event.ydata))
# if the left button is pressed
        elif event.inaxes and (event.button == 1):
self.single_px = (int(x_coord), int(y_coord))
print("Selected: (%s, %s)" % self.single_px)
return
def on_keypress(self, event):
# mask or unmask
if event.key in ['m', 'u']:
            if self.xy is None:
print("No area selected, mask not changed.")
else:
# print "Masking region inside:"
# print self.xy
# wrap around to close polygon
self.xy = np.vstack(( self.xy, self.xy[0,:] ))
path = Path(self.xy)
in_area = path.contains_points(self.points+0.5)
inds = self.points[in_area]
#print self.xy
#print inds
# if we're going to mask, mask
if event.key == 'm':
print('Masking convex area...')
x = self._conv_2dinds_to_4d(inds)
self.mask._masks['manual'][x[:,0],x[:,1],x[:,2],x[:,3]] = 0
# if we're unmasking, unmask
elif event.key == 'u':
print('Unmasking convex area...')
x = self._conv_2dinds_to_4d(inds)
self.mask._masks['manual'][x[:,0],x[:,1],x[:,2],x[:,3]] = 1
# draw and reset
self.update_image()
self._reset()
plt.draw()
# reset all masks
elif event.key == 'r':
print('Unmasking all')
self.mask._masks['manual'] = self.mask._blank_mask()
self.update_image()
self._reset()
#self.im.autoscale()
plt.draw()
# toggle selection
elif event.key == 't':
            if self.single_px is not None:
x = self._conv_2dinds_to_4d( np.array(self.single_px)[None,:] )
x = x.flatten()
if self.mask.mask[x[0],x[1],x[2],x[3]] == 0:
print("Unmasking single pixel:", self.single_px)
self.mask._masks['manual'][x[0],x[1],x[2],x[3]] = 1
else:
print("Masking single pixel:", self.single_px)
self.mask._masks['manual'][x[0],x[1],x[2],x[3]] = 0
else:
print("No single pixel selected to toggle. Click a pixel and")
print(" press `t` to toggle the mask on that pixel.")
self.update_image()
self._reset()
#self.im.autoscale()
plt.draw()
# clear mouse selection
elif event.key == 'x':
print("Reset selections")
self._reset()
# save and exit
elif event.key == 'w':
self.mask.save(self.filename, fmt=self.file_fmt)
plt.close()
return
# exit w/o saving
elif event.key == 'q':
print('Exiting without saving...')
plt.close()
return
# else:
# print "Could not understand key: %s" % event.key
# print "Valid options: {m, u, r, k, q}"
return
def on_move(self, event):
if not event.inaxes: return
xm, ym = int(event.xdata), int(event.ydata)
# update the line positions
if self.line_corner != (0,0):
self.lm.set_data((self.line_corner[0],xm), (self.line_corner[1],ym))
plt.draw()
return
def _reset(self):
        self.single_px = None
self.xy = None
self.lines_xy = None
self.lc.set_data([], [])
self.lm.set_data([], [])
self.line_corner = (0, 0)
return
def _conv_2dinds_to_4d(self, inds):
"""
Convert indices in a 2d Cheetah array to (4,16,185,194).
Parameters
----------
inds : np.ndarray, int
An N x 2 array, where the first column indexes x on the 2d image,
and the second column indexes y.
Returns
-------
inds_4d : np.ndarray, int
An N x 4 array, with each column indexing quads/asics/y/x,
"""
inds_4d = np.zeros((inds.shape[0], 4), dtype=np.int32)
# index each asic, in the correct order
        of64 = (inds[:,1] // 185) * 2 + (inds[:,0] // 388) * 16 + (inds[:,0] // 194) % 2
assert np.all(of64 < 64)
# quads / asics
# print 'masking in ASICs:', inds, of64
        inds_4d[:,0] = of64 // 16
        inds_4d[:,1] = of64 % 16
# x / y : note the image is displayed transposed
inds_4d[:,2] = (inds[:,1] % 185)
inds_4d[:,3] = (inds[:,0] % 194)
return inds_4d
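    # Worked example of the index arithmetic above (illustrative only): a 2d
    # pixel at (x, y) = (400, 10) gives
    #     of64 = (10 // 185) * 2 + (400 // 388) * 16 + (400 // 194) % 2 = 16
    #     quad = 16 // 16 = 1,   asic = 16 % 16 = 0
    #     y    = 10 % 185 = 10,  x    = 400 % 194 = 12
    # i.e. the 4d index (1, 0, 10, 12).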
def print_gui_help(self):
print()
print()
print(" --- WELCOME TO PYPAD's INTERACTIVE MASKING ENVIRONMENT --- ")
print()
print(" Green pixels are masked.")
print()
print(" Keystrokes")
print(" ----------")
print(" m : mask u : unmask r : reset ")
print(" x : clear selection w : save & exit t : toggle pixel")
print(" q : exit w/o saving")
print()
print(" Mouse")
print(" -----")
print(" Right click on three or more points to draw a polygon around a")
print(" set of pixels. Then press `m` or `u` to mask or unmask that area.")
print()
print(" You can also mask/unmask single pixels by clicking on them with")
print(" the mouse and pressing `t` to toggle the mask state.")
print()
print(" Toggle Buttons (left)")
print(" ---------------------")
print(" nonbonded : Mask nonbonded pixels, and their nearest neighbours.")
print(" These pixels aren't even connected to the detector.")
print()
print(" row 13 : In some old experiments, row 13 on one ASIC was ")
print(" busted -- mask that (will be clear if this is needed)")
print()
print(" threshold : You will be prompted for an upper and lower limit --")
print(" pixels outside that range are masked. Units are ADUs")
print(" and you can set only a lower/upper limit by passing")
print(" 'None' for one option.")
print()
print(" borders : Mask the borders of each ASIC. These often give")
print(" anomoulous responses. Recommended to mask one pixel")
print(" borders at least.")
print("")
print(" ----- // -----")
print()
| gpl-2.0 |
laserson/phip-stat | phip/cli.py | 1 | 31221 | # Copyright 2016 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import gzip
import json
import os
import re
import sys
from collections import Counter, OrderedDict
from functools import reduce
from glob import glob
from os import path as osp
from os.path import join as pjoin
from subprocess import PIPE, Popen
import numpy as np
import pandas as pd
from click import Choice, Path, command, group, option
from tqdm import tqdm
from phip.utils import DEFAULT_FDR, compute_size_factors, readfq
# handle gzipped or uncompressed files
def open_maybe_compressed(*args, **kwargs):
if args[0].endswith(".gz"):
# gzip modes are different from default open modes
if len(args[1]) == 1:
args = (args[0], args[1] + "t") + args[2:]
compresslevel = kwargs.pop("compresslevel", 6)
return gzip.open(*args, **kwargs, compresslevel=compresslevel)
else:
return open(*args, **kwargs)
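# Illustrative usage of the helper above (file names are hypothetical):
#
#     with open_maybe_compressed("reads.fastq.gz", "r") as ip:
#         line = next(ip)    # gzip handle opened in text mode ("rt")
#     with open_maybe_compressed("reads.fastq", "r") as ip:
#         line = next(ip)    # plain builtin open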
@group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
"""phip -- PhIP-seq analysis tools"""
pass
@cli.command(name="truncate-fasta")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input fasta",
)
@option("-o", "--output", required=True, type=Path(exists=False), help="output fasta")
@option(
"-k",
"--length",
required=True,
type=int,
help="length of starting subsequence to extract",
)
def truncate_fasta(input, output, length):
"""Truncate each sequence of a fasta file."""
with open(input, "r") as ip, open(output, "w") as op:
for (n, s, q) in readfq(ip):
print(f">{n}\n{s[:length]}", file=op)
@cli.command(name="merge-kallisto-tpm")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, file_okay=False),
help="input dir containing kallisto results",
)
@option("-o", "--output", required=True, type=Path(exists=False), help="output path")
def merge_kallisto_tpm(input, output):
"""Merge kallisto abundance results.
Input directory should contain sample-named subdirectories, each containing
an abundance.tsv file. This command will generate a single tab-delim
output file with each column containing the tpm values for that sample.
"""
samples = os.listdir(input)
iterators = [open(pjoin(input, s, "abundance.tsv"), "r") for s in samples]
with open(output, "w") as op:
it = zip(*iterators)
# burn headers of input files and write header of output file
_ = next(it)
print("id\t{}".format("\t".join(samples)), file=op)
for lines in it:
fields_array = [line.split("\t") for line in lines]
# check that join column is the same
assert all([fields[0] == fields_array[0][0] for fields in fields_array])
merged_fields = [fields_array[0][0]] + [f[4].strip() for f in fields_array]
print("\t".join(merged_fields), file=op)
@cli.command(name="gamma-poisson-model")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input counts file (tab-delim)",
)
@option(
"-o", "--output", required=True, type=Path(exists=False), help="output directory"
)
@option(
"-t",
"--trim-percentile",
default=99.9,
help="lower percent of data to keep for model fitting",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
def gamma_poisson_model(input, output, trim_percentile, index_cols):
"""Fit a gamma-poisson model.
Compute -log10(pval) for each (possibly-normalized) count.
"""
from phip.gampois import gamma_poisson_model as model
counts = pd.read_csv(input, sep="\t", header=0, index_col=list(range(index_cols)))
os.makedirs(output, exist_ok=True)
alpha, beta, rates, mlxp = model(counts, trim_percentile)
with open(pjoin(output, "parameters.json"), "w") as op:
json.dump(
{
"alpha": alpha,
"beta": beta,
"trim_percentile": trim_percentile,
"background_rates": list(rates),
},
op,
)
mlxp.to_csv(pjoin(output, "mlxp.tsv"), sep="\t", float_format="%.2f")
@cli.command(name="clipped-factorization-model")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input counts file (tab-delim)",
)
@option(
"-o",
"--output",
required=False,
type=Path(exists=False),
help="output file or directory. If ends in .tsv, will be treated as file",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
@option("--rank", default=3, show_default=True, help="matrix rank")
@option(
"--clip-percentile",
default=99.9,
show_default=True,
help="percentile thershold to clip at",
)
@option(
"--learning-rate",
default=1.0,
show_default=True,
help="learning rate for Adam optimizer",
)
@option(
"--minibatch-size", default=1024 * 32, show_default=True, help="rows per minibatch"
)
@option(
"--patience",
default=5,
show_default=True,
help="number of epochs of no improvement to wait before early stopping",
)
@option("--max-epochs", default=1000, show_default=True, help="maximum epochs")
@option(
"--discard-sample-reads-fraction",
default=0.01,
show_default=True,
help="Discard samples with fewer than X * m reads, where m is the median "
"number of reads across samples",
)
@option(
"--no-normalize-to-reads-per-million",
is_flag=True,
help="Work directly on read counts, not counts divided by sample totals",
)
@option(
"--log-every-seconds",
default=1,
show_default=True,
help="write progress no more often than every N seconds",
)
def clipped_factorization_model(
input,
output,
index_cols,
rank,
clip_percentile,
learning_rate,
minibatch_size,
patience,
max_epochs,
discard_sample_reads_fraction,
no_normalize_to_reads_per_million,
log_every_seconds,
):
"""Fit matrix factorization model.
Computes residuals from a matrix factorization model. Specifically, attempt
to detect and correct for clone and sample batch effects by subtracting off
a learned low-rank reconstruction of the given counts matrix.
The result is the (clones x samples) matrix of residuals after correcting for
batch effects. A few additional rows and columns (named _background_0,
_background_1, ...) giving the learned effects are also included.
"""
from phip.clipped_factorization import do_clipped_factorization
counts = pd.read_csv(input, sep="\t", header=0, index_col=list(range(index_cols)))
total_reads = counts.sum()
expected_reads = total_reads.median()
for sample in counts.columns:
if total_reads[sample] / expected_reads < discard_sample_reads_fraction:
print(
"[!!] EXCLUDING SAMPLE %s DUE TO INSUFFICIENT READS "
"(%d vs. sample median %d)"
% (sample, total_reads[sample], expected_reads)
)
del counts[sample]
result_df = do_clipped_factorization(
counts,
rank=rank,
clip_percentile=clip_percentile,
learning_rate=learning_rate,
minibatch_size=minibatch_size,
patience=patience,
max_epochs=max_epochs,
normalize_to_reads_per_million=not no_normalize_to_reads_per_million,
log_every_seconds=log_every_seconds,
)
if output.endswith(".tsv"):
output_path = output
else:
os.makedirs(output)
output_path = pjoin(output, "mixture.tsv")
result_df.to_csv(output_path, sep="\t", float_format="%.2f")
print("Wrote: %s" % output_path)
@cli.command(name="call-hits")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input counts file (tab-delim)",
)
@option(
"-o",
"--output",
required=False,
type=Path(exists=False),
help="output file or directory. If ends in .tsv, will be treated as file",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
@option(
"--beads-regex",
default=".*beads.*",
show_default=True,
help="samples with names matching this regex are considered beads-only",
)
@option(
"--ignore-columns-regex",
default="^_background.*",
show_default=True,
help="ignore columns matching the given regex (evaluated in case-insensitive"
" mode.) Ignored columns are passed through to output without processing.",
)
@option(
"--ignore-rows-regex",
default="^_background.*",
show_default=True,
help="ignore rows matching the given regex (evaluated in case-insensitive "
"mode). Ignored rows are passed through to output without processing.",
)
@option(
"--fdr", default=DEFAULT_FDR, show_default=True, help="target false discovery rate"
)
@option(
"--normalize-to-reads-per-million",
type=Choice(["always", "never", "guess"]),
default="guess",
show_default=True,
help="Divide counts by totals per sample. Recommended "
"when running directly on raw read counts (as opposed to matrix "
'factorization residuals). If set to "guess" then the counts matrix '
"will be left as-is if it contains negative entries, and otherwise "
"will be normalized.",
)
@option(
"--verbosity",
default=2,
show_default=True,
help="verbosity: no output (0), result summary only (1), or progress (2)",
)
def call_hits(
input,
output,
index_cols,
beads_regex,
ignore_columns_regex,
ignore_rows_regex,
fdr,
normalize_to_reads_per_million,
verbosity,
):
"""Call hits at specified FDR using a heuristic.
Either raw read counts or the result of the clipped-factorization-model
sub-command can be provided.
The result is a matrix of shape (clones x samples). Entries above 1.0 in
this matrix indicate hits. Higher values indicate more evidence for a
hit, but there is no simple interpretation of these values beyond whether
they are below/above 1.0.
See the documentation for `hit_calling.do_hit_calling()` for details on
the implementation.
"""
from phip.hit_calling import do_hit_calling
original_counts = pd.read_csv(
input, sep="\t", header=0, index_col=list(range(index_cols))
)
counts = original_counts
print("Read input matrix: %d clones x %d samples." % counts.shape)
print("Columns: %s" % " ".join(counts.columns))
columns_to_ignore = [
s
for s in counts.columns
if ignore_columns_regex
and re.match(ignore_columns_regex, s, flags=re.IGNORECASE)
]
if columns_to_ignore:
print(
"Ignoring %d columns matching regex '%s': %s"
% (
len(columns_to_ignore),
ignore_columns_regex,
" ".join(columns_to_ignore),
)
)
counts = counts[[c for c in counts.columns if c not in columns_to_ignore]]
rows_to_ignore = [
s
for s in counts.index
if ignore_rows_regex
and index_cols == 1
and re.match(ignore_rows_regex, s, flags=re.IGNORECASE)
]
if rows_to_ignore:
print(
"Ignoring %d rows matching regex '%s': %s"
% (len(rows_to_ignore), ignore_rows_regex, " ".join(rows_to_ignore))
)
counts = counts.loc[~counts.index.isin(rows_to_ignore)]
beads_only_samples = [
s for s in counts.columns if re.match(beads_regex, s, flags=re.IGNORECASE)
]
print(
"Beads-only regex '%s' matched %d samples: %s"
% (beads_regex, len(beads_only_samples), " ".join(beads_only_samples))
)
result_df = do_hit_calling(
counts,
beads_only_samples=beads_only_samples,
fdr=fdr,
normalize_to_reads_per_million={"always": True, "never": False, "guess": None}[
normalize_to_reads_per_million
],
verbosity=verbosity,
)
full_result_df = original_counts.copy()
for column in result_df.columns:
full_result_df.loc[result_df.index, column] = result_df[column]
if output.endswith(".tsv"):
output_path = output
else:
os.makedirs(output)
output_path = pjoin(output, "hits.tsv")
full_result_df.to_csv(output_path, sep="\t", float_format="%.4f")
print("Wrote: %s" % output_path)
# TOOLS THAT SHOULD BE USED RARELY
@cli.command(name="zip-reads-and-barcodes")
@option(
"-i",
"--input",
type=Path(exists=True, dir_okay=False),
required=True,
help="reads fastq file",
)
@option(
"-b",
"--barcodes",
type=Path(exists=True, dir_okay=False),
required=True,
help="indexes/barcodes fastq file",
)
@option(
"-m",
"--mapping",
type=Path(exists=True, dir_okay=False),
required=True,
help="barcode to sample mapping (tab-delim, no header line)",
)
@option(
"-o", "--output", type=Path(exists=False), required=True, help="output directory"
)
@option(
"-z", "--compress-output", is_flag=True, help="gzip-compress output fastq files"
)
@option(
"-n",
"--no-wrap",
is_flag=True,
help="fastq inputs are not wrapped (i.e., 4 lines per record)",
)
def zip_reads_barcodes(input, barcodes, mapping, output, compress_output, no_wrap):
"""Zip reads with barcodes and split into files.
Some older versions of the Illumina pipeline would not annotate the reads
with their corresponding barcodes, but would leave the barcode reads in a
separate fastq file. This tool will take both fastq files and will modify
the main fastq record to add the barcode to the header line (in the same
    place Illumina would put it). It will then write one file per sample as
provided in the mapping.
This should only be necessary on older data files. Newer pipelines that use
bcl2fastq2 or the "generate fastq" pipeline in Basespace (starting 9/2016)
should not require this.
This tool requires that the reads are presented in the same order in the
two input files (which should be the case).
This tool should be used very rarely.
"""
from .utils import load_mapping, edit1_mapping
if no_wrap:
from .utils import read_fastq_nowrap as fastq_parser
else:
from .utils import readfq as fastq_parser
os.makedirs(output, mode=0o755)
input = osp.abspath(input)
barcodes = osp.abspath(barcodes)
# generate all possible edit-1 BCs
bc2sample = edit1_mapping(load_mapping(mapping))
with open_maybe_compressed(input, "r") as r_h, open_maybe_compressed(
barcodes, "r"
) as b_h:
# open file handles for each sample
ext = "fastq.gz" if compress_output else "fastq"
output_handles = {
s: open_maybe_compressed(
pjoin(output, "{s}.{ext}".format(s=s, ext=ext)), "w"
)
for s in set(bc2sample.values())
}
try:
for (r_n, r_s, r_q), (b_n, b_s, b_q) in zip(
tqdm(fastq_parser(r_h)), fastq_parser(b_h)
):
assert r_n.split(maxsplit=1)[0] == b_n.split(maxsplit=1)[0]
try:
print(
"@{r_n}\n{r_s}\n+\n{r_q}".format(r_n=r_n, r_s=r_s, r_q=r_q),
file=output_handles[bc2sample[b_s]],
)
except KeyError:
continue
finally:
for h in output_handles.values():
h.close()
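# Example of the barcode-to-sample mapping consumed above (tab-delimited, no
# header line; barcodes and sample names are hypothetical, and the
# barcode-first column order is an assumption):
#
#     ACGTACGT<TAB>sample_A
#     TTGCATGC<TAB>sample_B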
@cli.command(name="merge-columns")
@option(
"-i", "--input", required=True, help="input path (directory of tab-delim files)"
)
@option("-o", "--output", required=True, help="output path")
@option(
"-m",
"--method",
type=Choice(["iter", "outer"]),
default="iter",
help="merge/join method",
)
@option(
"-p",
"--position",
type=int,
default=1,
help="the field position to merge (0-indexed)",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
def merge_columns(input, output, method, position, index_cols):
"""Merge tab-delimited files.
input must be a directory containing `.tsv` files to merge.
method: iter -- concurrently iterate over lines of all files; assumes
row-keys are identical in each file
method: outer -- bona fide outer join of data in each file; loads all files
into memory and joins using pandas
"""
input_dir = os.path.abspath(input)
output_file = os.path.abspath(output)
input_files = glob(pjoin(input_dir, "*.tsv"))
if method == "iter":
file_iterators = [open(f, "r") for f in input_files]
file_headers = [osp.splitext(osp.basename(f))[0] for f in input_files]
with open(output_file, "w") as op:
# iterate through lines
for lines in zip(*file_iterators):
fields_array = [
[field.strip() for field in line.split("\t")] for line in lines
]
# check that join column is the same
for fields in fields_array[1:]:
assert fields_array[0][:index_cols] == fields[:index_cols]
merged_fields = fields_array[0][:index_cols] + [
f[position] for f in fields_array
]
print("\t".join(merged_fields), file=op)
elif method == "outer":
def load(path):
icols = list(range(index_cols))
ucols = icols + [position]
return pd.read_csv(
path, sep="\t", header=0, dtype=str, index_col=icols, usecols=ucols
)
dfs = [load(path) for path in input_files]
merge = lambda l, r: pd.merge(
l, r, how="outer", left_index=True, right_index=True
)
df = reduce(merge, dfs).fillna(0)
df.to_csv(output, sep="\t", float_format="%.2f")
@cli.command(name="normalize-counts")
@option("-i", "--input", required=True, help="input counts (tab-delim)")
@option("-o", "--output", required=True, help="output path")
@option(
"-m",
"--method",
type=Choice(["col-sum", "size-factors"]),
default="size-factors",
help="normalization method",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
def normalize_counts(input, output, method, index_cols):
"""Normalize count matrix.
Two methods for normalizing are available:
* Size factors from Anders and Huber 2010 (similar to TMM)
* Normalize to constant column-sum of 1e6
"""
df = pd.read_csv(input, sep="\t", header=0, index_col=list(range(index_cols)))
if method == "col-sum":
normalized = df / (df.sum() / 1e6)
elif method == "size-factors":
factors = compute_size_factors(df.values)
normalized = df / factors
normalized.to_csv(output, sep="\t", float_format="%.2f")
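# Minimal sketch (NOT the phip.utils.compute_size_factors implementation) of
# the Anders & Huber 2010 "median of ratios" size factors referenced in the
# docstring above. Assumes a (clones x samples) array of nonnegative counts;
# clones containing any zero count are skipped when building the
# geometric-mean reference.
def _size_factors_sketch(counts):
    counts = np.asarray(counts, dtype=float)
    usable = (counts > 0).all(axis=1)
    logs = np.log(counts[usable])
    log_geo_mean = logs.mean(axis=1, keepdims=True)  # per-clone reference
    # each sample's factor is the median ratio of its counts to the reference
    return np.exp(np.median(logs - log_geo_mean, axis=0))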
@cli.command(name="count-exact-matches")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input fastq (gzipped ok)",
)
@option(
    "-o",
    "--output",
    required=True,
    type=Path(exists=False, dir_okay=False),
    help="output counts file (tab-delim)",
)
@option(
"-r",
"--reference",
required=True,
type=Path(exists=True, dir_okay=False),
help="path to reference (input) counts file (tab-delim)",
)
@option(
"-l",
"--read-length",
required=True,
type=int,
help="read length (or, number of bases to use for matching)",
metavar="<read-length>",
)
def count_exact_matches(input, output, reference, read_length):
"""Match reads to reference exactly.
    Takes the first <read-length> bases of each read and attempts to match them
    exactly to the reference sequences. Computes the number of matches for each
reference.
"""
# load reference
seq_to_ref = OrderedDict()
with open(reference, "r") as ip:
for (ref_name, seq, _) in readfq(ip):
            seq_to_ref[seq[:read_length]] = ref_name
num_reads = 0
num_matched = 0
counts = Counter()
with gzip.open(input, "rt") as ip:
for (name, seq, _) in tqdm(readfq(ip)):
num_reads += 1
refname = seq_to_ref.get(seq)
if refname is not None:
num_matched += 1
counts[refname] += 1
print(
"num_reads: {}\nnum_matched: {}\nfrac_matched: {}".format(
num_reads, num_matched, num_matched / num_reads
),
file=sys.stderr,
)
with open(output[0], "w") as op:
print("id\t{}".format(wildcards.sample), file=op)
for (_, refname) in seq_to_ref.items():
print("{}\t{}".format(refname, counts[refname]), file=op)
# DEPRECATED TOOLS
@cli.command(name="split-fastq", deprecated=True)
@option("-i", "--input", required=True, help="input path (fastq file)")
@option("-o", "--output", required=True, help="output path (directory)")
@option("-n", "--chunk-size", type=int, required=True, help="number of reads per chunk")
def split_fastq(input, output, chunk_size):
"""Split fastq files into smaller chunks."""
input_file = osp.abspath(input)
output_dir = osp.abspath(output)
os.makedirs(output_dir, mode=0o755)
# convenience functions
output_file = lambda i: pjoin(output_dir, "part.{0}.fastq".format(i))
with open_maybe_compressed(input_file, "r") as input_handle:
num_processed = 0
file_num = 1
for (name, seq, qual) in readfq(input_handle):
if num_processed == 0:
op = open_maybe_compressed(output_file(file_num), "w")
print(f"@{name}\n{seq}\n+\n{qual}", file=op)
num_processed += 1
if num_processed == chunk_size:
op.close()
num_processed = 0
file_num += 1
if not op.closed:
op.close()
@cli.command(name="align-parts", deprecated=True)
@option("-i", "--input", required=True, help="input path (directory of fastq parts)")
@option("-o", "--output", required=True, help="output path (directory)")
@option(
"-x", "--index", required=True, help="bowtie index (e.g., as specified to bowtie2)"
)
@option(
"-b",
"--batch-submit",
default="",
help="batch submit command to prefix bowtie command invocation",
)
@option(
"-p",
"--threads",
default=1,
help="Number of threads to specify for each invocation of bowtie",
)
@option(
"-3",
"--trim3",
default=0,
help="Number of bases to trim off of 3-end (passed to bowtie)",
)
@option("-d", "--dry-run", is_flag=True, help="Dry run; print out commands to execute")
def align_parts(input, output, index, batch_submit, threads, trim3, dry_run):
"""Align fastq files to peptide reference."""
input_dir = osp.abspath(input)
output_dir = osp.abspath(output)
if not dry_run:
os.makedirs(output_dir, mode=0o755)
bowtie_cmd_template = (
"bowtie -n 3 -l 100 --best --nomaqround --norc -k 1 -p {threads} "
"-3 {trim3} --quiet {index} {input} {output}"
)
for input_file in glob(pjoin(input_dir, "*.fastq")):
output_file = pjoin(
output_dir, osp.splitext(osp.basename(input_file))[0] + ".aln"
)
bowtie_cmd = bowtie_cmd_template.format(
index=index,
input=input_file,
output=output_file,
threads=threads,
trim3=trim3,
)
submit_cmd = "{batch_cmd} {app_cmd}".format(
batch_cmd=batch_submit, app_cmd=bowtie_cmd
)
if dry_run:
print(submit_cmd.strip())
else:
p = Popen(
submit_cmd.strip(), shell=True, stdout=PIPE, universal_newlines=True
)
print(p.communicate()[0])
@cli.command(name="compute-counts", deprecated=True)
@option("-i", "--input", required=True, help="input path (directory of aln files)")
@option("-o", "--output", required=True, help="output path (directory)")
@option(
"-r",
"--reference",
required=True,
help="path to reference (input) counts file (tab-delim)",
)
def compute_counts(input, output, reference):
"""Compute counts from aligned bam file."""
input_dir = osp.abspath(input)
output_dir = osp.abspath(output)
os.makedirs(output_dir, mode=0o755)
# load reference (i.e., input) counts
ref_names = []
ref_counts = []
with open(reference, "r") as ip:
# burn header
_ = next(ip)
for line in ip:
fields = line.split("\t")
ref_names.append(fields[0].strip())
ref_counts.append(round(float(fields[1])))
# compute count dicts
for input_file in glob(pjoin(input_dir, "*.aln")):
print(input_file)
sys.stdout.flush()
counts = {}
sample = osp.splitext(osp.basename(input_file))[0]
# accumulate counts
with open(input_file, "r") as ip:
for line in ip:
ref_clone = line.split("\t")[2].strip()
counts[ref_clone] = counts.get(ref_clone, 0) + 1
# write counts
output_file = pjoin(output_dir, sample + ".tsv")
with open(output_file, "w") as op:
print("id\tinput\t{0}".format(sample), file=op)
for (ref_name, ref_count) in zip(ref_names, ref_counts):
record = "{0}\t{1}\t{2}".format(
ref_name, ref_count, counts.get(ref_name, 0)
)
print(record, file=op)
@cli.command(name="gen-covariates", deprecated=True)
@option("-i", "--input", required=True, help="input path to merged count file")
@option(
"-s", "--substring", required=True, help="substring to match against column names"
)
@option("-o", "--output", required=True, help="output file (recommend .tsv extension)")
def gen_covariates(input, substring, output):
"""Compute covariates for input to stat model.
The input (`-i`) should be the merged counts file. Each column name is
matched against the given substring. The median coverage-normalized value
of each row from the matching columns will be output into a tab-delim file.
This file can be used as the "reference" values for computing p-values.
"""
input_file = osp.abspath(input)
output_file = osp.abspath(output)
counts = pd.read_csv(input_file, sep="\t", header=0, index_col=0)
matched_columns = [col for col in counts.columns if substring in col]
sums = counts[matched_columns].sum()
normed = counts[matched_columns] / sums * sums.median()
medians = normed.median(axis=1)
medians.name = "input"
medians.to_csv(output_file, sep="\t", header=True, index_label="id")
@cli.command(name="compute-pvals", deprecated=True)
@option("-i", "--input", required=True, help="input path")
@option("-o", "--output", required=True, help="output path")
@option(
"-b",
"--batch-submit",
help="batch submit command to prefix pval command invocation",
)
@option(
"-d",
"--dry-run",
is_flag=True,
help="Dry run; print out commands to execute for batch submit",
)
def compute_pvals(input, output, batch_submit, dry_run):
"""Compute p-values from counts."""
from .genpois import (
estimate_GP_distributions,
lambda_theta_regression,
precompute_pvals,
)
if batch_submit is not None:
# run compute-pvals on each file using batch submit command
input_dir = osp.abspath(input)
output_dir = osp.abspath(output)
if not dry_run:
os.makedirs(output_dir, mode=0o755)
pval_cmd_template = "phip compute-pvals -i {input} -o {output}"
for input_file in glob(pjoin(input_dir, "*.tsv")):
sample = osp.splitext(osp.basename(input_file))[0]
output_file = pjoin(output_dir, "{0}.pvals.tsv".format(sample))
pval_cmd = pval_cmd_template.format(input=input_file, output=output_file)
submit_cmd = "{batch_cmd} {app_cmd}".format(
batch_cmd=batch_submit, app_cmd=pval_cmd
)
if dry_run:
print(submit_cmd.strip())
else:
p = Popen(
submit_cmd.strip(), shell=True, stdout=PIPE, universal_newlines=True
)
print(p.communicate()[0])
else:
# actually compute p-vals on single file
input_file = osp.abspath(input)
output_file = osp.abspath(output)
clones = []
samples = None
input_counts = []
output_counts = []
with open(input_file, "r") as ip:
header_fields = next(ip).split("\t")
samples = [f.strip() for f in header_fields[2:]]
for line in tqdm(ip, desc="Loading data"):
fields = line.split("\t")
clones.append(fields[0].strip())
input_counts.append(int(fields[1]))
output_counts.append(np.int_(fields[2:]))
input_counts = np.asarray(input_counts)
# pseudocounts to combat negative regressed theta:
output_counts = np.asarray(output_counts) + 1
uniq_input_values = list(set(input_counts))
# Estimate generalized Poisson distributions for every input count
(lambdas, thetas, idxs) = estimate_GP_distributions(
input_counts, output_counts, uniq_input_values
)
# Regression on all of the theta and lambda values computed
(lambda_fits, theta_fits) = lambda_theta_regression(lambdas, thetas, idxs)
# Precompute CDF for possible input-output combinations
uniq_combos = []
for i in range(output_counts.shape[1]):
uniq_combos.append(set(zip(input_counts, output_counts[:, i])))
log10pval_hash = precompute_pvals(lambda_fits, theta_fits, uniq_combos)
# Compute p-values for each clone using regressed GP parameters
with open(output_file, "w") as op:
header = "\t".join(["id"] + samples)
print(header, file=op)
for (clone, ic, ocs) in zip(
tqdm(clones, desc="Writing scores"), input_counts, output_counts
):
fields = [clone]
for (i, oc) in enumerate(ocs):
fields.append("{:.2f}".format(log10pval_hash[(i, ic, oc)]))
print("\t".join(fields), file=op)
| apache-2.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/sparse/frame.py | 1 | 29045 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231,E0202
from numpy import nan
from pandas.compat import lmap
from pandas import compat
import numpy as np
from pandas.compat.numpy import function as nv
from pandas.core.common import isnull, _try_sort
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.series import Series
from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,
_default_index)
import pandas.core.common as com
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
from pandas.core.generic import NDFrame
from pandas.sparse.series import SparseSeries, SparseArray
from pandas.util.decorators import Appender
import pandas.core.ops as ops
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame
index : array-like, optional
column : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
"""
_constructor_sliced = SparseSeries
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if isinstance(data, dict):
mgr = self._init_dict(data, index, columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns)
if dtype is not None:
mgr = mgr.astype(dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = _ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(np.nan, index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
NDFrame.__init__(self, mgr)
@property
def _constructor(self):
return SparseDataFrame
_constructor_sliced = SparseSeries
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = _ensure_index(columns)
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
else:
columns = Index(_try_sort(list(data.keys())))
if index is None:
index = extract_index(list(data.values()))
sp_maker = lambda x: SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=True)
sdict = DataFrame()
for k, v in compat.iteritems(data):
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v.values)
elif isinstance(v, SparseArray):
v = sp_maker(v.values)
else:
if isinstance(v, dict):
v = [v.get(i, nan) for i in index]
v = sp_maker(v)
sdict[k] = v
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_vec = np.empty(len(index))
nan_vec.fill(nan)
for c in columns:
if c not in sdict:
sdict[c] = sp_maker(nan_vec)
return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
data = _prep_ndarray(data, copy=False)
N, K = data.shape
if index is None:
index = _default_index(N)
if columns is None:
columns = _default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: %d vs. %d' %
(len(columns), K))
if len(index) != N:
raise ValueError('Index length mismatch: %d vs. %d' %
(len(index), N))
data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])
return self._init_dict(data, index, columns, dtype)
def __array_wrap__(self, result):
return SparseDataFrame(
result, index=self.index, columns=self.columns,
default_kind=self._default_kind,
default_fill_value=self._default_fill_value).__finalize__(self)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
_default_fill_value=self._default_fill_value,
_default_kind=self._default_kind)
def _unpickle_sparse_frame_compat(self, state):
""" original pickle format """
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in compat.iteritems(series):
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = dict((k, v.to_dense()) for k, v in compat.iteritems(self))
return DataFrame(data, index=self.index, columns=self.columns)
def astype(self, dtype):
raise NotImplementedError
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super(SparseDataFrame, self).copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
@property
def default_fill_value(self):
return self._default_fill_value
@property
def default_kind(self):
return self._default_kind
@property
def density(self):
"""
Ratio of non-sparse points to total (dense) data points
represented in the frame
"""
tot_nonsparse = sum([ser.sp_index.npoints
for _, ser in compat.iteritems(self)])
tot = len(self.index) * len(self.columns)
return tot_nonsparse / float(tot)
def fillna(self, value=None, method=None, axis=0, inplace=False,
limit=None, downcast=None):
new_self = super(SparseDataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast)
if not inplace:
self = new_self
# set the fill value if we are filling as a scalar with nothing special
# going on
if (value is not None and value == value and method is None and
limit is None):
self._default_fill_value = value
if not inplace:
return self
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
def _sanitize_column(self, key, value):
sp_maker = lambda x, index=None: SparseArray(
x, index=index, fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
def __getitem__(self, key):
"""
Retrieve column or slice from DataFrame
"""
if isinstance(key, slice):
date_rng = self.index[key]
return self.reindex(date_rng)
elif isinstance(key, (np.ndarray, list, Series)):
return self._getitem_array(key)
else:
return self._get_item_cache(key)
@Appender(DataFrame.get_value.__doc__, indents=0)
def get_value(self, index, col, takeable=False):
if takeable is True:
series = self._iget_item_cache(col)
else:
series = self._get_item_cache(col)
return series.get_value(index, takeable=takeable)
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Notes
-----
This method *always* returns a new object. It is currently not
particularly efficient (and potentially very expensive) but is provided
for API compatibility with DataFrame
Returns
-------
frame : DataFrame
"""
dense = self.to_dense().set_value(index, col, value, takeable=takeable)
return dense.to_sparse(kind=self._default_kind,
fill_value=self._default_fill_value)
def _slice(self, slobj, axis=0, kind=None):
if axis == 0:
new_index = self.index[slobj]
new_columns = self.columns
else:
new_index = self.index
new_columns = self.columns[slobj]
return self.reindex(index=new_index, columns=new_columns)
def xs(self, key, axis=0, copy=False):
"""
Returns a row (cross-section) from the SparseDataFrame as a Series
object.
Parameters
----------
key : some index contained in the index
Returns
-------
xs : Series
"""
if axis == 1:
data = self[key]
return data
i = self.index.get_loc(key)
data = self.take([i]).get_values()[0]
return Series(data, index=self.columns)
# ----------------------------------------------------------------------
# Arithmetic-related methods
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
if level is not None:
raise NotImplementedError("'level' argument is not supported")
if self.empty and other.empty:
return SparseDataFrame(index=new_index).__finalize__(self)
new_data = {}
new_fill_value = None
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
# if the fill values are the same use them? or use a valid one
other_fill_value = getattr(other, 'default_fill_value', np.nan)
if self.default_fill_value == other_fill_value:
new_fill_value = self.default_fill_value
elif np.isnan(self.default_fill_value) and not np.isnan(
other_fill_value):
new_fill_value = other_fill_value
elif not np.isnan(self.default_fill_value) and np.isnan(
other_fill_value):
new_fill_value = self.default_fill_value
return self._constructor(data=new_data, index=new_index,
columns=new_columns,
default_fill_value=new_fill_value
).__finalize__(self)
def _combine_match_index(self, other, func, level=None, fill_value=None):
new_data = {}
if fill_value is not None:
raise NotImplementedError("'fill_value' argument is not supported")
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_index = self.index.union(other.index)
this = self
if self.index is not new_index:
this = self.reindex(new_index)
if other.index is not new_index:
other = other.reindex(new_index)
for col, series in compat.iteritems(this):
new_data[col] = func(series.values, other.values)
# fill_value is a function of our operator
if isnull(other.fill_value) or isnull(self.default_fill_value):
fill_value = np.nan
else:
fill_value = func(np.float64(self.default_fill_value),
np.float64(other.fill_value))
return self._constructor(
new_data, index=new_index, columns=self.columns,
default_fill_value=fill_value).__finalize__(self)
def _combine_match_columns(self, other, func, level=None, fill_value=None):
# patched version of DataFrame._combine_match_columns to account for
# NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
# where 3.0 is numpy.float64 and series is a SparseSeries. Still
# possible for this to happen, which is bothersome
if fill_value is not None:
raise NotImplementedError("'fill_value' argument is not supported")
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_data = {}
union = intersection = self.columns
if not union.equals(other.index):
union = other.index.union(self.columns)
intersection = other.index.intersection(self.columns)
for col in intersection:
new_data[col] = func(self[col], float(other[col]))
return self._constructor(
new_data, index=self.index, columns=union,
default_fill_value=self.default_fill_value).__finalize__(self)
def _combine_const(self, other, func):
new_data = {}
for col, series in compat.iteritems(self):
new_data[col] = func(series, other)
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return SparseDataFrame(index=index, columns=self.columns)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = com._ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
if mask.all():
continue
values = series.values
# .take returns SparseArray
new = values.take(indexer)
if need_mask:
new = new.values
np.putmask(new, mask, fill_value)
new_series[col] = new
return SparseDataFrame(new_series, index=index, columns=self.columns,
default_fill_value=self._default_fill_value)
def _reindex_columns(self, columns, copy, level, fill_value, limit=None,
takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if com.notnull(fill_value):
raise NotImplementedError("'fill_value' argument is not supported")
if limit:
raise NotImplementedError("'limit' argument is not supported")
# TODO: fill value handling
sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns)
return SparseDataFrame(sdict, index=self.index, columns=columns,
default_fill_value=self._default_fill_value)
def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
limit=None, copy=False, allow_dups=False):
if method is not None or limit is not None:
raise NotImplementedError("cannot reindex with a method or limit "
"with sparse")
if fill_value is None:
fill_value = np.nan
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
if columns is None:
columns = self.columns
new_arrays = {}
for col in columns:
if col not in self:
continue
if row_indexer is not None:
new_arrays[col] = algos.take_1d(self[col].get_values(),
row_indexer,
fill_value=fill_value)
else:
new_arrays[col] = self[col]
return SparseDataFrame(new_arrays, index=index,
columns=columns).__finalize__(self)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
if on is not None:
raise NotImplementedError("'on' keyword parameter is not yet "
"implemented")
return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = SparseDataFrame(
{other.name: other},
default_fill_value=self._default_fill_value)
join_index = self.index.join(other.index, how=how)
this = self.reindex(join_index)
other = other.reindex(join_index)
this, other = this._maybe_rename_join(other, lsuffix, rsuffix)
from pandas import concat
return concat([this, other], axis=1, verify_integrity=True)
def _maybe_rename_join(self, other, lsuffix, rsuffix):
to_rename = self.columns.intersection(other.columns)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s'
% to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
this = self.rename(columns=lrenamer)
other = other.rename(columns=rrenamer)
else:
this = self
return this, other
def transpose(self, *args, **kwargs):
"""
Returns a DataFrame with the rows/columns switched.
"""
nv.validate_transpose(args, kwargs)
return SparseDataFrame(
self.values.T, index=self.columns, columns=self.index,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
T = property(transpose)
@Appender(DataFrame.count.__doc__)
def count(self, axis=0, **kwds):
return self.apply(lambda x: x.count(), axis=axis)
    def cumsum(self, axis=0, *args, **kwargs):
        """
        Return SparseDataFrame of cumulative sums over requested axis.

        Parameters
        ----------
        axis : {0, 1}
            0 for row-wise, 1 for column-wise

        Returns
        -------
        y : SparseDataFrame
        """
        nv.validate_cumsum(args, kwargs)
        return self.apply(lambda x: x.cumsum(), axis=axis)
    def apply(self, func, axis=0, broadcast=False, reduce=False):
        """
        Analogous to DataFrame.apply, for SparseDataFrame

        Parameters
        ----------
        func : function
            Function to apply to each column
        axis : {0, 1, 'index', 'columns'}
            Axis along which the function is applied
        broadcast : bool, default False
            For aggregation functions, return object of same size with values
            propagated

        Returns
        -------
        applied : Series or SparseDataFrame
        """
        if not len(self.columns):
            return self
        axis = self._get_axis_number(axis)

        if isinstance(func, np.ufunc):
            new_series = {}
            for k, v in compat.iteritems(self):
                applied = func(v)
                applied.fill_value = func(applied.fill_value)
                new_series[k] = applied
            return self._constructor(
                new_series, index=self.index, columns=self.columns,
                default_fill_value=self._default_fill_value,
                default_kind=self._default_kind).__finalize__(self)
        else:
            if not broadcast:
                return self._apply_standard(func, axis, reduce=reduce)
            else:
                return self._apply_broadcast(func, axis)
    def applymap(self, func):
        """
        Apply a function to a DataFrame that is intended to operate
        elementwise, i.e. like doing map(func, series) for each series in the
        DataFrame

        Parameters
        ----------
        func : function
            Python function, returns a single value from a single value

        Returns
        -------
        applied : DataFrame
        """
        return self.apply(lambda x: lmap(func, x))

def to_manager(sdf, columns, index):
    """ create and return the block manager from a dataframe of series,
    columns, index
    """

    # from BlockManager perspective
    axes = [_ensure_index(columns), _ensure_index(index)]

    return create_block_manager_from_arrays(
        [sdf[c] for c in columns], columns, axes)

def stack_sparse_frame(frame):
    """
    Only makes sense when fill_value is NaN
    """
    lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
    nobs = sum(lengths)

    # this is pretty fast
    minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)

    inds_to_concat = []
    vals_to_concat = []
    # TODO: Figure out whether this can be reached.
    # I think this currently can't be reached because you can't build a
    # SparseDataFrame with a non-np.NaN fill value (fails earlier).
    for _, series in compat.iteritems(frame):
        if not np.isnan(series.fill_value):
            raise TypeError('This routine assumes NaN fill value')

        int_index = series.sp_index.to_int_index()
        inds_to_concat.append(int_index.indices)
        vals_to_concat.append(series.sp_values)

    major_labels = np.concatenate(inds_to_concat)
    stacked_values = np.concatenate(vals_to_concat)

    index = MultiIndex(levels=[frame.index, frame.columns],
                       labels=[major_labels, minor_labels],
                       verify_integrity=False)

    lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
                   columns=['foo'])
    return lp.sortlevel(level=0)

def homogenize(series_dict):
    """
    Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
    corresponding to the locations where they all have data

    Parameters
    ----------
    series_dict : dict or DataFrame

    Notes
    -----
    Using the dumbest algorithm I could think of. Should put some more thought
    into this

    Returns
    -------
    homogenized : dict of SparseSeries
    """
    index = None

    need_reindex = False
    for _, series in compat.iteritems(series_dict):
        if not np.isnan(series.fill_value):
            raise TypeError('this method is only valid with NaN fill values')

        if index is None:
            index = series.sp_index
        elif not series.sp_index.equals(index):
            need_reindex = True
            index = index.intersect(series.sp_index)

    if need_reindex:
        output = {}
        for name, series in compat.iteritems(series_dict):
            if not series.sp_index.equals(index):
                series = series.sparse_reindex(index)

            output[name] = series
    else:
        output = series_dict

    return output
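
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): a minimal
# demonstration of homogenize(), assuming an older pandas release (< 1.0,
# matching the vintage of this module) in which Series.to_sparse() and
# SparseSeries still exist.  Guarded so it only runs when this file is
# executed directly.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    # Two NaN-filled SparseSeries whose data overlap only at position 2.
    s1 = pd.Series([1.0, np.nan, 3.0, np.nan]).to_sparse()
    s2 = pd.Series([np.nan, 2.0, 4.0, 8.0]).to_sparse()

    conformed = homogenize({"s1": s1, "s2": s2})
    for name, series in conformed.items():
        # Both series now share the SparseIndex of their common locations.
        print(name, series.sp_index)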

# use unaccelerated ops for sparse objects
ops.add_flex_arithmetic_methods(SparseDataFrame, use_numexpr=False,
                                **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(SparseDataFrame, use_numexpr=False,
                                   **ops.frame_special_funcs)
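
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): exercises the
# apply / applymap / cumsum / count behaviour documented in the methods above,
# assuming an older pandas release (< 1.0) in which SparseDataFrame and
# DataFrame.to_sparse() are still available.  Guarded so it only runs when
# this file is executed directly.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    dense = pd.DataFrame({"a": [1.0, np.nan, 3.0],
                          "b": [np.nan, 2.0, np.nan]})
    sdf = dense.to_sparse()                 # NaN is the default fill_value

    print(sdf.apply(np.sqrt))               # ufunc branch: result stays sparse
    print(sdf.applymap(lambda v: v * 10))   # elementwise map over each column
    print(sdf.cumsum())                     # column-wise cumulative sums
    print(sdf.count())                      # non-NaN count per column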
| mit |
djgagne/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================

An example to illustrate multi-output regression with decision trees.

The :ref:`decision trees <tree>` are used to predict simultaneously the
noisy x and y observations of a circle given a single underlying feature.
As a result, they learn local linear regressions approximating the circle.

We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor

# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))

# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)

# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)

# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
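
# ---------------------------------------------------------------------------
# Added sketch (not part of the original example): quantify the overfitting
# described in the docstring by comparing train vs. held-out mean squared
# error for each depth.  Assumes a scikit-learn release >= 0.18, which
# provides sklearn.model_selection; exact numbers vary with the random seed.
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

X_train, X_hold, y_train, y_hold = train_test_split(X, y, test_size=0.3,
                                                    random_state=1)
for depth in (2, 5, 8):
    model = DecisionTreeRegressor(max_depth=depth).fit(X_train, y_train)
    print("max_depth=%d  train MSE=%.3f  held-out MSE=%.3f"
          % (depth,
             mean_squared_error(y_train, model.predict(X_train)),
             mean_squared_error(y_hold, model.predict(X_hold))))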
| bsd-3-clause |