repo_name | path | copies | size | content | license
---|---|---|---|---|---
rohanp/scikit-learn
|
examples/feature_selection/plot_select_from_model_boston.py
|
146
|
1527
|
"""
===================================================
Feature selection using SelectFromModel and LassoCV
===================================================
Use the SelectFromModel meta-transformer along with LassoCV to select the two
most informative features from the Boston dataset.
"""
# Author: Manoj Kumar <[email protected]>
# License: BSD 3 clause
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
boston = load_boston()
X, y = boston['data'], boston['target']
# We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
clf = LassoCV()
# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Increase the threshold until the number of selected features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the meta-transformer.
while n_features > 2:
sfm.threshold += 0.1
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
# Plot the selected two features from X.
plt.title(
"Features selected from Boston using SelectFromModel with "
"threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
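# Optional follow-up (a sketch, not part of the original example):
# SelectFromModel implements the SelectorMixin interface, so the indices of
# the surviving features can be inspected directly.
selected_indices = sfm.get_support(indices=True)
print("Selected feature indices: %s" % (selected_indices,))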
|
bsd-3-clause
|
pyxll/pyxll-examples
|
matplotlib/embeddedplot.py
|
1
|
2963
|
"""
Example code showing how to draw a matplotlib figure embedded
in an Excel worksheet.
Matplotlib is used to plot a chart to an image, which is then
displayed as a Picture object in Excel.
"""
from pyxll import xl_func, xlfCaller
from pandas.stats.moments import ewma
import os
# matplotlib imports
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
# For interacting with Excel from Python
from pyxll import get_active_object
import win32com.client
@xl_func("string figname, "
"numpy_column<float> xs, "
"numpy_column<float> ys, "
"int span: string",
macro=True)
def mpl_plot_ewma_embedded(figname, xs, ys, span):
# create the figure and axes for the plot
fig = Figure(figsize=(8, 6), dpi=75, facecolor=(1, 1, 1), edgecolor=(0, 0, 0))
ax = fig.add_subplot(111)
# calculate the moving average
ewma_ys = ewma(ys, span=span)
# plot the data
ax.plot(xs, ys, alpha=0.4, label="Raw")
ax.plot(xs, ewma_ys, label="EWMA")
ax.legend()
# write the figure to a temporary image file
filename = os.path.join(os.environ["TEMP"], "xlplot_%s.png" % figname)
canvas = FigureCanvas(fig)
canvas.draw()
canvas.print_png(filename)
# Show the figure in Excel as a Picture object on the same sheet
# the function is being called from.
xl = xl_app()
caller = xlfCaller()
sheet = xl.Range(caller.address).Worksheet
# if a picture with the same figname already exists then get the position
# and size from the old picture and delete it.
for old_picture in sheet.Pictures():
if old_picture.Name == figname:
height = old_picture.Height
width = old_picture.Width
top = old_picture.Top
left = old_picture.Left
old_picture.Delete()
break
else:
# otherwise place the picture below the calling cell.
top_left = sheet.Cells(caller.rect.last_row+2, caller.rect.last_col+1)
top = top_left.Top
left = top_left.Left
width, height = fig.bbox.bounds[2:]
# insert the picture
# Ref: http://msdn.microsoft.com/en-us/library/office/ff198302%28v=office.15%29.aspx
picture = sheet.Shapes.AddPicture(Filename=filename,
LinkToFile=0, # msoFalse
SaveWithDocument=-1, # msoTrue
Left=left,
Top=top,
Width=width,
Height=height)
# set the name of the new picture so we can find it next time
picture.Name = figname
# delete the temporary file
os.unlink(filename)
return "[Plotted '%s']" % figname
def xl_app():
xl_window = get_active_object()
xl_app = win32com.client.Dispatch(xl_window).Application
return xl_app
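# Usage sketch (not part of the original example): once PyXLL has loaded this
# module, the function can be called from a worksheet as a formula, e.g.
#   =mpl_plot_ewma_embedded("fig1", A2:A101, B2:B101, 10)
# which plots the raw series and its EWMA and embeds the image next to the
# calling cell (or replaces a previous picture with the same name).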
|
unlicense
|
jlegendary/scikit-learn
|
sklearn/cross_decomposition/tests/test_pls.py
|
215
|
11427
|
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latent variables:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Check that PLSSVD doesn't return all possible components, but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy=False did not destroy the original data
# (we do want to check exact equality here)
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
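# Running only this module locally (a sketch; this scikit-learn version used
# the nose runner, as the nose.tools import above suggests):
#   nosetests sklearn/cross_decomposition/tests/test_pls.py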
|
bsd-3-clause
|
ashtonmv/twod_materials
|
twod_materials/intercalation/analysis.py
|
1
|
5583
|
from __future__ import print_function, division, unicode_literals
import os
import numpy as np
from scipy.spatial import ConvexHull
from twod_materials.utils import is_converged
from pymatgen.core.structure import Structure
from pymatgen.core.composition import Composition
from pymatgen.io.vasp.outputs import Vasprun
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import operator
def plot_ion_hull_and_voltages(ion, fmt='pdf'):
"""
Plots the phase diagram between the pure material and pure ion,
connecting the points on the convex hull of the phase diagram.
Args:
ion (str): name of atom that was intercalated, e.g. 'Li'.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
# Calculated with the relax() function in
# twod_materials.stability.startup. If you are using other input
# parameters, you need to recalculate these values!
ion_ev_fu = {'Li': -1.7540797, 'Mg': -1.31976062, 'Al': -3.19134607}
energy = Vasprun('vasprun.xml').final_energy
composition = Structure.from_file('POSCAR').composition
# Get the formula (with single-digit integers preceded by a '_').
twod_material = list(composition.reduced_formula)
twod_formula = str()
for i in range(len(twod_material)):
try:
int(twod_material[i])
twod_formula += '_{}'.format(twod_material[i])
except ValueError:
twod_formula += twod_material[i]
twod_ev_fu = energy / composition.get_reduced_composition_and_factor()[1]
data = [(0, 0, 0, twod_ev_fu)] # (at% ion, n_ions, E_F, abs_energy)
for directory in [
dir for dir in os.listdir(os.getcwd()) if os.path.isdir(dir)]:
if is_converged(directory):
os.chdir(directory)
energy = Vasprun('vasprun.xml').final_energy
composition = Structure.from_file('POSCAR').composition
ion_fraction = composition.get_atomic_fraction(ion)
no_ion_comp_dict = composition.as_dict()
no_ion_comp_dict.update({ion: 0})
no_ion_comp = Composition.from_dict(no_ion_comp_dict)
n_twod_fu = no_ion_comp.get_reduced_composition_and_factor()[1]
n_ions = composition[ion] / n_twod_fu
E_F = (
(energy - composition[ion] * ion_ev_fu[ion]
- twod_ev_fu * n_twod_fu)
/ composition.num_atoms
)
data.append((ion_fraction, n_ions, E_F, energy / n_twod_fu))
os.chdir('../')
data.append((1, 1, 0, ion_ev_fu[ion])) # Pure ion
sorted_data = sorted(data, key=operator.itemgetter(0))
# Determine which compositions are on the convex hull.
energy_profile = np.array([[item[0], item[2]]
for item in sorted_data if item[2] <= 0])
hull = ConvexHull(energy_profile)
convex_ion_fractions = [
energy_profile[vertex, 0] for vertex in hull.vertices]
convex_formation_energies = [
energy_profile[vertex, 1] for vertex in hull.vertices]
convex_ion_fractions.append(convex_ion_fractions.pop(0))
convex_formation_energies.append(convex_formation_energies.pop(0))
concave_ion_fractions = [
pt[0] for pt in sorted_data if pt[0] not in convex_ion_fractions]
concave_formation_energies = [
pt[2] for pt in sorted_data if pt[0] not in convex_ion_fractions]
voltage_profile = []
j = 0
k = 0
for i in range(1, len(sorted_data) - 1):
if sorted_data[i][0] in convex_ion_fractions:
voltage = -(
((sorted_data[i][3] - sorted_data[k][3])
- (sorted_data[i][1] - sorted_data[k][1]) * ion_ev_fu[ion])
/ (sorted_data[i][1] - sorted_data[k][1])
)
voltage_profile.append((sorted_data[k][0], voltage))
voltage_profile.append((sorted_data[i][0], voltage))
j += 1
k = i
voltage_profile.append((voltage_profile[-1][0], 0))
voltage_profile.append((1, 0))
voltage_profile_x = [tup[0] for tup in voltage_profile]
voltage_profile_y = [tup[1] for tup in voltage_profile]
ax = plt.figure(figsize=(14, 10)).gca()
ax.plot([0, 1], [0, 0], 'k--')
ax.plot(convex_ion_fractions, convex_formation_energies, 'b-', marker='o',
markersize=12, markeredgecolor='none')
ax.plot(concave_ion_fractions, concave_formation_energies, 'r', marker='o',
linewidth=0, markersize=12, markeredgecolor='none')
ax2 = ax.twinx()
ax2.plot(voltage_profile_x, voltage_profile_y, 'k-', marker='o')
ax.text(0, 0.002, r'$\mathrm{%s}$' % twod_formula, family='serif', size=24)
ax.text(0.99, 0.002, r'$\mathrm{%s}$' % ion, family='serif', size=24,
horizontalalignment='right')
ax.set_xticklabels(ax.get_xticks(), family='serif', size=20)
ax.set_yticklabels(ax.get_yticks(), family='serif', size=20)
ax2.set_yticklabels(ax2.get_yticks(), family='serif', size=20)
ax.set_xlabel('at% {}'.format(ion), family='serif', size=28)
ax.set_ylabel(r'$\mathrm{E_F\/(eV/atom)}$', size=28)
ax2.yaxis.set_label_position('right')
if ion == 'Li':
ax2.set_ylabel(r'$\mathrm{Potential\/vs.\/Li/Li^+\/(V)}$', size=28)
elif ion == 'Mg':
ax2.set_ylabel(r'$\mathrm{Potential\/vs.\/Mg/Mg^{2+}\/(V)}$', size=28)
elif ion == 'Al':
ax2.set_ylabel(r'$\mathrm{Potential\/vs.\/Al/Al^{3+}\/(V)}$', size=28)
plt.savefig('{}_hull.{}'.format(ion, fmt), transparent=True)
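# Usage sketch (not part of the original module): call from the directory that
# holds the pristine material's POSCAR and vasprun.xml together with one
# converged subdirectory per intercalated composition, e.g.
#   plot_ion_hull_and_voltages('Li', fmt='pdf')
# which writes 'Li_hull.pdf' in that directory.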
|
gpl-3.0
|
alvarofierroclavero/scikit-learn
|
examples/linear_model/plot_ransac.py
|
250
|
1673
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
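# Optional sanity check (a sketch, not part of the original example): with
# this synthetic setup the fraction of samples RANSAC keeps as inliers should
# be roughly (n_samples - n_outliers) / n_samples.
print("Inlier fraction: %.3f" % inlier_mask.mean())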
|
bsd-3-clause
|
CVML/scikit-learn
|
examples/cluster/plot_lena_compress.py
|
271
|
2229
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image-processing example Lena, an 8-bit grayscale,
512 x 512 pixel image, is used here to illustrate how
`k`-means can be used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
|
bsd-3-clause
|
yuruofeifei/mxnet
|
python/mxnet/model.py
|
13
|
41314
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import os
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function selects and creates a proper kvstore when given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper update mode for the local kvstore
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
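# Behavioural note (a sketch, not in the original source): with a single
# device and a non-distributed kvstore type, e.g.
#   kv, update_on_kvstore = _create_kvstore('local', 1, arg_params)
# the helper returns (None, False), so the caller falls back to local,
# updater-based parameter updates.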
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = 16
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
# pull back the weights
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# fake an index here so the optimizer creates a distinct
# state for the same parameter on different devices; TODO(mli):
# use a better solution later
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
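# The three accepted forms (a sketch, not in the original source):
#   _multiple_callbacks(None, params)         # silently does nothing
#   _multiple_callbacks(my_callback, params)  # calls the single callable
#   _multiple_callbacks([cb1, cb2], params)   # calls each callback in order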
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function works for a single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not to perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will update the NDArrays in `arg_params` and `aux_states` in place.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
if 'nccl' in kvstore.type:
_update_params_on_kvstore_nccl(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
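# Usage sketch (not in the original source): with prefix='mymodel' and
# epoch=10 this writes 'mymodel-symbol.json' and 'mymodel-0010.params',
# matching the naming convention documented above.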
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
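# Usage sketch (not in the original source), mirroring save_checkpoint above:
#   sym_loaded, arg_params, aux_params = load_checkpoint('mymodel', 10)
# reads 'mymodel-symbol.json' and 'mymodel-0010.params'.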
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
Training parameter, number of training epochs.
epoch_size : int, optional
Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
Whether to allow extra parameters that are not needed by the symbol
to be passed via ``aux_params`` and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
By default uses 'local'; often there is no need to change it for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
# init optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
optimizer = self.optimizer
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
One can also directly `load`/`save` from/to cloud storage(S3, HDFS)
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
- The symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
The number of training epochs.
epoch_size : int, optional
Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_data` is a ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local'; often there is no need to change it for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
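# Usage sketch (not part of the original module; FeedForward is deprecated in
# favor of mxnet.mod.Module, as the warning in __init__ notes). Here `net` is
# a Symbol and `train_iter`/`val_iter` are DataIters; the names are
# hypothetical:
#   model = FeedForward.create(symbol=net, X=train_iter, num_epoch=10,
#                              optimizer='sgd', learning_rate=0.1)
#   predictions = model.predict(val_iter)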
|
apache-2.0
|
BenLand100/ScratchCode
|
piab.py
|
1
|
1854
|
#!/usr/bin/python
"""
* Copyright 2011 by Benjamin J. Land (a.k.a. BenLand100)
*
* This file is part of ScratchCode.
*
* ScratchCode is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ScratchCode is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ScratchCode. If not, see <http://www.gnu.org/licenses/>.
"""
"""Particle in a Box, quantum mechanics simulation"""
from math import *
from numpy import *
from matplotlib.pyplot import *
dt = 1.00e-2
L = 5.00
m = 1.00e-2
hbar = 1.05e-34
i = complex(0,1)
def energy(n):
# E_n = n^2 * pi^2 * hbar^2 / (2 * m * L^2) for an infinite square well of width L
return (n**2*pi**2*hbar**2)/(2*L**2*m)
def wavefunc(n):
E = energy(n)
return lambda x,t: sqrt(2.0/L)*sin(n*pi/L*x)*exp(-i*E/hbar*t)
def sumwaves(levels):
waves = [wavefunc(n) for n in levels]
return lambda x,t: sum([wave(x,t) for wave in waves])/sqrt(len(levels))
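# The superposition above is Psi(x,t) = (1/sqrt(N)) * sum_n psi_n(x,t); the
# 1/sqrt(N) factor keeps an equal-weight mix of N orthonormal stationary
# states normalized.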
fig = figure()
ax = fig.add_subplot(111)
N = 500
dN = L / N
wave = sumwaves(range(1,5))
loc = [n*dN for n in range(0,N)]
prob = [wave(x,0) for x in loc]
prob = [p*p.conjugate() for p in prob]
lines, = ax.plot(loc,prob)
def draw():
print(draw.t*dt)
prob = [wave(x,draw.t*dt) for x in loc]
prob = [p*p.conjugate() for p in prob]
#integral = sum([(prob[j]+prob[j+1])/2.0*dN for j in range(0,N-1)])
#print integral
lines.set_data(loc,prob)
ax.figure.canvas.draw()
draw.t += 1
fig.canvas.manager.window.after(0, draw)
draw.t = 0
fig.canvas.manager.window.after(0, draw)
show()
|
gpl-3.0
|
imaculate/scikit-learn
|
examples/cluster/plot_cluster_comparison.py
|
58
|
4681
|
"""
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
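# The pattern repeated below for every estimator, in miniature (illustrative):
#   algorithm = cluster.MiniBatchKMeans(n_clusters=2)
#   algorithm.fit(X)
#   y_pred = algorithm.labels_   # or algorithm.predict(X) where labels_ is absent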
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not so big that running times become too long
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
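    # kneighbors_graph is directed (i being among j's nearest neighbours does
    # not imply the reverse), so averaging with the transpose gives the
    # symmetric connectivity expected by the agglomerative estimators below.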
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
|
bsd-3-clause
|
GonzaloFdeCordoba/MoU4Greece
|
Wonk-0.0.2.py
|
1
|
30515
|
#!/usr/bin/python
#import pylab as pl
import numpy as np
from scipy.optimize import fsolve
from GreeceModelRBFOC import GreeceModelRBFOC
from openpyxl.reader.excel import load_workbook
from openpyxl import Workbook
import sys, os
from Tkinter import *
import timeit
import tkMessageBox
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
from openpyxl.compat import range
from openpyxl.utils import get_column_letter
wb = load_workbook(filename = 'MoU.xlsx')
count = 1
#Buttons government
def click_tau_k():
try:
tau_k = float(entrada_tau_k.get())
etiqueta_tau_k.config(text = "t_k = "+ "%.3f" %tau_k)
except ValueError:
        etiqueta_tau_k.config(text = "Invalid value")
def click_tau_l():
try:
tau_l = float(entrada_tau_l.get())
etiqueta_tau_l.config(text = "t_l = "+ "%.3f" %tau_l)
except ValueError:
etiqueta_tau_l.config(text = "Invalid value")
def click_tau_c():
try:
tau_c = float(entrada_tau_c.get())
etiqueta_tau_c.config(text = "t_c = "+ "%.3f" %tau_c)
except ValueError:
etiqueta_tau_c.config(text = "Invalid value")
def click_tau_ss():
try:
tau_ss = float(entrada_tau_ss.get())
etiqueta_tau_ss.config(text = "t_ss = "+ "%.3f" %tau_ss)
except ValueError:
etiqueta_tau_ss.config(text = "Invalid value")
def click_tau_pi():
try:
tau_pi = float(entrada_tau_pi.get())
etiqueta_tau_pi.config(text ="t_pi = "+ "%.3f" %tau_pi)
except ValueError:
etiqueta_tau_pi.config(text ="Invalid value")
def click_cita_1():
try:
cita_1 = float(entrada_cita_1.get())
etiqueta_cita_1.config(text="c_1 = "+"%.3f" %cita_1)
except ValueError:
etiqueta_cita_1.config(text="Invalid value")
def click_cita_2():
try:
cita_2 = float(entrada_cita_2.get())
etiqueta_cita_2.config(text="c_2 = "+"%.3f" %cita_2)
except ValueError:
etiqueta_cita_2.config(text="Invalid value")
def click_cita_3():
try:
cita_3 = float(entrada_cita_3.get())
etiqueta_cita_3.config(text="c_3 = "+"%.3f" %cita_3)
except ValueError:
etiqueta_cita_3.config(text="Invalid value")
def click_cita_4():
try:
cita_4 = float(entrada_cita_4.get())
etiqueta_cita_4.config(text="c_4 = " +"%.3f" %cita_4)
except ValueError:
etiqueta_cita_4.config(text="Invalid value")
#Buttons Preferences
def click_gamma():
try:
gamma = float(entrada_gamma.get())
etiqueta_gamma.config(text="gamma = "+"%.3f" %gamma)
except ValueError:
etiqueta_gamma.config(text="Invalid value")
def click_rho():
try:
rho = float(entrada_rho.get())
etiqueta_rho.config(text="rho = "+"%.3f" %rho)
except ValueError:
etiqueta_rho.config(text="Invalid value")
def click_beta():
try:
beta = float(entrada_beta.get())
etiqueta_beta.config(text="beta = "+"%.3f" %beta)
except ValueError:
etiqueta_beta.config(text="Invalid value")
def click_omega():
try:
omega = float(entrada_omega.get())
etiqueta_omega.config(text="omega = "+"%.3f" %omega)
except ValueError:
etiqueta_omega.config(text="Invalid value")
#Buttons Technology
def click_A():
try:
A = float(entrada_A.get())
etiqueta_A.config(text="A = "+ "%.3f" %A)
except ValueError:
etiqueta_A.config(text="Invalid value")
def click_alpha_p():
try:
alpha_p = float(entrada_alpha_p.get())
etiqueta_alpha_p.config(text="a_p = "+"%.3f" %alpha_p)
except ValueError:
etiqueta_alpha_p.config(text="Invalid value")
def click_alpha_g():
try:
alpha_g = float(entrada_alpha_g.get())
etiqueta_alpha_g.config(text="a_g = "+"%.3f" %alpha_g)
except ValueError:
etiqueta_alpha_g.config(text="Invalid value")
def click_alpha_l():
try:
alpha_l = float(entrada_alpha_l.get())
etiqueta_alpha_l.config(text="a_l = "+"%.3f" %alpha_l)
except ValueError:
etiqueta_alpha_l.config(text="Invalid value")
def click_eta():
try:
eta = float(entrada_eta.get())
etiqueta_eta.config(text="eta = "+"%.3f" %eta)
except ValueError:
etiqueta_eta.config(text="Invalid value")
def click_mu():
try:
mu = float(entrada_mu.get())
etiqueta_mu.config(text="mu = "+"%.3f" %mu)
except ValueError:
etiqueta_mu.config(text="Invalid value")
def click_delta_p():
try:
delta_p = float(entrada_delta_p.get())
etiqueta_delta_p.config(text="d_p = "+"%.3f" %delta_p)
except ValueError:
etiqueta_delta_p.config(text="Invalid value")
def click_delta_g():
try:
delta_g = float(entrada_delta_g.get())
etiqueta_delta_g.config(text="d_g = "+"%.3f" %delta_g)
except ValueError:
etiqueta_delta_g.config(text="Invalid value")
#Buttons Environment
def click_debt():
try:
debt = float(entrada_debt.get())
etiqueta_debt.config(text="B = "+"%.3f" %debt)
except ValueError:
etiqueta_debt.config(text="Invalid value")
def click_bund_r():
try:
bund_r = float(entrada_bund_r.get())
etiqueta_bund_r.config(text="bund_r = "+"%.3f" %bund_r)
except ValueError:
etiqueta_bund_r.config(text="Invalid value")
def click_risk_p():
try:
risk_p = float(entrada_risk_p.get())
etiqueta_risk_p.config(text="r_p = "+"%.3f" %risk_p)
except ValueError:
etiqueta_risk_p.config(text="Invalid value")
def check_values():
count = 0
alpha_p = float(entrada_alpha_p.get())
alpha_g = float(entrada_alpha_g.get())
alpha_l = float(entrada_alpha_l.get())
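    # Note: the sum checks below rely on exact floating point equality; a
    # tolerance such as abs(alpha_p + alpha_g + alpha_l - 1.0) < 1e-9 would be
    # more forgiving of rounding in the entry boxes.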
if (alpha_p+alpha_g+alpha_l == 1.0):
count = count +1
pass
else:
tkMessageBox.showinfo('Sum error','alphas must add up to one')
cita_1 = float(entrada_cita_1.get())
cita_2 = float(entrada_cita_2.get())
cita_3 = float(entrada_cita_3.get())
cita_4 = float(entrada_cita_4.get())
if (cita_1+cita_2+cita_3+cita_4 == 1.0):
count = count +1
pass
else:
tkMessageBox.showinfo('Sum error','citas must add up to one')
rho = float(entrada_rho.get())
if (rho <= -1.0):
count = count +1
pass
else:
tkMessageBox.showinfo('rho error','rho < -1 required')
beta = float(entrada_beta.get())
if (beta < 1.0 and beta > 0):
count = count +1
pass
else:
tkMessageBox.showinfo('beta error','beta in (0,1) required')
A = float(entrada_A.get())
if (A > 0):
count = count +1
pass
else:
tkMessageBox.showinfo('A error','A > 0 required')
mu = float(entrada_mu.get())
if (mu < 1.0 and mu > 0):
count = count +1
pass
else:
tkMessageBox.showinfo('mu error','mu in (0,1) required')
eta = float(entrada_eta.get())
if (eta > 0):
count = count +1
pass
else:
tkMessageBox.showinfo('eta error','eta > 0 required')
delta_p = float(entrada_delta_p.get())
if (delta_p > 0):
count = count +1
pass
else:
tkMessageBox.showinfo('delta_p error','delta_p > 0 required')
delta_g = float(entrada_delta_g.get())
if (delta_g > 0):
count = count +1
pass
else:
tkMessageBox.showinfo('delta_g error','delta_g > 0 required')
alpha_p = float(entrada_alpha_p.get())
if (alpha_p > 0 and alpha_p < 1):
count = count +1
pass
else:
tkMessageBox.showinfo('alpha_p error','alpha_p in (0,1) required')
alpha_g = float(entrada_alpha_g.get())
if (alpha_g > 0 and alpha_g < 1):
count = count +1
pass
else:
tkMessageBox.showinfo('alpha_g error','alpha_g in (0,1) required')
alpha_l = float(entrada_alpha_l.get())
if (alpha_l > 0 and alpha_l < 1):
count = count +1
pass
else:
tkMessageBox.showinfo('alpha_l error','alpha_l in (0,1) required')
cita_1 = float(entrada_cita_1.get())
if (cita_1 > 0 and cita_1 < 1):
count = count +1
pass
else:
tkMessageBox.showinfo('cita_1 error','cita_1 in (0,1) required')
cita_2 = float(entrada_cita_2.get())
if (cita_2 > 0 and cita_2 < 1):
count = count +1
pass
else:
tkMessageBox.showinfo('cita_2 error','cita_2 in (0,1) required')
cita_3 = float(entrada_cita_3.get())
if (cita_3 > 0 and cita_3 < 1):
count = count +1
pass
else:
tkMessageBox.showinfo('cita_3 error','cita_3 in (0,1) required')
cita_4 = float(entrada_cita_4.get())
if (cita_4 > 0 and cita_4 < 1):
count = count +1
pass
else:
tkMessageBox.showinfo('cita_4 error','cita_4 in (0,1) required')
if count == 16:
#os.system("./GreekCalibrator.py")
run_program()
else:
print count
pass
def call_back_gov(sheet_name):
sheet = wb.get_sheet_by_name(sheet_name)
entrada_tau_k.delete(0, END)
entrada_tau_k.insert(END, sheet['B2'].value)
entrada_tau_l.delete(0, END)
entrada_tau_l.insert(END, sheet['B3'].value)
entrada_tau_c.delete(0, END)
entrada_tau_c.insert(END, sheet['B4'].value)
entrada_tau_ss.delete(0, END)
entrada_tau_ss.insert(END, sheet['B5'].value)
entrada_tau_pi.delete(0, END)
entrada_tau_pi.insert(END, sheet['B6'].value)
entrada_cita_1.delete(0, END)
entrada_cita_1.insert(END, sheet['B7'].value)
entrada_cita_2.delete(0, END)
entrada_cita_2.insert(END, sheet['B8'].value)
entrada_cita_3.delete(0, END)
entrada_cita_3.insert(END, sheet['B9'].value)
entrada_cita_4.delete(0, END)
entrada_cita_4.insert(END, sheet['B10'].value)
def call_back_pref(sheet_name):
sheet = wb.get_sheet_by_name(sheet_name)
entrada_gamma.delete(0, END)
entrada_gamma.insert(END, sheet['B26'].value)
entrada_beta.delete(0, END)
entrada_beta.insert(END, sheet['B25'].value)
entrada_rho.delete(0, END)
entrada_rho.insert(END, sheet['B23'].value)
entrada_omega.delete(0, END)
entrada_omega.insert(END, sheet['B22'].value)
def call_back_tech(sheet_name):
sheet = wb.get_sheet_by_name(sheet_name)
entrada_alpha_p.delete(0, END)
entrada_alpha_p.insert(END, sheet['B13'].value)
entrada_alpha_g.delete(0, END)
entrada_alpha_g.insert(END, sheet['B14'].value)
entrada_alpha_l.delete(0, END)
entrada_alpha_l.insert(END, sheet['B15'].value)
entrada_A.delete(0, END)
entrada_A.insert(END, sheet['B12'].value)
entrada_mu.delete(0, END)
entrada_mu.insert(END, sheet['B19'].value)
entrada_eta.delete(0, END)
entrada_eta.insert(END, sheet['B18'].value)
entrada_delta_p.delete(0, END)
entrada_delta_p.insert(END, sheet['B16'].value)
entrada_delta_g.delete(0, END)
entrada_delta_g.insert(END, sheet['B17'].value)
def call_back_env(sheet_name):
sheet = wb.get_sheet_by_name(sheet_name)
entrada_debt.delete(0, END)
entrada_debt.insert(END, sheet['B28'].value)
entrada_bund_r.delete(0, END)
entrada_bund_r.insert(END, sheet['B29'].value)
entrada_risk_p.delete(0, END)
entrada_risk_p.insert(END, sheet['B30'].value)
def call_back_all(sheet_name):
sheet = wb.get_sheet_by_name(sheet_name)
call_back_env(sheet_name)
call_back_tech(sheet_name)
call_back_pref(sheet_name)
call_back_gov(sheet_name)
#APP > Main window
app = Tk()
app.title("MoU Calculator")
app.geometry("700x730")
app.resizable(0,0)
#VP > Principal canvas
vp = Frame(app, bd = 3, relief=RAISED)
vp.grid(column=1, row=0, padx=(50,50), pady=(10,10))
vp.columnconfigure(0,weight=1)
vp.rowconfigure(0,weight=1)
vp1 = Frame(app, bd = 3, relief=RAISED)
vp1.grid(column=1, row=10, padx=(50,50), pady=(10,10))
vp2 = Frame(app, bd = 3, relief=RAISED)
vp2.grid(column=1, row=20, padx=(50,50), pady=(10,10))
vp3 = Frame(app, bd = 3, relief=RAISED)
vp3.grid(column=1, row=40, padx=(50,50), pady=(10,10))
vp4 = Frame(app, bd = 3, relief=RAISED)
vp4.grid(column=1, row=50, padx=(50,50), pady=(10,10))
T1 = Message(app, text = "Government parameters")
T1.config(width=200)
T1.grid(row=0, column=0 )
T2 = Message(app, text = "Preferences parameters")
T2.config(width=200)
T2.grid(row=10, column=0 )
T3 = Message(app, text = "Technology parameters")
T3.config(width=200)
T3.grid(row=20, column=0 )
T4 = Message(app, text = "Environment")
T4.config(width=200)
T4.grid(row=40, column=0 )
#Menus
topMenu = Menu(app)
app.config(menu=topMenu)
fileMenu = Menu(topMenu)
topMenu.add_cascade(label="File", menu=fileMenu)
fileMenu.add_command(label="Save Experiment", command=lambda: call_back_save())
env_menu = Menu(fileMenu)
fileMenu.add_cascade(label="Load Environment", menu=env_menu)
env_menu.add_command(label="Greece", command = lambda: call_back_env('Greece'))
env_menu.add_command(label="Germany", command= lambda: call_back_env('Germany'))
env_menu.add_command(label="Other", command= lambda: call_back_env('Other'))
tech_menu = Menu(fileMenu)
fileMenu.add_cascade(label="Load Technology", menu=tech_menu)
tech_menu.add_command(label="Greece", command = lambda: call_back_tech('Greece'))
tech_menu.add_command(label="Germany", command= lambda: call_back_tech('Germany'))
tech_menu.add_command(label="Other", command= lambda: call_back_tech('Other'))
pref_menu = Menu(fileMenu)
fileMenu.add_cascade(label="Load Preferences", menu=pref_menu)
pref_menu.add_command(label="Greece", command = lambda: call_back_pref('Greece'))
pref_menu.add_command(label="Germany", command= lambda: call_back_pref('Germany'))
pref_menu.add_command(label="Other", command= lambda: call_back_pref('Other'))
gov_menu = Menu(fileMenu)
fileMenu.add_cascade(label="Load Government", menu=gov_menu)
gov_menu.add_command(label="Greece", command = lambda: call_back_gov('Greece'))
gov_menu.add_command(label="Germany", command= lambda: call_back_gov('Germany'))
gov_menu.add_command(label="Other", command= lambda: call_back_gov('Other'))
all_menu = Menu(fileMenu)
fileMenu.add_cascade(label="Load all", menu=all_menu)
all_menu.add_command(label="Greece", command = lambda: call_back_all('Greece'))
all_menu.add_command(label="Germany", command= lambda: call_back_all('Germany'))
all_menu.add_command(label="Other", command= lambda: call_back_all('Other'))
fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=app.destroy)
toolsMenu = Menu(topMenu)
topMenu.add_cascade(label="Tools", menu=toolsMenu)
toolsMenu.add_command(label="Run experiment", command=check_values)
#Frame 1 (Government)
etiqueta_tau_k = Label(vp, text=" Value ")
etiqueta_tau_k.grid(column=2, row=2, sticky=(W,E))
boton_tau_k = Button(vp, text="tau_k", command=click_tau_k)
boton_tau_k.grid(column=1, row=1)
entrada_tau_k = Entry(vp, width =7)
entrada_tau_k.insert(END, '0.164')
entrada_tau_k.grid(column=2, row=1)
etiqueta_tau_l = Label(vp, text=" Value ")
etiqueta_tau_l.grid(column=2, row=4, sticky=(W,E))
boton_tau_l = Button(vp, text="tau_l", command=click_tau_l)
boton_tau_l.grid(column=1, row=3)
entrada_tau_l = Entry(vp, width =7)
entrada_tau_l.insert(END, '0.41')
entrada_tau_l.grid(column=2, row=3)
etiqueta_tau_c = Label(vp, text=" Value ")
etiqueta_tau_c.grid(column=2, row=6, sticky=(W,E))
boton_tau_c = Button(vp, text="tau_c", command=click_tau_c)
boton_tau_c.grid(column=1, row=5)
entrada_tau_c = Entry(vp, width =7)
entrada_tau_c.insert(END, '0.148')
entrada_tau_c.grid(column=2, row=5)
etiqueta_tau_ss = Label(vp, text=" Value ")
etiqueta_tau_ss.grid(column=2, row=8, sticky=(W,E))
boton_tau_ss = Button(vp, text="tau_ss", command=click_tau_ss)
boton_tau_ss.grid(column=1, row=7)
entrada_tau_ss = Entry(vp, width =7)
entrada_tau_ss.insert(END, '0.356')
entrada_tau_ss.grid(column=2, row=7)
etiqueta_tau_pi = Label(vp, text=" Value ")
etiqueta_tau_pi.grid(column=2, row=10, sticky=(W,E))
boton_tau_pi = Button(vp, text="tau_pi", command=click_tau_pi)
boton_tau_pi.grid(column=1, row=9)
entrada_tau_pi = Entry(vp, width =7)
entrada_tau_pi.insert(END, '0.25')
entrada_tau_pi.grid(column=2, row=9)
etiqueta_cita_1 = Label(vp, text=" Value ")
etiqueta_cita_1.grid(column=6, row=2, sticky=(W,E))
boton_cita_1 = Button(vp, text="cita_1", command=click_cita_1)
boton_cita_1.grid(column=5, row=1)
entrada_cita_1 = Entry(vp, width =7)
entrada_cita_1.insert(END, '0.4467')
entrada_cita_1.grid(column=6, row=1)
etiqueta_cita_2 = Label(vp, text=" Value ")
etiqueta_cita_2.grid(column=6, row=4, sticky=(W,E))
boton_cita_2 = Button(vp, text="cita_2", command=click_cita_2)
boton_cita_2.grid(column=5, row=3)
entrada_cita_2 = Entry(vp, width =7)
entrada_cita_2.insert(END, '0.074')
entrada_cita_2.grid(column=6, row=3)
etiqueta_cita_3 = Label(vp, text=" Value ")
etiqueta_cita_3.grid(column=6, row=6, sticky=(W,E))
boton_cita_3 = Button(vp, text="cita_3", command=click_cita_3)
boton_cita_3.grid(column=5, row=5)
entrada_cita_3 = Entry(vp, width =7)
entrada_cita_3.insert(END, '0.3246')
entrada_cita_3.grid(column=6, row=5)
etiqueta_cita_4 = Label(vp, text=" Value ")
etiqueta_cita_4.grid(column=6, row=8, sticky=(W,E))
boton_cita_4 = Button(vp, text='cita_4', command=click_cita_4)
boton_cita_4.grid(column=5, row=7)
entrada_cita_4 = Entry(vp, width = 7)
entrada_cita_4.insert(END, '0.1547')
entrada_cita_4.grid(column=6, row=7)
#Frame 2 (Preferences)
etiqueta_gamma = Label(vp1, text=" Value ")
etiqueta_gamma.grid(column=2, row=2, sticky=(W,E))
boton_gamma = Button(vp1, text="gamma", command=click_gamma)
boton_gamma.grid(column=1, row=1)
entrada_gamma = Entry(vp1, width =7)
entrada_gamma.insert(END, '0.8361')
entrada_gamma.grid(column=2, row=1)
etiqueta_rho = Label(vp1, text=" Value ")
etiqueta_rho.grid(column=4, row=2, sticky=(W,E))
boton_rho = Button(vp1, text="rho", command=click_rho)
boton_rho.grid(column=3, row=1)
entrada_rho = Entry(vp1, width =7)
entrada_rho.insert(END, '-1.0')
entrada_rho.grid(column=4, row=1)
etiqueta_beta = Label(vp1, text=" Value ")
etiqueta_beta.grid(column=2, row=4, sticky=(W,E))
boton_beta = Button(vp1, text="beta", command=click_beta)
boton_beta.grid(column=1, row=3)
entrada_beta = Entry(vp1, width =7)
entrada_beta.insert(END, '0.9606')
entrada_beta.grid(column=2, row=3)
etiqueta_omega = Label(vp1, text=" Value ")
etiqueta_omega.grid(column=4, row=4, sticky=(W,E))
boton_omega = Button(vp1, text="omega", command=click_omega)
boton_omega.grid(column=3, row=3)
entrada_omega = Entry(vp1, width =7)
entrada_omega.insert(END, '0.0806')
entrada_omega.grid(column=4, row=3)
#Frame 3 (Technology)
etiqueta_alpha_p = Label(vp2, text=" Value ")
etiqueta_alpha_p.grid(column=2, row=4, sticky=(W,E))
boton_alpha_p = Button(vp2, text="alpha_p", command=click_alpha_p)
boton_alpha_p.grid(column=1, row=3)
entrada_alpha_p = Entry(vp2, width =7)
entrada_alpha_p.insert(END, '0.3065')
entrada_alpha_p.grid(column=2, row=3)
etiqueta_alpha_g = Label(vp2, text=" Value ")
etiqueta_alpha_g.grid(column=2, row=6, sticky=(W,E))
boton_alpha_g = Button(vp2, text="alpha_g", command=click_alpha_g)
boton_alpha_g.grid(column=1, row=5)
entrada_alpha_g = Entry(vp2, width =7)
entrada_alpha_g.insert(END, '0.1082')
entrada_alpha_g.grid(column=2, row=5)
etiqueta_alpha_l = Label(vp2, text=" Value ")
etiqueta_alpha_l.grid(column=2, row=8, sticky=(W,E))
boton_alpha_l = Button(vp2, text="alpha_l", command=click_alpha_l)
boton_alpha_l.grid(column=1, row=7)
entrada_alpha_l = Entry(vp2, width =7)
entrada_alpha_l.insert(END, '0.5853')
entrada_alpha_l.grid(column=2, row=7)
etiqueta_eta = Label(vp2, text=" Value ")
etiqueta_eta.grid(column=6, row=8, sticky=(W,E))
boton_eta = Button(vp2, text="eta", command=click_eta)
boton_eta.grid(column=5, row=7)
entrada_eta = Entry(vp2, width =7)
entrada_eta.insert(END, '0.47789')
entrada_eta.grid(column=6, row=7)
etiqueta_mu = Label(vp2, text=" Value ")
etiqueta_mu.grid(column=6, row=6, sticky=(W,E))
boton_mu = Button(vp2, text="mu", command=click_mu)
boton_mu.grid(column=5, row=5)
entrada_mu = Entry(vp2, width =7)
entrada_mu.insert(END, '0.6008')
entrada_mu.grid(column=6, row=5)
etiqueta_A = Label(vp2, text=" Value ")
etiqueta_A.grid(column=6, row=4, sticky=(W,E))
boton_A = Button(vp2, text="A", command=click_A)
boton_A.grid(column=5, row=3)
entrada_A = Entry(vp2, width =7)
entrada_A.insert(END, '1.6044')
entrada_A.grid(column=6, row=3)
etiqueta_delta_p = Label(vp2, text=" Value ")
etiqueta_delta_p.grid(column=8, row=4, sticky=(W,E))
boton_delta_p = Button(vp2, text="delta_p", command=click_delta_p)
boton_delta_p.grid(column=7, row=3)
entrada_delta_p = Entry(vp2, width =7)
entrada_delta_p.insert(END, '0.08')
entrada_delta_p.grid(column=8, row=3)
etiqueta_delta_g = Label(vp2, text=" Value ")
etiqueta_delta_g.grid(column=8, row=6, sticky=(W,E))
boton_delta_g = Button(vp2, text="delta_g", command=click_delta_g)
boton_delta_g.grid(column=7, row=5)
entrada_delta_g = Entry(vp2, width =7)
entrada_delta_g.insert(END, '0.04')
entrada_delta_g.grid(column=8, row=5)
#Frame 4 (Environment)
etiqueta_debt = Label(vp3, text=" Value ")
etiqueta_debt.grid(column=2, row=2, sticky=(W,E))
boton_debt = Button(vp3, text="B", command=click_debt)
boton_debt.grid(column=1, row=1)
entrada_debt = Entry(vp3, width =7)
entrada_debt.insert(END, '110')
entrada_debt.grid(column=2, row=1)
etiqueta_bund_r = Label(vp3, text=" Value ")
etiqueta_bund_r.grid(column=6, row=2, sticky=(W,E))
boton_bund_r = Button(vp3, text="Bund R", command=click_bund_r)
boton_bund_r.grid(column=5, row=1)
entrada_bund_r = Entry(vp3, width =7)
entrada_bund_r.insert(END, '0.041')
entrada_bund_r.grid(column=6, row=1)
etiqueta_risk_p = Label(vp3, text=" Value ")
etiqueta_risk_p.grid(column=6, row=4, sticky=(W,E))
boton_risk_p = Button(vp3, text="Premium", command=click_risk_p)
boton_risk_p.grid(column=5, row=3)
entrada_risk_p = Entry(vp3, width =7)
entrada_risk_p.insert(END, '0.0')
entrada_risk_p.grid(column=6, row=3)
def run_program():
#Basic data and calibration for 2002-2006
#Env
bund_r = float(entrada_bund_r.get())
risk_p = float(entrada_risk_p.get())
B = float(entrada_debt.get())
RB = bund_r+risk_p
RB1 = RB
#Gov
tauk = float(entrada_tau_k.get())
taus = float(entrada_tau_ss.get())
tauc = float(entrada_tau_c.get())
taul = float(entrada_tau_l.get())
taupi = float(entrada_tau_pi.get())
theta1 = float(entrada_cita_1.get())
theta2 = float(entrada_cita_2.get())
theta3 = float(entrada_cita_3.get())
theta4 = float(entrada_cita_4.get())
#Pref
beta = float(entrada_beta.get())
rho = float(entrada_rho.get())
omega = float(entrada_omega.get())
gamma = float(entrada_gamma.get())
#Tech
deltap = float(entrada_delta_p.get())
deltag = float(entrada_delta_g.get())
alphap = float(entrada_alpha_p.get())
alphag = float(entrada_alpha_g.get())
alphal = float(entrada_alpha_l.get())
eta = float(entrada_eta.get())
mu = float(entrada_mu.get())
A = float(entrada_A.get())
#Misc
pi_c = 1
H = 100
#Compute a steady state for each value of G/Y
tic=timeit.default_timer()
    init = 0.01   # Lowest G/Y ratio
    final = 0.65  # Highest G/Y ratio
    T = 1000      # Density 1/T
def g(x):
return GreeceModelRBFOC(x, param)
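    # fsolve only evaluates g inside the loop below, after `param` has been
    # assigned for the current G/Y ratio, so the closure always picks up the
    # current parameter vector.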
varNames = ["Kpss", "Kgss", "Lpss", "Lgss", "Bss", "Ipss", "Igss", "Lss", \
"Yss", "Rss", "PmKgss", "Wpss", "PmLgss", "Gss", "Cgss", "Zss", \
"Cpss", "Css", "Wgss", "PIss", "IFss", "ratGY"]
for name in varNames:
globals()[name] = np.zeros(T)
x0 = [237, 83, 46, 11, 110]
for t in range(T):
ratGY[t] = init+(final-init)*t/(T-1)
param = [alphap, alphag, RB1, deltap, deltag, gamma, rho, theta1, \
theta2, theta3, pi_c, mu, eta, omega, tauc, taul, tauk, taus, \
taupi, H, A, ratGY[t], RB]
crit = 1e-10
maxit = 1000
sol = fsolve(g, x0, xtol=1.e-06)
Kpss[t] = sol[0]
Kgss[t] = sol[1]
Lpss[t] = sol[2]
Lgss[t] = sol[3]
Bss[t] = sol[4]
Ipss[t] = deltap*Kpss[t]
Igss[t] = deltag*Kgss[t]
Lss[t] = Lgss[t]+Lpss[t]
Yss[t] = A*Kpss[t]**alphap*Kgss[t]**alphag*(mu*Lpss[t]**eta+ \
(1-mu)*Lgss[t]**eta)**(alphal/eta)
Rss[t] = alphap*A*Kpss[t]**(alphap-1)*Kgss[t]**alphag*(mu*Lpss[t]** \
eta+(1-mu)*Lgss[t]**eta)**(alphal/eta)
PmKgss[t] = alphag*A*Kpss[t]**alphap*Kgss[t]**(alphag-1)*(mu*Lpss[t]** \
eta+(1-mu)*Lgss[t]**eta)**(alphal/eta)
Wpss[t] = (alphal*A*mu*Lpss[t]**(eta-1)*Kpss[t]**alphap*Kgss[t]**alphag* \
(mu*Lpss[t]**eta+(1-mu)*Lgss[t]**eta)**(alphal/eta-1))/(1+taus)
PmLgss[t] = (alphal*A*(1-mu)*Lgss[t]**(eta-1)*Kpss[t]**alphap*Kgss[t]** \
alphag*(mu*Lpss[t]**eta+(1-mu)*Lgss[t]**eta)** \
(alphal/eta-1))/(1+taus)
Gss[t] = ratGY[t]*Yss[t]
Cgss[t] = theta1*Gss[t]
Zss[t] = theta4*Gss[t]
Cpss[t] = Yss[t]-Ipss[t]-Igss[t]-Cgss[t]-RB*Bss[t]
Css[t] = Cpss[t]+pi_c*Cgss[t]
        Wgss[t] = (omega/(1-omega))**(-1/(2*rho))*(theta3*Gss[t]/(1+taus))**(0.5)
PIss[t] = Yss[t]-(1+taus)*Wpss[t]*Lpss[t]-Rss[t]*Kpss[t]
IFss[t] = tauc*Cpss[t]+(taul+taus)*(Wpss[t]*Lpss[t]+Wgss[t]*Lgss[t])+ \
tauk*(Rss[t]-deltap)*Kpss[t]+taupi*PIss[t]
tb = t
if Bss[t] <= 0:
break
x0 = [Kpss[t], Kgss[t], Lpss[t], Lgss[t], Bss[t]]
def call_back_save():
wb = Workbook()
dest_filename = 'Experiment_Results.xlsx'
ws1 = wb.create_sheet(title="Experiment")
#ws1 = wb.active
for row in range(2,tb):
_= ws1.cell(column = 1, row = 1, value = 'Kpss')
_= ws1.cell(column = 1, row = row, value = Kpss[row-2])
_= ws1.cell(column = 2, row = 1, value = 'Kgss')
_= ws1.cell(column = 2, row = row, value = Kgss[row-2])
_= ws1.cell(column = 3, row = 1, value = 'Lpss')
_= ws1.cell(column = 3, row = row, value = Lpss[row-2])
_= ws1.cell(column = 4, row = 1, value = 'Lgss')
_= ws1.cell(column = 4, row = row, value = Lgss[row-2])
_= ws1.cell(column = 5, row = 1, value = 'Bss')
_= ws1.cell(column = 5, row = row, value = Bss[row-2])
_= ws1.cell(column = 6, row = 1, value = 'Ipss')
_= ws1.cell(column = 6, row = row, value = Ipss[row-2])
_= ws1.cell(column = 7, row = 1, value = 'Igss')
_= ws1.cell(column = 7, row = row, value = Igss[row-2])
_= ws1.cell(column = 8, row = 1, value = 'Lss')
_= ws1.cell(column = 8, row = row, value = Lss[row-2])
_= ws1.cell(column = 9, row = 1, value = 'Yss')
_= ws1.cell(column = 9, row = row, value = Yss[row-2])
_= ws1.cell(column = 10, row = 1, value = 'Rss')
_= ws1.cell(column = 10, row = row, value = Rss[row-2])
_= ws1.cell(column = 11, row = 1, value = 'Wpss')
_= ws1.cell(column = 11, row = row, value = Wpss[row-2])
_= ws1.cell(column = 12, row = 1, value = 'Wgss')
_= ws1.cell(column = 12, row = row, value = Wgss[row-2])
_= ws1.cell(column = 13, row = 1, value = 'Gss')
_= ws1.cell(column = 13, row = row, value = Gss[row-2])
_= ws1.cell(column = 14, row = 1, value = 'Zss')
_= ws1.cell(column = 14, row = row, value = Zss[row-2])
_= ws1.cell(column = 15, row = 1, value = 'Cgss')
_= ws1.cell(column = 15, row = row, value = Cgss[row-2])
_= ws1.cell(column = 16, row = 1, value = 'Cpss')
_= ws1.cell(column = 16, row = row, value = Cpss[row-2])
_= ws1.cell(column = 17, row = 1, value = 'Css')
_= ws1.cell(column = 17, row = row, value = Css[row-2])
_= ws1.cell(column = 18, row = 1, value = 'Zss')
_= ws1.cell(column = 18, row = row, value = Zss[row-2])
_= ws1.cell(column = 19, row = 1, value = 'PIss')
_= ws1.cell(column = 19, row = row, value = PIss[row-2])
_= ws1.cell(column = 20, row = 1, value = 'IFss')
_= ws1.cell(column = 20, row = row, value = IFss[row-2])
_= ws1.cell(column = 21, row = 1, value = 'ratGY')
_= ws1.cell(column = 21, row = row, value = ratGY[row-2])
wb.save(filename = dest_filename)
    botonSave = Button(vp4, text="Save", command = call_back_save)
botonSave.grid(column=6, row=5)
toc=timeit.default_timer()
textTime = "%.4f" %(toc - tic )
#time = (toc - tic )
app.title("MoU computed in " + textTime + " seconds")
dataGreece = np.array([[2002, 45.1, 101.7],
[2003, 44.7, 97.4],
[2004, 45.5, 98.6],
[2005, 44.6, 100],
[2006, 45.3, 106.1],
[2007, 47.5, 105.4],
[2008, 50.6, 110.7],
[2009, 54, 129.7],
[2010, 51.5, 148.3],
[2011, 51.8, 170.6]])
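    # Columns of dataGreece: year, government expenditure to GDP (%) and debt
    # to GDP (%), matching the axis labels of the plot below.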
fig = plt.figure(num = 1, figsize=(6, 6), dpi = 80, facecolor = '#888888', \
edgecolor= 'k')
plt.xlim(40,55)
plt.ylim(35,180)
plt.plot(100*Gss[0:tb]/Yss[0:tb], 100*Bss[0:tb]/Yss[0:tb])
plt.title('Debt and Expenditures')
plt.plot(dataGreece[:,1], dataGreece[:,2], '-o')
plt.plot(dataGreece[:,1], dataGreece[:,2], 'ro')
plt.grid(True)
plt.text(44, 90, '2002-2006')
plt.text(47.5, 100, '2007')
plt.text(51, 108, '2008')
plt.text(52, 130, '2009')
plt.text(50, 150, '2010')
plt.text(50.5, 172, '2011')
plt.ylabel('Debt to GDP')
plt.xlabel('Expenditures to GDP')
canvas = FigureCanvasTkAgg(fig, master=app)
plot_widget = canvas.get_tk_widget()
plt.show()
#Run the app
app.mainloop()
|
gpl-3.0
|
herwaldo/Simulacion
|
area_circulo.py
|
1
|
1294
|
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import math
a=48.0
control=0
datosX=[]
datosY=[]
radio=0.0
b=34.0
m=123456789.0
k=6.0
Xn=0.0
delta=0.0
rando_x=0.0
rando_y=0.0
gerX=0.0
gerY=0.0
x=0.0
y=0.0
i=0.0
limite=0.0
def congruencialMixto(entrada):
Xn = ((a*entrada)+b)%m
return Xn
def congruencialMixtoXn(entrada):
Xn = ((a*entrada)+b)%m
return Xn
def convertir(numero):
val=((2*k)*numero)-k
return val
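# Monte Carlo estimate used in the loop below: points are drawn (roughly
# uniformly) from the square [-k, k] x [-k, k], so
#   circle area ~= (points inside the circle / total points) * (2*k)**2
# which only makes sense for radio <= k.  congruencialMixto and
# congruencialMixtoXn are the same mixed linear congruential step,
#   X_{n+1} = (a*X_n + b) mod m.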
x0 = input("Enter the seed value: \n")
limite = input("Enter the number of random values to generate: \n")+0.0
radio = input("Enter the radius of the circle: \n")+0.0
rando_x=congruencialMixto(x0)
gerX=congruencialMixtoXn(x0)
while i<limite:
rando_y=congruencialMixto(gerX)/m
gerY=congruencialMixtoXn(gerX)
rando_x=congruencialMixto(gerY)/m
gerX=congruencialMixtoXn(gerY)
x=convertir(rando_x)
y=convertir(rando_y)
datosX.append(x)
datosY.append(y)
i+=1
    # Use the converted coordinates so the area estimate and the plot below
    # apply the same inside-the-circle test.
    valor = x**2 + y**2
    if valor <= (radio**2):
        delta += ((1/limite)*((2*k)**2))
plt.ion()
ax=plt.gca()
if valor<=(radio**2):
ax.plot(datosX[control],datosY[control],"b*")
else:
ax.plot(datosX[control],datosY[control],"r*")
plt.draw()
control+=1
print "Este es el radio generado: "+str(delta)+",este es el radio teorico: "+str(math.pi*(radio**2))
|
gpl-2.0
|
tapomayukh/projects_in_python
|
classification/Classification_with_CRF/HCRF_package/test_hcrf_algo.py
|
1
|
4594
|
import pylab as pyl
import matplotlib.pyplot as pp
import numpy as np
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import csv
import sys
sys.path.insert(0,'/home/tapo/Softwares/CRFs/HCRF2.0b/apps/swig/PyHCRF')
import os
import PyHCRF
if __name__ == '__main__':
groups = 30
num_features = 1
home = '/home/tapo/svn/robot1_data/usr/tapo/data/CRFs/4-category/'+str(groups)+'/'+str(num_features)+'_feature/'
graph = '/home/tapo/svn/robot1_data/usr/tapo/graphs/CRFs_HCRFs/'
params = '/home/tapo/svn/robot1_data/usr/tapo/params/hcrf_params/'
num_states = 10
window = 5
# Create the model
hcrf_model = PyHCRF.ToolboxHCRF(num_states, PyHCRF.OPTIMIZER_LBFGS, window)
print "Model Created"
num_folds = 5
truth_labels = []
result_labels = []
for nfold in range(1, num_folds+1):
print " "
# Delete old files
#os.remove('features.txt')
#os.remove('models.txt')
#os.remove('results.txt')
#os.remove('stats.txt')
# Loading Labels for comparisons later
with open(home+'seqLabelsTest'+str(nfold)+'.csv','rb') as testLabelFile:
datareader=csv.reader(testLabelFile, delimiter=' ')
for row in datareader:
truth_labels.append(row)
#print truth_labels
#print np.size(truth_labels)
# Load Training and Testing Data
training_set = PyHCRF.DataSet(home+'dataTrain'+str(nfold)+'.csv', None,
home+'seqLabelsTrain'+str(nfold)+'.csv')
testing_set = PyHCRF.DataSet(home+'dataTest'+str(nfold)+'.csv', None,
home+'seqLabelsTest'+str(nfold)+'.csv')
print "Data Loaded for Fold :", nfold
# Train the model
print "Training Model for Fold :", nfold
hcrf_model.train(training_set)
# Save Model and Features
print "Saving Model for Fold :", nfold
        hcrf_model.save(params+'models_'+'state_'+str(num_states)+'_window_'+str(window)+'_groups_'+str(groups)+'_features_'+str(num_features)+'.txt',params+'features_'+'state_'+str(num_states)+'_window_'+str(window)+'_groups_'+str(groups)+'_features_'+str(num_features)+'.txt')
# Test the model
print "Testing Model for Fold :", nfold
        hcrf_model.test(testing_set, params+'results_'+'state_'+str(num_states)+'_window_'+str(window)+'_groups_'+str(groups)+'_features_'+str(num_features)+'.txt', params+'stats_'+'state_'+str(num_states)+'_window_'+str(window)+'_groups_'+str(groups)+'_features_'+str(num_features)+'.txt')
        result_file = open(params+'results_'+'state_'+str(num_states)+'_window_'+str(window)+'_groups_'+str(groups)+'_features_'+str(num_features)+'.txt','r')
result_file.seek(0)
for line in result_file:
result_labels.append(line.split()[0])
result_file.close()
#print result_labels
#print np.size(result_labels)
    # Plot Confusion Matrix
    # Rows are predictions from the algorithm, columns are ground truth values
    # (targets): 0->RF, 1->RM, 2->SF, 3->SM
truth_labels = np.array(truth_labels).flatten()
result_labels = np.array(result_labels).flatten()
#print truth_labels[1]
#print result_labels[1]
cmat = np.zeros((4,4))
for i in range(len(truth_labels)):
cmat[int(result_labels[i])-1][int(truth_labels[i])-1]+=1
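    # Overall accuracy: correct predictions sit on the diagonal of the 4x4
    # confusion matrix, expressed as a percentage of all test sequences.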
acc_percent = round((cmat[0][0]+cmat[1][1]+cmat[2][2]+cmat[3][3])*100/len(truth_labels), 2)
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels], cmap = 'gray_r')
ax.set_title('Performance of HCRF Models (' + str(acc_percent) + '%)')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j],color='k')
if cmat[i][j] > 20:
pp.text(j+0.5,3.5-i,cmat[i][j],color='w')
j = j+1
i = i+1
#pp.show()
pp.savefig(graph+'hcrf_'+str(groups)+'_groups'+str(num_states)+'_states'+str(num_features)+'_feature_window_'+str(window)+'.png')
|
mit
|
sniemi/SamPy
|
sandbox/src1/examples/custom_projection_example.py
|
1
|
17587
|
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
from matplotlib.projections import register_projection
import numpy as npy
# This example projection class is rather long, but it is designed to
# illustrate many features, not all of which will be used every time.
# It is also common to factor out a lot of these methods into common
# code used by a number of projections with similar characteristics
# (see geo.py).
class HammerAxes(Axes):
"""
A custom class for the Aitoff-Hammer projection, an equal-area map
projection.
http://en.wikipedia.org/wiki/Hammer_projection
"""
    # The projection must specify a name.  This will be used by the
# user to select the projection, i.e. ``subplot(111,
# projection='hammer')``.
name = 'hammer'
# The number of interpolation steps when converting from straight
# lines to curves. (See ``transform_path``).
RESOLUTION = 75
def __init__(self, *args, **kwargs):
Axes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def cla(self):
"""
Override to set up some reasonable defaults.
"""
# Don't forget to call the base class
Axes.cla(self)
# Set up a default grid spacing
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
# Turn off minor ticking altogether
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
# Do not display ticks -- we only want gridlines and text
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
# The limits on this projection are fixed -- they are not to
# be changed by the user. This makes the math in the
# transformation itself easier, and since this is a toy
# example, the easier, the better.
Axes.set_xlim(self, -npy.pi, npy.pi)
Axes.set_ylim(self, -npy.pi / 2.0, npy.pi / 2.0)
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# There are three important coordinate spaces going on here:
#
# 1. Data space: The space of the data itself
#
# 2. Axes space: The unit rectangle (0, 0) to (1, 1)
# covering the entire plot area.
#
# 3. Display space: The coordinates of the resulting image,
# often in pixels or dpi/inch.
# This function makes heavy use of the Transform classes in
# ``lib/matplotlib/transforms.py.`` For more information, see
# the inline documentation there.
# The goal of the first two transformations is to get from the
# data space (in this case longitude and latitude) to axes
# space. It is separated into a non-affine and affine part so
# that the non-affine part does not have to be recomputed when
# a simple affine change to the figure has been made (such as
# resizing the window or changing the dpi).
# 1) The core transformation from data space into
# rectilinear space defined in the HammerTransform class.
self.transProjection = self.HammerTransform(self.RESOLUTION)
# 2) The above has an output range that is not in the unit
# rectangle, so scale and translate it so it fits correctly
# within the axes. The peculiar calculations of xscale and
# yscale are specific to a Aitoff-Hammer projection, so don't
# worry about them too much.
xscale = 2.0 * npy.sqrt(2.0) * npy.sin(0.5 * npy.pi)
yscale = npy.sqrt(2.0) * npy.sin(0.5 * npy.pi)
self.transAffine = Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
# 3) This is the transformation from axes space to display
# space.
self.transAxes = BboxTransformTo(self.bbox)
# Now put these 3 transforms together -- from data all the way
# to display coordinates. Using the '+' operator, these
# transforms will be applied "in order". The transforms are
# automatically simplified, if possible, by the underlying
# transformation framework.
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# The main data transformation is set up. Now deal with
# gridlines and tick labels.
# Longitude gridlines and ticklabels. The input to these
# transforms are in display space in x and axes space in y.
# Therefore, the input values will be in range (-xmin, 0),
# (xmax, 1). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the equator.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, npy.pi) \
.translate(0.0, -npy.pi)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# Now set up the transforms for the latitude ticks. The input to
# these transforms are in axes space in x and display space in
# y. Therefore, the input values will be in range (0, -ymin),
# (1, ymax). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the edge of the axes ellipse.
yaxis_stretch = Affine2D().scale(npy.pi * 2.0, 1.0).translate(-npy.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def get_xaxis_transform(self):
"""
Override this method to provide a transformation for the
x-axis grid and ticks.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
secondary x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
"""
Override this method to provide a transformation for the
y-axis grid and ticks.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
secondary y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text2_transform, 'center', 'left'
def get_axes_patch(self):
"""
Override this method to define the shape that is used for the
background of the plot. It should be a subclass of Patch.
In this case, it is a Circle (that may be warped by the axes
transform into an ellipse). Any data and gridlines will be
clipped to this shape.
"""
return Circle((0.5, 0.5), 0.5)
# Prevent the user from applying scales to one or both of the
# axes. In this particular case, scaling the axes wouldn't make
# sense, so we don't allow it.
def set_xscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_xscale(self, *args, **kwargs)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_yscale(self, *args, **kwargs)
# Prevent the user from changing the axes limits. In our case, we
# want to display the whole sphere all the time, so we override
# set_xlim and set_ylim to ignore any input. This also applies to
# interactive panning and zooming in the GUI interfaces.
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -npy.pi, npy.pi)
Axes.set_ylim(self, -npy.pi / 2.0, npy.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
"""
Override this method to change how the values are displayed in
the status bar.
In this case, we want them to be displayed in degrees N/S/E/W.
"""
long = long * (180.0 / npy.pi)
lat = lat * (180.0 / npy.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
# \u00b0 : degree symbol
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
class DegreeFormatter(Formatter):
"""
This is a custom formatter that converts the native unit of
radians into (truncated) degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / npy.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
# \u00b0 : degree symbol
return u"%d\u00b0" % degrees
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
This is an example method that is specific to this projection
class -- it provides a more convenient interface to set the
ticking than set_xticks would.
"""
# Set up a FixedLocator at each of the points, evenly spaced
# by degrees.
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
npy.linspace(-npy.pi, npy.pi, number, True)[1:-1]))
# Set the formatter to display the tick labels in degrees,
# rather than radians.
self.xaxis.set_major_formatter(self.DegreeFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
        Set the number of degrees between each latitude grid.
This is an example method that is specific to this projection
class -- it provides a more convenient interface than
set_yticks would.
"""
# Set up a FixedLocator at each of the points, evenly spaced
# by degrees.
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
npy.linspace(-npy.pi / 2.0, npy.pi / 2.0, number, True)[1:-1]))
# Set the formatter to display the tick labels in degrees,
# rather than radians.
self.yaxis.set_major_formatter(self.DegreeFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
Often, in geographic projections, you wouldn't want to draw
longitude gridlines near the poles. This allows the user to
specify the degree at which to stop drawing longitude grids.
This is an example method that is specific to this projection
class -- it provides an interface to something that has no
analogy in the base Axes class.
"""
longitude_cap = degrees * (npy.pi / 180.0)
# Change the xaxis gridlines transform so that it draws from
# -degrees to degrees, rather than -pi to pi.
self._xaxis_pretransform \
.clear() \
.scale(1.0, longitude_cap * 2.0) \
.translate(0.0, -longitude_cap)
def get_data_ratio(self):
"""
Return the aspect ratio of the data itself.
This method should be overridden by any Axes that have a
fixed data ratio.
"""
return 1.0
# Interactive panning and zooming is not supported with this projection,
# so we override all of the following methods to disable it.
def can_zoom(self):
"""
        Return True if this axes supports the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
# Now, the transforms themselves.
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
"""
Override the transform method to implement the custom transform.
The input and output are Nx2 numpy arrays.
"""
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = npy.cos(latitude)
sqrt2 = npy.sqrt(2.0)
alpha = 1.0 + cos_latitude * npy.cos(half_long)
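            # (Textbook Hammer formulas divide by sqrt(alpha) rather than
            # alpha; this toy example keeps the simpler form.)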
x = (2.0 * sqrt2) * (cos_latitude * npy.sin(half_long)) / alpha
y = (sqrt2 * npy.sin(latitude)) / alpha
return npy.concatenate((x, y), 1)
# This is where things get interesting. With this projection,
# straight lines in data space become curves in display space.
# This is done by interpolating new values between the input
# values of the data. Since ``transform`` must not return a
# differently-sized array, any transform that requires
# changing the length of the data array must happen within
# ``transform_path``.
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = npy.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * npy.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = npy.arcsin(y*z)
return npy.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
# The inverse of the inverse is the original transform... ;)
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
# Now register the projection with matplotlib so the user can select
# it.
register_projection(HammerAxes)
# Now make a simple example using the custom projection.
from pylab import *
subplot(111, projection="hammer")
grid(True)
show()
|
bsd-2-clause
|
adam-rabinowitz/ngs_analysis
|
gtf/gene_conversion.py
|
2
|
20625
|
import collections
import re
import pandas as pd
import numpy as np
from scipy.stats import hypergeom as ssh
import statsmodels.stats.multitest as ssm
def create_tran_dictionary(tranfile):
''' Parses text files to create dictionary translating
between two sets of names.
Args:
tranfile - Full path to tab delimited text file listing input
names in the first column and output names in second column.
Returns:
tranDict - A dictionary where keys are input names and values
are lists of output names.
log - A dictionary listing the number of output names found
for the input names.
'''
# Sequentially process lines in file and add to dictionary
tranDict = {}
with open(tranfile) as infile:
for line in infile:
# Extract input and output genes from line
lineData = line.strip().split('\t')
if len(lineData) == 1:
inGene = lineData[0]
outGene = ''
elif len(lineData) == 2:
inGene, outGene = lineData
else:
print(lineData)
raise ValueError('Line does not have 1 or 2 elements')
# Add gene data to dictionary
if inGene in tranDict:
if outGene:
tranDict[inGene].add(outGene)
else:
tranDict[inGene] = set()
if outGene:
tranDict[inGene].add(outGene)
# Convert sets to lists
tranDict = {k:list(v) for k,v in tranDict.iteritems()}
# Create log and return data
log = collections.defaultdict(int)
for values in tranDict.itervalues():
log[len(values)] += 1
return(tranDict, log)
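# Example translation file layout (tab separated, names are hypothetical):
#   ENSG0001    GeneA
#   ENSG0002    GeneB
#   ENSG0003
# A line with no second column yields an input gene that maps to an empty list.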
def replace_gene_names(
infile, outfile, genecol, translation, header, rmdup=True
):
''' Parses input text files to create output text file where input
gene names are replaced by gene names in translation dictionary.
Args:
infile (str)- Full path to input file.
outfile (str)- Full path to output file.
genecol (int)- Column in input file containing gene names.
translation (dict)- Dictionary containing gene translations.
header (bool)- Does input file contain a header.
rmdup (bool)- Remove genes with multiple translations.
Returns:
log - A dictionary listing number of genes
'''
# Check arguments
if not isinstance(genecol, int) or genecol < 0:
raise ValueError('genecol argument must be integer >= 0')
if not isinstance(translation, dict):
raise TypeError('translation argument must be dictionary')
if not isinstance(header, bool):
raise TypeError('header argument must be bool')
if not isinstance(rmdup, bool):
raise TypeError('rmdup argument must be boolean')
print(rmdup)
# Check names
outGeneCounter = collections.defaultdict(int)
with open(infile, 'r') as filein:
if header:
filein.next()
for line in filein:
lineData = line.strip().split('\t')
inGene = lineData[genecol]
outGenes = translation[inGene]
for outGene in outGenes:
outGeneCounter[outGene] += 1
# Create output files
inCounter = collections.defaultdict(int)
outCounter = collections.defaultdict(int)
with open(infile, 'r') as filein, open(outfile, 'w') as fileout:
# Write header
if header:
line = filein.next()
fileout.write(line)
# Loop through line and create output
for line in filein:
# Parse line and extract input and output genes
lineData = line.strip().split('\t')
inGene = lineData[genecol]
outGenes = translation[inGene]
inCounter[len(outGenes)] += 1
            # Skip input genes with multiple translations when rmdup is set
if len(outGenes) > 1 and rmdup:
continue
# Loop through outputs
for outGene in outGenes:
inCount = outGeneCounter[outGene]
outCounter[inCount] += 1
if inCount > 1 and rmdup:
continue
# Create and write output
lineData[genecol] = outGene
fileout.write('{}\n'.format('\t'.join(lineData)))
# Return log
return(inCounter, outCounter)
def extract_gene_results(
results, geneCol, statCol, statMax, header
):
''' Parses text files to extract significant genes for ontology
analysis.
Args:
results - Full path to text file.
geneCol (int) - Column containing gene names.
statCol (int) - Column containing significance statistic.
statMax (float) - Threshold of significance for statistic.
header (bool) - Whether text file contains a header.
Returns:
allGenes (set)- The set of all genes in file with an
associated statistic.
sigGenes (set)- The set of genes with a significant statistic.
'''
# Check arguments
if not isinstance(geneCol, int) or geneCol < 0:
raise ValueError('Unacceptable value for geneCol')
if not isinstance(statCol, int) or statCol < 0:
raise ValueError('Unacceptable value for statCol')
    if not isinstance(statMax, float) or not 0 <= statMax <= 1:
raise ValueError('Unacceptable value for statMax')
if not isinstance(header, bool):
raise ValueError('Unacceptable value for header')
# Create variables to store data
allGenes = set()
sigGenes = set()
# Loop through input file
with open(results) as infile:
if header:
infile.next()
for line in infile:
            # Extract and store input gene
lineData = line.strip().split('\t')
gene = lineData[geneCol]
if gene in allGenes:
raise ValueError('Genes duplicated in results file')
# Extract and store statistic
stat = lineData[statCol]
if stat == 'NA':
continue
stat = float(lineData[statCol])
            # Store genes and significant genes
allGenes.add(gene)
if stat <= statMax:
sigGenes.add(gene)
# Return data
return((allGenes, sigGenes))
def extract_gene_results_posneg(
results, geneCol, log2Col, statCol, statMax, header
):
''' Parses text files to extract genes for ontology analysis.
Args:
results - Full path to text file.
geneCol (int) - Column containing gene names.
        log2Col (int) - Column containing log2 values.
statCol (int) - Column containing significance statistic.
statMax (float) - Threshold of significance for statistic.
header (bool) - Whether text file contains a header.
Returns:
allGenes (set)- The set of all genes in file with an
associated statistic and log fold change.
        posGenes (set)- The set of significant genes in file
            with a positive log fold change.
        negGenes (set)- The set of significant genes in file
            with a negative log fold change.
'''
# Check arguments
if not isinstance(geneCol, int) or geneCol < 0:
raise ValueError('Unacceptable value for geneCol')
if not isinstance(log2Col, int) or log2Col < 0:
        raise ValueError('Unacceptable value for log2Col')
if not isinstance(statCol, int) or statCol < 0:
raise ValueError('Unacceptable value for statCol')
    if not isinstance(statMax, float) or not 0 <= statMax <= 1:
raise ValueError('Unacceptable value for statMax')
if not isinstance(header, bool):
raise ValueError('Unacceptable value for header')
# Create variables to store data
allGenes = set()
posGenes = set()
negGenes = set()
# Loop through input file
with open(results) as infile:
if header:
infile.next()
for line in infile:
            # Extract and store input gene
lineData = line.strip().split('\t')
gene = lineData[geneCol]
if gene in allGenes:
raise ValueError('Genes duplicated in results file')
# Extract and store statistic
stat = lineData[statCol]
if stat == 'NA':
continue
stat = float(stat)
log2 = lineData[log2Col]
if log2 == 'NA':
continue
log2 = float(log2)
# Store genes and significant genes
allGenes.add(gene)
if stat <= statMax:
if log2 > 0:
posGenes.add(gene)
elif log2 < 0:
negGenes.add(gene)
else:
raise ValueError('Unexpected log2 value')
# Return data
return((allGenes, posGenes, negGenes))
def parse_gmt(gmt):
''' Parses Broad gmt files for gene ontology analysis.
Args:
gmt - Full path to gmt file
Returns:
anno2gene - A dictionary where keys are annotations
and values are sets of genes.
'''
# Create dictionaries to store data
anno2gene = {}
# Create dictionary to store annotation and loop through file
with open(gmt) as infile:
for line in infile:
# Extract data from line
lineData = line.strip().split('\t')
annotation = lineData[0]
geneSet = set(lineData[2:])
# Add data to dictionary
if annotation in anno2gene:
if not anno2gene[annotation] == geneSet:
raise ValueError(
'Conflicting gene sets: {}'.format(annotation))
anno2gene[annotation] = geneSet
# Return dictionary
return(anno2gene)
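# Illustrative sketch (not part of the original module): a Broad GMT line is
# tab-delimited with the term name in column 1, a description/URL in column 2
# and gene symbols from column 3 onwards. The term and gene names below are
# hypothetical.
def _example_parse_gmt_line():
    line = 'TERM_A\thttp://example.org/term_a\tGENE1\tGENE2\tGENE3'
    lineData = line.strip().split('\t')
    annotation = lineData[0]
    geneSet = set(lineData[2:])
    # annotation == 'TERM_A'; geneSet == {'GENE1', 'GENE2', 'GENE3'}
    return annotation, geneSet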
def calculate_hypergeo(
allGenes, sigGenes, geneAnno, minGO = 5, maxGO = 500, minGene = 3,
annoGenesOnly = True
):
''' Generates pvalues and fdr for gene ontology terms.
Args:
allGenes (set)- A set of all genes in experiment
sigGenes (set)- A set of significant genes in experiment.
geneAnno (dict)- A dictionary of gene ontology terms and sets of
genes associated with the term.
minGO (int)- Minimum number of genes associated with GO term.
maxGO (int)- Maximum number of genes associated with GO term.
minGene (int)- Minimum number of significant genes associated
with GO term.
annoGenesOnly (bool)- Whether to only use genes with annotation
in pvalue calculation. False will use all genes.
Returns:
outDF - A pandas dataframe containing the results of the analysis.
'''
# Check arguments
if not isinstance(minGO, int) or minGO < 1:
raise ValueError
if not isinstance(maxGO, int) or maxGO < minGO:
raise ValueError
if not isinstance(minGene, int) or minGene < 1:
raise ValueError
# Check gene lists
if not isinstance(allGenes, set):
allGenes = set(allGenes)
if not isinstance(sigGenes, set):
sigGenes = set(sigGenes)
if not sigGenes.issubset(allGenes):
raise ValueError('sigGenes must be subset of allGenes')
# Extract background genes for pvalue calculation and count
if annoGenesOnly:
annoGenes = set()
for genes in geneAnno.itervalues():
for gene in genes:
annoGenes.add(gene)
background = annoGenes.intersection(allGenes)
backgroundSig = annoGenes.intersection(sigGenes)
else:
background = allGenes
backgroundSig = sigGenes
N = len(background)
n = len(backgroundSig)
# Loop through gene annotation
outList = []
for anno, annoList in geneAnno.iteritems():
annoSet = set(annoList)
# Check number of genes associated with term
K = len(background.intersection(annoSet))
if K < minGO or K > maxGO:
continue
# Check number of significant genes associated with term
k = len(backgroundSig.intersection(annoSet))
if k < minGene:
continue
# Generate and store pvalue
pvalue = 1 - ssh.cdf(k-1, N, K, n)
outList.append((anno, N, n, K, k, pvalue))
# Process and return data
outDF = pd.DataFrame(outList, columns=['term','N','n','K','k','pval'])
outDF = outDF.sort_values(by='pval')
outDF['fdr'] = ssm.multipletests(outDF['pval'], method='fdr_bh')[1]
return(outDF)
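# Worked example (assumption: ssh used above is scipy.stats.hypergeom, as the
# cdf call suggests). With N background genes, n significant genes, K genes
# annotated to a term and k significant annotated genes, the enrichment pvalue
# is P(X >= k) for X ~ Hypergeometric(N, K, n); the 1 - cdf(k - 1) form above
# equals the survival function evaluated at k - 1.
def _example_hypergeo_pvalue(N=1000, n=100, K=50, k=12):
    from scipy.stats import hypergeom
    pvalue = hypergeom.sf(k - 1, N, K, n)
    assert abs(pvalue - (1 - hypergeom.cdf(k - 1, N, K, n))) < 1e-12
    return pvalue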
def calculate_hypergeo_posneg(
allGenes, posGenes, negGenes, geneAnno, minGO = 5, maxGO = 500,
minGene = 3, combined = False, annoGenesOnly = False
):
''' Generates pvalues and fdr for gene ontology terms. Considers
significant genes with a positive and negative fold change separately.
Args:
allGenes (set)- Set of all genes in experiment
posGenes (set)- Set of significant genes with positive fold change.
negGenes (set)- Set of significant genes with negative fold change
geneAnno (dict)- A dictionary of gene ontology terms and sets of
genes associated with the term.
minGO (int)- Minimum number of genes associated with GO term.
maxGO (int)- Maximum number of genes associated with GO term.
minGene (int)- Minimum number of significant genes associated
with GO term.
annoGenesOnly (bool)- Whether to only use genes with annotation
in pvalue calculation. False will use all genes.
Returns:
outDF - A pandas dataframe containing the results of the analysis.
'''
# Check arguments
if not isinstance(minGO, int) or minGO < 1:
raise ValueError
if not isinstance(maxGO, int) or maxGO < minGO:
raise ValueError
if not isinstance(minGene, int) or minGene < 1:
raise ValueError
# Check gene lists
if not isinstance(allGenes, set):
allGenes = set(allGenes)
if not isinstance(posGenes, set):
posGenes = set(posGenes)
if not isinstance(negGenes, set):
negGenes = set(negGenes)
if not posGenes.issubset(allGenes):
raise ValueError('posGenes must be subset of allGenes')
if not negGenes.issubset(allGenes):
raise ValueError('negGenes must be subset of allGenes')
if len(posGenes.intersection(negGenes)) > 0:
raise ValueError('overlap between posGenes and negGenes')
# Extract background genes for pvalue calculation and count
if annoGenesOnly:
annoGenes = set()
for genes in geneAnno.itervalues():
for gene in genes:
annoGenes.add(gene)
background = annoGenes.intersection(allGenes)
sigDict = {'pos':annoGenes.intersection(posGenes),
'neg':annoGenes.intersection(negGenes)}
else:
background = allGenes
sigDict = {'pos':posGenes, 'neg':negGenes}
# Add combined set if required
if combined:
sigDict['com'] = sigDict['pos'].union(sigDict['neg'])
# Loop through conditions and gene annotation
outList = []
N = len(background)
for condition in sigDict.keys():
sigGenes = sigDict[condition]
n = len(sigGenes)
for anno, annoList in geneAnno.iteritems():
annoSet = set(annoList)
# Check number of genes associated with term
K = len(background.intersection(annoSet))
# Check number of significant genes associated with term
k = len(sigGenes.intersection(annoSet))
if k < minGene or K < minGO or K > maxGO:
pvalue = np.NaN
else:
pvalue = ssh.sf(k, N, K, n, loc=1)
outList.append((anno, condition, N, n, K, k, pvalue))
# Add false discovery rate
outDF = pd.DataFrame(outList, columns=[
'term', 'query', 'N','n','K','k','pval'])
outDF = outDF.sort_values(by='pval')
pvalues = outDF['pval'][~np.isnan(outDF['pval'])]
pvalueIndices = outDF['pval'].index[~outDF['pval'].apply(np.isnan)]
fdr = ssm.multipletests(pvalues, method='fdr_bh')[1]
outDF.loc[pvalueIndices, 'fdr'] = fdr
return(outDF)
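# Minimal sketch of the NaN-aware FDR step above (assumption: ssm is
# statsmodels' multitest module). Benjamini-Hochberg correction is applied only
# to the non-NaN pvalues; terms that failed the size filters keep NaN in the
# 'fdr' column.
def _example_fdr_with_nan():
    import numpy as np
    import pandas as pd
    from statsmodels.stats.multitest import multipletests
    df = pd.DataFrame({'pval': [0.001, np.nan, 0.04, 0.2]})
    mask = df['pval'].notnull()
    df.loc[mask, 'fdr'] = multipletests(df.loc[mask, 'pval'], method='fdr_bh')[1]
    return df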
def extract_overlap_results_posneg(
outPrefix, gmt, results, geneCol, log2Col, statCol, statMax = 0.05
):
# Parse gmt file
gmtData = parse_gmt(gmt)
# Create output dictionary
outputDict = {}
for term in gmtData.keys():
outputDict[(term, 'pos')] = []
outputDict[(term, 'neg')] = []
# Open results file and extract header
with open(results) as inFile:
# Add header to output dictionary
header = inFile.next().strip()
for term in outputDict:
outputDict[term].append(header)
# Extract line data
for line in inFile:
lineData = line.strip().split('\t')
# Extract and check stat data
stat = lineData[statCol]
if stat == 'NA':
continue
stat = float(stat)
# Loop through gmt terms and find matches
log2 = lineData[log2Col]
if log2 == 'NA':
continue
log2 = float(log2)
# Store genes and significant genes
gene = lineData[geneCol]
for term, termGenes in gmtData.items():
if gene in termGenes:
if log2 > 0:
outputDict[(term, 'pos')].append(line.strip())
elif log2 < 0:
outputDict[(term, 'neg')].append(line.strip())
# Loop through data and create output files
for term, change in outputDict.keys():
outLines = '\n'.join(outputDict[(term, change)])
term = re.sub('\s', '_', term)
outFile = '{}.{}.{}.results'.format(outPrefix, term, change)
with open(outFile, 'w') as out:
out.write(outLines)
def extract_ensembl_names(gtf):
# Create regular expressions
eRE = re.compile('gene_id\s+"(.*?)";')
nRE = re.compile('gene_name\s+"(.*?)";')
# Sequentially process lines in file and add to dictionary
nameDict = collections.defaultdict(set)
with open(gtf) as filein:
for line in filein:
# Skip lines starting with #
if line.startswith('#'):
continue
# Extract ensembl and gene names from dictionary
data = line.strip().split('\t')[8]
ensembl = re.search(eRE, data).group(1)
name = re.search(nRE, data).group(1)
nameDict[ensembl].add(name)
# Convert sets to lists
nameDict = {k:list(v) for k,v in nameDict.iteritems()}
# Create log and return data
logDict = collections.defaultdict(int)
for values in nameDict.itervalues():
logDict[len(values)] += 1
return(nameDict, logDict)
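# Illustrative sketch of the attribute parsing above, applied to a single
# hypothetical GTF attribute field (column 9 of a GTF line).
def _example_gtf_attributes():
    import re
    data = 'gene_id "ENSG00000139618"; gene_name "BRCA2";'
    ensembl = re.search(r'gene_id\s+"(.*?)";', data).group(1)
    name = re.search(r'gene_name\s+"(.*?)";', data).group(1)
    # ensembl == 'ENSG00000139618'; name == 'BRCA2'
    return ensembl, name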
def span_translations(tran1, tran2, log=True):
# Check argument and create output variables
if not isinstance(log, bool):
raise TypeError('Log argument must be boolean')
tranDict = {}
# Loop through values in 1st dictionary
for key, valueList1 in tran1.iteritems():
# Create entry for key in output dictionary
if key not in tranDict:
tranDict[key] = set()
# Loop through values in 1st dictionary
for v1 in valueList1:
# Extract and store values for entries in 2nd dictionary
if v1 in tran2:
valueList2 = tran2[v1]
for v2 in valueList2:
tranDict[key].add(v2)
# Convert sets to lists
tranDict = {k:list(v) for k,v in tranDict.iteritems()}
# Create log and return data
logDict = collections.defaultdict(int)
for values in tranDict.itervalues():
logDict[len(values)] += 1
return(tranDict, logDict)
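# Illustrative sketch with literal dictionaries (hypothetical identifiers):
# span_translations chains, for example, a mouse-to-human Ensembl map with a
# human Ensembl-to-symbol map; intermediate IDs missing from the second map
# simply contribute nothing.
def _example_span_translations():
    tran1 = {'ENSMUSG_A': ['ENSG_1'], 'ENSMUSG_B': ['ENSG_2', 'ENSG_3']}
    tran2 = {'ENSG_1': ['GENE1'], 'ENSG_2': ['GENE2']}
    tranDict, logDict = span_translations(tran1, tran2)
    # tranDict == {'ENSMUSG_A': ['GENE1'], 'ENSMUSG_B': ['GENE2']}
    return tranDict, logDict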
#parse_gmt('/farm/scratch/rs-bio-lif/rabino01/myrtoDenaxa/genomeData/geneOntology/c2.all.v5.1.symbols.gmt')
#
#
#ensemblTran, ensemblLog = create_tran_dictionary('/farm/scratch/rs-bio-lif/rabino01/myrtoDenaxa/genomeData/mouse2Human/ensembl_mouse_human.txt')
#nameTran, nameLog = extract_ensembl_names('/farm/scratch/rs-bio-lif/rabino01/myrtoDenaxa/genomeData/mouse2Human/Homo_sapiens.GRCh38.84.gtf')
#finalTran, finalLog = span_translations(ensemblTran, nameTran)
#for inGene, outGene in finalTran.iteritems():
# if len(outGene) == 0:
# print('{}\t'.format(inGene))
# else:
# for gene in outGene:
# print('{}\t{}'.format(inGene,gene))
|
gpl-2.0
|
gnieboer/tensorflow
|
tensorflow/examples/learn/hdf5_classification.py
|
60
|
2190
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
learn = tf.contrib.learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Note that we are saving and loading iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
alvarofierroclavero/scikit-learn
|
examples/preprocessing/plot_robust_scaling.py
|
221
|
2702
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
|
bsd-3-clause
|
nomadcube/scikit-learn
|
sklearn/preprocessing/label.py
|
35
|
28877
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
|
bsd-3-clause
|
ZENGXH/scikit-learn
|
sklearn/tests/test_kernel_ridge.py
|
342
|
3027
|
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
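# Additional illustrative test (not in the original file): with a linear
# kernel, predictions are K.dot(dual_coef_), so the primal Ridge weights can
# be recovered as X.T.dot(dual_coef_).
def test_kernel_ridge_dual_primal_equivalence():
    kr = KernelRidge(kernel="linear", alpha=1).fit(X, y)
    w_primal = Ridge(alpha=1, fit_intercept=False).fit(X, y).coef_
    w_from_dual = np.dot(X.T, kr.dual_coef_)
    assert_array_almost_equal(w_primal, w_from_dual, decimal=4)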
|
bsd-3-clause
|
toscanosaul/water
|
saulSolution/visualizer.py
|
10
|
2005
|
#!/usr/bin/env python
"""
Visualize shallow water simulation results.
NB: Requires a modern Matplotlib version; also needs
either FFMPeg (for MP4) or ImageMagick (for GIF)
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as manimation
import sys
def main(infile="waves.out", outfile="out.mp4", startpic="start.png"):
"""Visualize shallow water simulation results.
Args:
infile: Name of input file generated by simulator
outfile: Desired output file (mp4 or gif)
startpic: Name of picture generated at first frame
"""
u = np.fromfile(infile, dtype=np.dtype('f4'))
nx = int(u[0])
ny = int(u[1])
x = range(0,nx)
y = range(0,ny)
u = u[2:]
nframe = len(u) // (nx*ny)
stride = nx // 20
u = np.reshape(u, (nframe,nx,ny))
X, Y = np.meshgrid(x,y)
fig = plt.figure(figsize=(10,10))
def plot_frame(i, stride=5):
ax = fig.add_subplot(111, projection='3d')
ax.set_zlim(0, 2)
Z = u[i,:,:];
ax.plot_surface(X, Y, Z, rstride=stride, cstride=stride)
return ax
if startpic:
ax = plot_frame(0)
plt.savefig(startpic)
plt.delaxes(ax)
metadata = dict(title='Wave animation', artist='Matplotlib')
if outfile[-4:] == ".mp4":
Writer = manimation.writers['ffmpeg']
writer = Writer(fps=15, metadata=metadata,
extra_args=["-r", "30",
"-c:v", "libx264",
"-pix_fmt", "yuv420p"])
elif outfile[-4:] == ".gif":
Writer = manimation.writers['imagemagick']
writer = Writer(fps=15, metadata=metadata)
with writer.saving(fig, outfile, nframe):
for i in range(nframe):
ax = plot_frame(i)
writer.grab_frame()
plt.delaxes(ax)
if __name__ == "__main__":
main(*sys.argv[1:])
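# Illustrative helper (hypothetical, not part of the original script): main()
# expects a flat float32 file whose first two values are nx and ny, followed
# by nframe*nx*ny water heights. Something like this writes a compatible
# test input.
def write_test_input(fname="waves.out", nx=20, ny=20, nframe=3):
    frames = np.ones((nframe, nx, ny), dtype='f4')
    header = np.array([nx, ny], dtype='f4')
    np.concatenate([header, frames.ravel()]).tofile(fname)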
|
mit
|
huggingface/pytorch-transformers
|
src/transformers/data/metrics/__init__.py
|
2
|
3783
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from ...file_utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from sklearn.metrics import f1_score, matthews_corrcoef
from scipy.stats import pearsonr, spearmanr
DEPRECATION_WARNING = (
"This metric will be removed from the library soon, metrics should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
warnings.warn(DEPRECATION_WARNING, FutureWarning)
requires_backends(simple_accuracy, "sklearn")
return (preds == labels).mean()
def acc_and_f1(preds, labels):
warnings.warn(DEPRECATION_WARNING, FutureWarning)
requires_backends(acc_and_f1, "sklearn")
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
warnings.warn(DEPRECATION_WARNING, FutureWarning)
requires_backends(pearson_and_spearman, "sklearn")
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def glue_compute_metrics(task_name, preds, labels):
warnings.warn(DEPRECATION_WARNING, FutureWarning)
requires_backends(glue_compute_metrics, "sklearn")
assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "hans":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
warnings.warn(DEPRECATION_WARNING, FutureWarning)
requires_backends(xnli_compute_metrics, "sklearn")
assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
if task_name == "xnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
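# Illustrative usage sketch (hypothetical label ids, appended for clarity):
# the metric helpers expect numpy arrays of predicted and gold label ids.
def _example_glue_metrics():
    import numpy as np
    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    # For "mrpc" this returns accuracy, F1 and their mean.
    return glue_compute_metrics("mrpc", preds, labels)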
|
apache-2.0
|
fbagirov/scikit-learn
|
examples/feature_selection/plot_permutation_test_for_classification.py
|
250
|
2233
|
"""
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a technique is
to repeat the classification procedure after randomizing (permuting)
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
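# Illustrative check (appended here; it should match the value reported above):
# the p-value is the fraction of permutation scores at least as good as the
# original score, with the +1 correction used by permutation_test_score.
pvalue_manual = (np.sum(permutation_scores >= score) + 1.0) / (len(permutation_scores) + 1)
print("Manually recomputed p-value: %s" % pvalue_manual)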
|
bsd-3-clause
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/spyder/utils/ipython/spyder_kernel.py
|
1
|
11148
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder kernel for Jupyter
"""
# Standard library imports
import os
# Third-party imports
from ipykernel.datapub import publish_data
from ipykernel.ipkernel import IPythonKernel
import ipykernel.pickleutil
from ipykernel.pickleutil import CannedObject
from ipykernel.serialize import deserialize_object
# Check if we are running under an external interpreter
IS_EXT_INTERPRETER = os.environ.get('EXTERNAL_INTERPRETER', '').lower() == "true"
# Local imports
if not IS_EXT_INTERPRETER:
from spyder.py3compat import is_text_string
from spyder.utils.dochelpers import isdefined, getdoc, getsource
from spyder.utils.iofuncs import iofunctions
from spyder.utils.misc import fix_reference_name
from spyder.widgets.variableexplorer.utils import (get_remote_data,
make_remote_view)
else:
# We add "spyder" to sys.path for external interpreters, so this works!
# See create_kernel_spec of plugins/ipythonconsole
from py3compat import is_text_string
from utils.dochelpers import isdefined, getdoc, getsource
from utils.iofuncs import iofunctions
from utils.misc import fix_reference_name
from widgets.variableexplorer.utils import (get_remote_data,
make_remote_view)
# XXX --- Disable canning for Numpy arrays for now ---
# This allows getting values between a Python 3 frontend
# and a Python 2 kernel, and vice versa, for several types of
# arrays.
# See this link for interesting ideas on how to solve this
# in the future:
# http://stackoverflow.com/q/30698004/438386
ipykernel.pickleutil.can_map.pop('numpy.ndarray')
# Excluded variables from the Variable Explorer (i.e. they are not
# shown at all there)
EXCLUDED_NAMES = ['In', 'Out', 'exit', 'get_ipython', 'quit']
class SpyderKernel(IPythonKernel):
"""Spyder kernel for Jupyter"""
def __init__(self, *args, **kwargs):
super(SpyderKernel, self).__init__(*args, **kwargs)
self.namespace_view_settings = {}
self._pdb_obj = None
self._pdb_step = None
@property
def _pdb_frame(self):
"""Return current Pdb frame if there is any"""
if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
return self._pdb_obj.curframe
@property
def _pdb_locals(self):
"""
Return current Pdb frame locals if available. Otherwise
return an empty dictionary
"""
if self._pdb_frame:
return self._pdb_obj.curframe_locals
else:
return {}
# -- Public API ---------------------------------------------------
# --- For the Variable Explorer
def get_namespace_view(self):
"""
Return the namespace view
This is a dictionary with the following structure
{'a': {'color': '#800000', 'size': 1, 'type': 'str', 'view': '1'}}
Here:
* 'a' is the variable name
* 'color' is the color used to show it
* 'size' and 'type' are self-evident
* and 'view' is its value or the text shown in the last column
"""
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
view = make_remote_view(ns, settings, EXCLUDED_NAMES)
return view
def get_var_properties(self):
"""
Get some properties of the variables in the current
namespace
"""
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
data = get_remote_data(ns, settings, mode='editable',
more_excluded_names=EXCLUDED_NAMES)
properties = {}
for name, value in list(data.items()):
properties[name] = {
'is_list': isinstance(value, (tuple, list)),
'is_dict': isinstance(value, dict),
'len': self._get_len(value),
'is_array': self._is_array(value),
'is_image': self._is_image(value),
'is_data_frame': self._is_data_frame(value),
'is_series': self._is_series(value),
'array_shape': self._get_array_shape(value),
'array_ndim': self._get_array_ndim(value)
}
return properties
else:
return {}
def get_value(self, name):
"""Get the value of a variable"""
ns = self._get_current_namespace()
value = ns[name]
publish_data({'__spy_data__': value})
def set_value(self, name, value):
"""Set the value of a variable"""
ns = self._get_reference_namespace(name)
value = deserialize_object(value)[0]
if isinstance(value, CannedObject):
value = value.get_object()
ns[name] = value
def remove_value(self, name):
"""Remove a variable"""
ns = self._get_reference_namespace(name)
ns.pop(name)
def copy_value(self, orig_name, new_name):
"""Copy a variable"""
ns = self._get_reference_namespace(orig_name)
ns[new_name] = ns[orig_name]
def load_data(self, filename, ext):
"""Load data from filename"""
glbs = self._mglobals()
load_func = iofunctions.load_funcs[ext]
data, error_message = load_func(filename)
if error_message:
return error_message
for key in list(data.keys()):
new_key = fix_reference_name(key, blacklist=list(glbs.keys()))
if new_key != key:
data[new_key] = data.pop(key)
try:
glbs.update(data)
except Exception as error:
return str(error)
return None
def save_namespace(self, filename):
"""Save namespace into filename"""
ns = self._get_current_namespace()
settings = self.namespace_view_settings
data = get_remote_data(ns, settings, mode='picklable',
more_excluded_names=EXCLUDED_NAMES).copy()
return iofunctions.save(data, filename)
# --- For Pdb
def get_pdb_step(self):
"""Return info about pdb current frame"""
return self._pdb_step
# --- For the Help plugin
def is_defined(self, obj, force_import=False):
"""Return True if object is defined in current namespace"""
ns = self._get_current_namespace(with_magics=True)
return isdefined(obj, force_import=force_import, namespace=ns)
def get_doc(self, objtxt):
"""Get object documentation dictionary"""
obj, valid = self._eval(objtxt)
if valid:
return getdoc(obj)
def get_source(self, objtxt):
"""Get object source"""
obj, valid = self._eval(objtxt)
if valid:
return getsource(obj)
def set_cwd(self, dirname):
"""Set current working directory."""
return os.chdir(dirname)
# -- Private API ---------------------------------------------------
# --- For the Variable Explorer
def _get_current_namespace(self, with_magics=False):
"""
Return current namespace
This is globals() if not debugging, or a dictionary containing
both locals() and globals() for current frame when debugging
"""
ns = {}
glbs = self._mglobals()
if self._pdb_frame is None:
ns.update(glbs)
else:
ns.update(glbs)
ns.update(self._pdb_locals)
# Add magics to ns so we can show help about them on the Help
# plugin
if with_magics:
line_magics = self.shell.magics_manager.magics['line']
cell_magics = self.shell.magics_manager.magics['cell']
ns.update(line_magics)
ns.update(cell_magics)
return ns
def _get_reference_namespace(self, name):
"""
Return namespace where reference name is defined
It returns the globals() if reference has not yet been defined
"""
glbs = self._mglobals()
if self._pdb_frame is None:
return glbs
else:
lcls = self._pdb_locals
if name in lcls:
return lcls
else:
return glbs
def _mglobals(self):
"""Return current globals -- handles Pdb frames"""
if self._pdb_frame is not None:
return self._pdb_frame.f_globals
else:
return self.shell.user_ns
def _get_len(self, var):
"""Return sequence length"""
try:
return len(var)
except TypeError:
return None
def _is_array(self, var):
"""Return True if variable is a NumPy array"""
try:
import numpy
return isinstance(var, numpy.ndarray)
except ImportError:
return False
def _is_image(self, var):
"""Return True if variable is a PIL.Image image"""
try:
from PIL import Image
return isinstance(var, Image.Image)
except ImportError:
return False
def _is_data_frame(self, var):
"""Return True if variable is a DataFrame"""
try:
from pandas import DataFrame
return isinstance(var, DataFrame)
except:
return False
def _is_series(self, var):
"""Return True if variable is a Series"""
try:
from pandas import Series
return isinstance(var, Series)
except:
return False
def _get_array_shape(self, var):
"""Return array's shape"""
try:
if self._is_array(var):
return var.shape
else:
return None
except AttributeError:
return None
def _get_array_ndim(self, var):
"""Return array's ndim"""
try:
if self._is_array(var):
return var.ndim
else:
return None
except AttributeError:
return None
# --- For Pdb
def _register_pdb_session(self, pdb_obj):
"""Register Pdb session to use it later"""
self._pdb_obj = pdb_obj
def _set_spyder_breakpoints(self):
"""Set all Spyder breakpoints in an active pdb session"""
if not self._pdb_obj:
return
self._pdb_obj.set_spyder_breakpoints()
# --- For the Help plugin
def _eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
assert is_text_string(text)
ns = self._get_current_namespace(with_magics=True)
try:
return eval(text, ns), True
except:
return None, False
|
gpl-3.0
|
vlas-sokolov/multicube
|
examples/example-nh3.py
|
1
|
2598
|
"""
Exploring ammonia subcubes
"""
import numpy as np
import matplotlib.pylab as plt
from multicube.subcube import SubCube, SubCubeStack
from multicube.astro_toolbox import (make_test_cube, get_ncores,
tinker_ring_parspace)
from pyspeckit.spectrum.models.ammonia_constants import freq_dict
import pyspeckit
from astropy.io import fits
import astropy.units as u
xy_shape = (10, 10)
fittype = 'nh3_restricted_tex'
fitfunc = pyspeckit.spectrum.models.ammonia.ammonia_model_restricted_tex
model_kwargs = {'line_names': ['oneone', 'twotwo']}
npars, npeaks = 6, 1
# generating a dummy (gaussian) FITS file
make_test_cube(
(600, ) + xy_shape, outfile='foo.fits', sigma=(10, 5), writeSN=True)
def gauss_to_ammoniaJK(xy_pars, lfreq, fname='foo', **model_kwargs):
"""
Take a cube with a synthetic Gaussian "clump"
and replace the spectra with an ammonia one.
"""
spc = SubCube(fname + '.fits')
spc.specfit.Registry.add_fitter(fittype, fitfunc(**model_kwargs), npars)
spc.update_model(fittype)
spc.xarr.refX = lfreq
# replacing the gaussian spectra with ammonia ones
for (y, x) in np.ndindex(xy_pars.shape[1:]):
spc.cube[:, y, x] = spc.specfit.get_full_model(pars=xy_pars[:, y, x])
# adding noise to the nh3 cube (we lost it in the previous step)
spc.cube += fits.getdata(fname + '-noise.fits')
return spc
truepars = [12, 4, 15, 0.3, -25, 0.5, 0]
xy_pars = tinker_ring_parspace(truepars, xy_shape, [0, 2], [3, 1])
cubelst = [
gauss_to_ammoniaJK(xy_pars, freq_dict[line] * u.Hz, **model_kwargs)
for line in ['oneone', 'twotwo']
]
# creating a SubCubeStack instance from a list of SubCubes
cubes = SubCubeStack(cubelst)
cubes.update_model(fittype)
cubes.xarr.refX = freq_dict['oneone'] * u.Hz
cubes.xarr.velocity_convention = 'radio'
#cubes.xarr.convert_to_unit('km/s')
# setting up the grid of guesses and finding the one that matches best
minpars = [5, 3, 10.0, 0.1, -30, 0.5, 0]
maxpars = [25, 7, 20.0, 1.0, -20, 0.5, 10]
fixed = [False, False, False, False, False, True, False]
finesse = [5, 3, 5, 4, 4, 1, 1]
cubes.make_guess_grid(minpars, maxpars, finesse, fixed=fixed)
cubes.generate_model(multicore=get_ncores())
cubes.best_guess()
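# Editor's note (illustrative, assuming finesse counts grid points per
# parameter): the guess grid above holds 5 * 3 * 5 * 4 * 4 * 1 * 1 = 1200
# parameter combinations; generate_model() turns each one into a model
# spectrum and best_guess() then picks the closest one for every pixel.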
rmsmap = cubes.slice(-37, -27, unit='km/s').cube.std(axis=0)
# fitting the cube with best guesses for each pixel
cubes.fiteach(
fittype=cubes.fittype,
guesses=cubes.best_guesses,
multicore=get_ncores(),
errmap=rmsmap,
**cubes.fiteach_args)
# plot ammonia gas temperature
cubes.show_fit_param(0, cmap='viridis')
plt.ion()
plt.show()
|
mit
|
zorojean/scikit-learn
|
sklearn/covariance/graph_lasso_.py
|
127
|
25626
|
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
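# Editor's sketch (illustrative, not part of the original module): for an
# empirical covariance S and precision Theta, the two helpers above evaluate
#   objective = -2 * log_likelihood(S, Theta) + p * log(2 * pi)
#               + alpha * (||Theta||_1 - ||diag(Theta)||_1)
#   dual gap  = trace(S . Theta) - p
#               + alpha * (||Theta||_1 - ||diag(Theta)||_1)
# A minimal self-contained check: at alpha = 0 the exact solution is
# Theta = inv(S), for which the dual gap vanishes.
def _demo_dual_gap_sanity():  # hypothetical helper added for illustration
    import numpy as np
    S = np.array([[2.0, 0.3], [0.3, 1.0]])
    theta = np.linalg.inv(S)
    gap = np.sum(S * theta) - S.shape[0]  # alpha = 0, so no l1 term
    assert abs(gap) < 1e-10
    return gap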
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
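# Editor's sketch (illustrative, not part of the original module): alpha_max is
# simply the largest absolute off-diagonal entry of the empirical covariance;
# for alpha above this level the off-diagonal entries of the estimated
# precision are driven to zero.
def _demo_alpha_max():  # hypothetical helper added for illustration
    import numpy as np
    S = np.array([[1.0, 0.4, -0.7],
                  [0.4, 1.0, 0.2],
                  [-0.7, 0.2, 1.0]])
    off_diag = S - np.diag(np.diag(S))
    expected = np.max(np.abs(off_diag))  # 0.7 for this example
    assert np.isclose(alpha_max(S), expected)
    return expected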
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
    # make the convergence faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
        printed at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
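# Editor's sketch (illustrative usage, not part of the original module).
# Assumes make_sparse_spd_matrix is available from sklearn.datasets, as in
# contemporary scikit-learn releases.
def _demo_graph_lasso_usage():  # hypothetical helper added for illustration
    import numpy as np
    from sklearn.datasets import make_sparse_spd_matrix
    rng = np.random.RandomState(0)
    prec = make_sparse_spd_matrix(25, alpha=0.95, random_state=rng)
    cov = linalg.inv(prec)
    X = rng.multivariate_normal(np.zeros(25), cov, size=100)
    model = GraphLasso(alpha=0.05).fit(X)
    # The estimated precision matrix is sparse off the diagonal.
    return model.covariance_, model.precision_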
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
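# Editor's sketch (illustrative usage, not part of the original module):
# GraphLassoCV selects alpha by cross-validation on an iteratively refined
# grid, so the caller only supplies the samples.
def _demo_graph_lasso_cv_usage():  # hypothetical helper added for illustration
    import numpy as np
    rng = np.random.RandomState(42)
    X = rng.randn(60, 4)
    model = GraphLassoCV(alphas=4, n_refinements=2, cv=3).fit(X)
    return model.alpha_, model.cv_alphas_, model.grid_scores.shape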
|
bsd-3-clause
|
mrocklin/blaze
|
blaze/compute/pandas.py
|
1
|
14766
|
"""
>>> from blaze.expr import symbol
>>> from blaze.compute.pandas import compute
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> deadbeats = accounts[accounts['amount'] < 0]['name']
>>> from pandas import DataFrame
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> df = DataFrame(data, columns=['name', 'amount'])
>>> compute(deadbeats, df)
1 Bob
2 Charlie
Name: name, dtype: object
"""
from __future__ import absolute_import, division, print_function
import pandas as pd
from pandas.core.generic import NDFrame
from pandas import DataFrame, Series
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
import numpy as np
from toolz import merge as merge_dicts
from toolz.curried import pipe, filter, map, concat
import fnmatch
from datashape.predicates import isscalar
import datashape
import itertools
from odo import into
from ..dispatch import dispatch
from ..expr import (Projection, Field, Sort, Head, Broadcast, Selection,
Reduction, Distinct, Join, By, Summary, Label, ReLabel,
Map, Apply, Merge, std, var, Like, Slice, summary,
ElemWise, DateTime, Millisecond, Expr, Symbol,
UTCFromTimestamp, nelements, DateTimeTruncate, count,
UnaryStringFunction)
from ..expr import UnaryOp, BinOp
from ..expr import symbol, common_subexpression
from .core import compute, compute_up, base
from ..compatibility import _inttypes
__all__ = []
@dispatch(Projection, DataFrame)
def compute_up(t, df, **kwargs):
return df[list(t.fields)]
@dispatch(Field, (DataFrame, DataFrameGroupBy))
def compute_up(t, df, **kwargs):
assert len(t.fields) == 1
return df[t.fields[0]]
@dispatch(Field, Series)
def compute_up(t, data, **kwargs):
assert len(t.fields) == 1
if t.fields[0] == data.name:
return data
else:
raise ValueError("Fieldname %r does not match Series name %r"
% (t.fields[0], data.name))
@dispatch(Broadcast, DataFrame)
def compute_up(t, df, **kwargs):
d = dict((t._child[c]._expr, df[c]) for c in t._child.fields)
return compute(t._expr, d)
@dispatch(Broadcast, Series)
def compute_up(t, s, **kwargs):
return compute_up(t, s.to_frame(), **kwargs)
@dispatch(BinOp, Series)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return t.op(data, t.rhs)
else:
return t.op(t.lhs, data)
@dispatch(BinOp, Series, (Series, base))
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, (Series, base), Series)
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(UnaryOp, NDFrame)
def compute_up(t, df, **kwargs):
f = getattr(t, 'op', getattr(np, t.symbol, None))
if f is None:
raise ValueError('%s is not a valid operation on %s objects' %
(t.symbol, type(df).__name__))
return f(df)
@dispatch(Selection, (Series, DataFrame))
def compute_up(t, df, **kwargs):
predicate = compute(t.predicate, {t._child: df})
return df[predicate]
@dispatch(Join, DataFrame, DataFrame)
def compute_up(t, lhs, rhs, **kwargs):
""" Join two pandas data frames on arbitrary columns
The approach taken here could probably be improved.
To join on two columns we force each column to be the index of the
dataframe, perform the join, and then reset the index back to the left
side's original index.
"""
result = pd.merge(lhs, rhs,
left_on=t.on_left, right_on=t.on_right,
how=t.how)
return result.reset_index()[t.fields]
@dispatch(Symbol, (DataFrameGroupBy, SeriesGroupBy))
def compute_up(t, gb, **kwargs):
return gb
def get_scalar(result):
# pandas may return an int, numpy scalar or non scalar here so we need to
# program defensively so that things are JSON serializable
try:
return result.item()
except (AttributeError, ValueError):
return result
@dispatch(Reduction, (Series, SeriesGroupBy))
def compute_up(t, s, **kwargs):
result = get_scalar(getattr(s, t.symbol)())
if t.keepdims:
result = Series([result], name=s.name)
return result
@dispatch((std, var), (Series, SeriesGroupBy))
def compute_up(t, s, **kwargs):
result = get_scalar(getattr(s, t.symbol)(ddof=t.unbiased))
if t.keepdims:
result = Series([result], name=s.name)
return result
@dispatch(Distinct, (DataFrame, Series))
def compute_up(t, df, **kwargs):
return df.drop_duplicates().reset_index(drop=True)
string_func_names = {
'strlen': 'len',
}
@dispatch(UnaryStringFunction, Series)
def compute_up(expr, data, **kwargs):
name = type(expr).__name__
return getattr(data.str, string_func_names.get(name, name))()
def unpack(seq):
""" Unpack sequence of length one
>>> unpack([1, 2, 3])
[1, 2, 3]
>>> unpack([1])
1
"""
seq = list(seq)
if len(seq) == 1:
seq = seq[0]
return seq
Grouper = ElemWise, Series, list
@dispatch(By, list, DataFrame)
def get_grouper(c, grouper, df):
return grouper
@dispatch(By, Expr, NDFrame)
def get_grouper(c, grouper, df):
g = compute(grouper, {c._child: df})
if isinstance(g, Series):
return g
if isinstance(g, DataFrame):
return [g[col] for col in g.columns]
@dispatch(By, (Field, Projection), NDFrame)
def get_grouper(c, grouper, df):
return grouper.fields
@dispatch(By, Reduction, Grouper, NDFrame)
def compute_by(t, r, g, df):
names = [r._name]
preapply = compute(r._child, {t._child: df})
# Pandas and Blaze column naming schemes differ
# Coerce DataFrame column names to match Blaze's names
preapply = preapply.copy()
if isinstance(preapply, Series):
preapply.name = names[0]
else:
preapply.names = names
group_df = concat_nodup(df, preapply)
gb = group_df.groupby(g)
groups = gb[names[0] if isscalar(t.apply._child.dshape.measure) else names]
return compute_up(r, groups) # do reduction
name_dict = dict()
seen_names = set()
def _name(expr):
""" A unique and deterministic name for an expression """
if expr in name_dict:
return name_dict[expr]
result = base = expr._name or '_'
if result in seen_names:
for i in itertools.count(1):
result = '%s_%d' % (base, i)
if result not in seen_names:
break
# result is an unseen name
seen_names.add(result)
name_dict[expr] = result
return result
def fancify_summary(expr):
""" Separate a complex summary into two pieces
Helps pandas compute_by on summaries
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> one, two, three = fancify_summary(summary(a=t.x.sum(), b=t.x.sum() + t.y.count() - 1))
A simpler summary with only raw reductions
>>> one
summary(x_sum=sum(t.x), y_count=count(t.y))
    A mapping of those names to new leaves to use in another computation
>>> two # doctest: +SKIP
{'x_sum': x_sum, 'y_count': y_count}
A mapping of computations to do for each column
>>> three # doctest: +SKIP
{'a': x_sum, 'b': (x_sum + y_count) - 1}
In this way, ``compute_by`` is able to do simple pandas reductions using
groups.agg(...) and then do columnwise arithmetic afterwards.
"""
seen_names.clear()
name_dict.clear()
exprs = pipe(expr.values,
map(Expr._traverse),
concat,
filter(lambda x: isinstance(x, Reduction)),
set)
one = summary(**dict((_name(expr), expr) for expr in exprs))
two = dict((_name(expr), symbol(_name(expr), datashape.var * expr.dshape))
for expr in exprs)
d = dict((expr, two[_name(expr)]) for expr in exprs)
three = dict((name, value._subs(d)) for name, value in zip(expr.names,
expr.values))
return one, two, three
@dispatch(By, Summary, Grouper, NDFrame)
def compute_by(t, s, g, df):
one, two, three = fancify_summary(s) # see above
names = one.fields
preapply = DataFrame(dict(zip(names,
[compute(v._child, {t._child: df})
for v in one.values])))
df2 = concat_nodup(df, preapply)
groups = df2.groupby(g)
d = dict((name, v.symbol) for name, v in zip(one.names, one.values))
result = groups.agg(d)
scope = dict((v, result[k]) for k, v in two.items())
cols = [compute(expr.label(name), scope) for name, expr in three.items()]
result2 = pd.concat(cols, axis=1)
# Rearrange columns to match names order
result3 = result2[sorted(result2.columns, key=lambda t: s.fields.index(t))]
return result3
@dispatch(Expr, DataFrame)
def post_compute_by(t, df):
return df.reset_index(drop=True)
@dispatch((Summary, Reduction), DataFrame)
def post_compute_by(t, df):
return df.reset_index()
@dispatch(By, NDFrame)
def compute_up(t, df, **kwargs):
grouper = get_grouper(t, t.grouper, df)
result = compute_by(t, t.apply, grouper, df)
result2 = post_compute_by(t.apply, into(DataFrame, result))
if isinstance(result2, DataFrame):
result2.columns = t.fields
return result2
def concat_nodup(a, b):
""" Concatenate two dataframes/series without duplicately named columns
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Charlie', 300]],
... columns=['id','name', 'amount'])
>>> concat_nodup(df, df)
id name amount
0 1 Alice 100
1 2 Bob -200
2 3 Charlie 300
>>> concat_nodup(df.name, df.amount)
name amount
0 Alice 100
1 Bob -200
2 Charlie 300
>>> concat_nodup(df, df.amount + df.id)
id name amount 0
0 1 Alice 100 101
1 2 Bob -200 -198
2 3 Charlie 300 303
"""
if isinstance(a, DataFrame) and isinstance(b, DataFrame):
return pd.concat([a, b[[c for c in b.columns if c not in a.columns]]],
axis=1)
if isinstance(a, DataFrame) and isinstance(b, Series):
if b.name not in a.columns:
return pd.concat([a, b], axis=1)
else:
return a
if isinstance(a, Series) and isinstance(b, DataFrame):
return pd.concat([a, b[[c for c in b.columns if c != a.name]]], axis=1)
if isinstance(a, Series) and isinstance(b, Series):
if a.name == b.name:
return a
else:
return pd.concat([a, b], axis=1)
@dispatch(Sort, DataFrame)
def compute_up(t, df, **kwargs):
return df.sort(t.key, ascending=t.ascending)
@dispatch(Sort, Series)
def compute_up(t, s, **kwargs):
return s.order(ascending=t.ascending)
@dispatch(Head, (Series, DataFrame))
def compute_up(t, df, **kwargs):
return df.head(t.n)
@dispatch(Label, DataFrame)
def compute_up(t, df, **kwargs):
return DataFrame(df, columns=[t.label])
@dispatch(Label, Series)
def compute_up(t, df, **kwargs):
return Series(df, name=t.label)
@dispatch(ReLabel, DataFrame)
def compute_up(t, df, **kwargs):
return df.rename(columns=dict(t.labels))
@dispatch(ReLabel, Series)
def compute_up(t, s, **kwargs):
labels = t.labels
if len(labels) > 1:
raise ValueError('You can only relabel a Series with a single name')
pair, = labels
_, replacement = pair
return Series(s, name=replacement)
@dispatch(Map, DataFrame)
def compute_up(t, df, **kwargs):
return df.apply(lambda tup: t.func(*tup), axis=1)
@dispatch(Map, Series)
def compute_up(t, df, **kwargs):
result = df.map(t.func)
try:
result.name = t._name
except NotImplementedError:
# We don't have a schema, but we should still be able to map
result.name = df.name
return result
@dispatch(Apply, (Series, DataFrame))
def compute_up(t, df, **kwargs):
return t.func(df)
@dispatch(Merge, NDFrame)
def compute_up(t, df, scope=None, **kwargs):
subexpression = common_subexpression(*t.children)
scope = merge_dicts(scope or {}, {subexpression: df})
children = [compute(_child, scope) for _child in t.children]
return pd.concat(children, axis=1)
@dispatch(Summary, DataFrame)
def compute_up(expr, data, **kwargs):
values = [compute(val, {expr._child: data}) for val in expr.values]
if expr.keepdims:
return DataFrame([values], columns=expr.fields)
else:
return Series(dict(zip(expr.fields, values)))
@dispatch(Summary, Series)
def compute_up(expr, data, **kwargs):
result = tuple(compute(val, {expr._child: data}) for val in expr.values)
if expr.keepdims:
result = [result]
return result
@dispatch(Like, DataFrame)
def compute_up(expr, df, **kwargs):
arrs = [df[name].str.contains('^%s$' % fnmatch.translate(pattern))
for name, pattern in expr.patterns.items()]
return df[np.logical_and.reduce(arrs)]
def get_date_attr(s, attr):
try:
# new in pandas 0.15
return getattr(s.dt, attr)
except AttributeError:
return getattr(pd.DatetimeIndex(s), attr)
@dispatch(DateTime, Series)
def compute_up(expr, s, **kwargs):
return get_date_attr(s, expr.attr)
@dispatch(UTCFromTimestamp, Series)
def compute_up(expr, s, **kwargs):
return pd.datetools.to_datetime(s * 1e9, utc=True)
@dispatch(Millisecond, Series)
def compute_up(_, s, **kwargs):
return get_date_attr(s, 'microsecond') // 1000
@dispatch(Slice, (DataFrame, Series))
def compute_up(expr, df, **kwargs):
index = expr.index
if isinstance(index, tuple) and len(index) == 1:
index = index[0]
if isinstance(index, _inttypes + (list,)):
return df.iloc[index]
elif isinstance(index, slice):
if index.stop is not None:
return df.iloc[index.start:index.stop:index.step]
else:
return df.iloc[index]
else:
raise NotImplementedError()
@dispatch(count, DataFrame)
def compute_up(expr, df, **kwargs):
result = df.shape[0]
if expr.keepdims:
result = Series([result])
return result
@dispatch(nelements, (DataFrame, Series))
def compute_up(expr, df, **kwargs):
return df.shape[0]
units_map = {
'year': 'Y',
'month': 'M',
'week': 'W',
'day': 'D',
'hour': 'h',
'minute': 'm',
'second': 's',
'millisecond': 'ms',
'microsecond': 'us',
'nanosecond': 'ns'
}
@dispatch(DateTimeTruncate, Series)
def compute_up(expr, data, **kwargs):
return Series(compute_up(expr, into(np.ndarray, data), **kwargs))
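# Editor's sketch (illustrative, not part of the original module): the
# dispatchers above also cover sorts, heads and reductions on plain pandas
# objects.  Reuses the accounts example from the module docstring and assumes
# blaze expressions expose .sort() and .head() as in contemporary releases.
def _demo_sort_and_head():  # hypothetical helper added for illustration
    accounts = symbol('accounts', 'var * {name: string, amount: int}')
    df = DataFrame([['Alice', 100], ['Bob', -50], ['Charlie', -20]],
                   columns=['name', 'amount'])
    # Sort and Head are handled by the compute_up implementations above.
    return compute(accounts.sort('amount', ascending=False).head(2), df)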
|
bsd-3-clause
|
picca/hkl
|
tests/bindings/trajectory.py
|
1
|
4521
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import rcParams
from gi.repository import GLib
from gi.repository import Hkl
def compute_hkl_trajectories(engines, engine, hkl1=None, hkl2=None, n=100):
"""
    Compute all the trajectories for a given, already configured engine.
"""
if not hkl1:
hkl1 = [0, 0, 1]
if not hkl2:
hkl2 = [0, 1, 1]
h = numpy.linspace(hkl1[0], hkl2[0], n + 1)
k = numpy.linspace(hkl1[1], hkl2[1], n + 1)
l = numpy.linspace(hkl1[2], hkl2[2], n + 1)
# set the hkl engine and get the results
trajectories = []
for hh, kk, ll in zip(h, k, l):
try:
solutions = engine.pseudo_axis_values_set([hh, kk, ll],
Hkl.UnitEnum.USER)
first_solution = solutions.items()[0]
for i, item in enumerate(solutions.items()):
try:
trajectories[i]
except IndexError:
trajectories.append([])
values = item.geometry_get().axis_values_get(Hkl.UnitEnum.USER)
trajectories[i].append(values)
engines.select_solution(first_solution)
except GLib.GError, err:
pass
return trajectories
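# Editor's note (illustrative): each entry of the returned list gathers, for
# one solution index, the axis-value tuples along the n+1 (h, k, l) steps
# interpolated between hkl1 and hkl2, so trajectories[0] is the path followed
# by the first solution of the engine.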
def _plot_legend(axes):
plt.subplot(3, 4, 1)
plt.title("legend")
print "legende", 1
for name in axes:
plt.plot([0, 0], label=name)
plt.legend()
def plot_hkl_trajectory(filename, geometry, engines,
hkl1=None, hkl2=None, n=100):
"""
    Plot the trajectories for an engine, one subplot per mode,
    between hkl1 and hkl2.
"""
axis_names = geometry.axis_names_get()
hkl = engines.engine_get_by_name("hkl")
page = 1
plt.clf()
plt.suptitle("\"" + filename + "\" " + repr(
hkl1) + " -> " + repr(hkl2) + " page " + str(page))
_plot_legend(axis_names)
idx = 2
for mode in hkl.modes_names_get():
hkl.current_mode_set(mode)
trajectories = compute_hkl_trajectories(engines, hkl, hkl1=hkl1, hkl2=hkl2, n=n)
print "\"" + filename + "\"", idx, mode, len(trajectories)
plt.subplot(3, 4, idx)
plt.title(mode)
if not len(trajectories):
plt.text(0.5, 0.5, "Failed", size=20, rotation=0.,
ha="center", va="center",
bbox=dict(boxstyle="round",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
plt.draw()
else:
plt.ylim(-180, 180)
if len(trajectories[0]) == 1:
plt.plot(trajectories[0], 'o-')
else:
plt.plot(trajectories[0], '-')
idx += 1
if idx > 12:
pp.savefig()
plt.clf()
page += 1
_plot_legend(axis_names)
plt.suptitle(filename + " " + repr(
hkl1) + " -> " + repr(hkl2) + " page " + str(page))
idx = 2
pp.savefig()
pp = PdfPages('trajectories.pdf')
rcParams['font.size'] = 6
def main():
sample = Hkl.Sample.new("toto")
lattice = Hkl.Lattice.new(1.54, 1.54, 1.54,
math.radians(90.),
math.radians(90.),
math.radians(90.))
sample.lattice_set(lattice)
detector = Hkl.Detector.factory_new(Hkl.DetectorType(0))
for key, factory in Hkl.factories().iteritems():
geometry = factory.create_new_geometry()
engines = factory.create_new_engine_list()
        # for now, restrict the detector arm (the tth or delta axis) to
        # positive values only
for axis in geometry.axis_names_get():
if axis in ["tth", "delta"]:
tmp = geometry.axis_get(axis)
tmp.min_max_set(0, 180., Hkl.UnitEnum.USER)
geometry.axis_set(axis, tmp)
engines.init(geometry, detector, sample)
engines_names = [engine.name_get() for engine in engines.engines_get()]
if 'hkl' in engines_names:
plot_hkl_trajectory(key, geometry, engines,
hkl1=[0, 0, 1], hkl2=[0, 1, 1], n=100)
pp.close()
if __name__ == '__main__':
main()
|
gpl-3.0
|
ab93/Depression-Identification
|
src/feature_extract/extract_COVAREP_FORMANT.py
|
1
|
15195
|
import pandas as pd
from glob import glob
import numpy as np
import re
import csv
import sys
followUp = {}
ack = {}
nonIntimate = {}
intimate = {}
featureList = {}
'''headers for COVAREP features'''
header = ["video", "question", "starttime", "endtime", 'F0_mean', 'VUV_mean', 'NAQ_mean', 'QOQ_mean', 'H1H2_mean',
'PSP_mean', 'MDQ_mean', 'peakSlope_mean', 'Rd_mean', 'Rd_conf_mean', 'creak_mean', 'MCEP_0_mean',
'MCEP_1_mean', 'MCEP_2_mean', 'MCEP_3_mean', 'MCEP_4_mean', 'MCEP_5_mean', 'MCEP_6_mean', 'MCEP_7_mean',
'MCEP_8_mean', 'MCEP_9_mean', 'MCEP_10_mean', 'MCEP_11_mean', 'MCEP_12_mean', 'MCEP_13_mean', 'MCEP_14_mean',
'MCEP_15_mean', 'MCEP_16_mean', 'MCEP_17_mean', 'MCEP_18_mean', 'MCEP_19_mean', 'MCEP_20_mean',
'MCEP_21_mean', 'MCEP_22_mean', 'MCEP_23_mean', 'MCEP_24_mean', 'HMPDM_0_mean', 'HMPDM_1_mean',
'HMPDM_2_mean', 'HMPDM_3_mean', 'HMPDM_4_mean', 'HMPDM_5_mean', 'HMPDM_6_mean', 'HMPDM_7_mean',
'HMPDM_8_mean', 'HMPDM_9_mean', 'HMPDM_10_mean', 'HMPDM_11_mean', 'HMPDM_12_mean', 'HMPDM_13_mean',
'HMPDM_14_mean', 'HMPDM_15_mean', 'HMPDM_16_mean', 'HMPDM_17_mean', 'HMPDM_18_mean', 'HMPDM_19_mean',
'HMPDM_20_mean', 'HMPDM_21_mean', 'HMPDM_22_mean', 'HMPDM_23_mean', 'HMPDM_24_mean', 'HMPDD_0_mean',
'HMPDD_1_mean', 'HMPDD_2_mean', 'HMPDD_3_mean', 'HMPDD_4_mean', 'HMPDD_5_mean', 'HMPDD_6_mean',
'HMPDD_7_mean', 'HMPDD_8_mean', 'HMPDD_9_mean', 'HMPDD_10_mean', 'HMPDD_11_mean', 'HMPDD_12_mean',
'F0_stddev', 'VUV_stddev', 'NAQ_stddev', 'QOQ_stddev', 'H1H2_stddev', 'PSP_stddev', 'MDQ_stddev',
'peakSlope_stddev', 'Rd_stddev', 'Rd_conf_stddev', 'creak_stddev', 'MCEP_0_stddev', 'MCEP_1_stddev',
'MCEP_2_stddev', 'MCEP_3_stddev', 'MCEP_4_stddev', 'MCEP_5_stddev', 'MCEP_6_stddev', 'MCEP_7_stddev',
'MCEP_8_stddev', 'MCEP_9_stddev', 'MCEP_10_stddev', 'MCEP_11_stddev', 'MCEP_12_stddev', 'MCEP_13_stddev',
'MCEP_14_stddev', 'MCEP_15_stddev', 'MCEP_16_stddev', 'MCEP_17_stddev', 'MCEP_18_stddev', 'MCEP_19_stddev',
'MCEP_20_stddev', 'MCEP_21_stddev', 'MCEP_22_stddev', 'MCEP_23_stddev', 'MCEP_24_stddev', 'HMPDM_0_stddev',
'HMPDM_1_stddev', 'HMPDM_2_stddev', 'HMPDM_3_stddev', 'HMPDM_4_stddev', 'HMPDM_5_stddev', 'HMPDM_6_stddev',
'HMPDM_7_stddev', 'HMPDM_8_stddev', 'HMPDM_9_stddev', 'HMPDM_10_stddev', 'HMPDM_11_stddev', 'HMPDM_12_stddev',
'HMPDM_13_stddev', 'HMPDM_14_stddev', 'HMPDM_15_stddev', 'HMPDM_16_stddev', 'HMPDM_17_stddev',
'HMPDM_18_stddev', 'HMPDM_19_stddev', 'HMPDM_20_stddev', 'HMPDM_21_stddev', 'HMPDM_22_stddev',
'HMPDM_23_stddev', 'HMPDM_24_stddev', 'HMPDD_0_stddev', 'HMPDD_1_stddev', 'HMPDD_2_stddev', 'HMPDD_3_stddev',
'HMPDD_4_stddev', 'HMPDD_5_stddev', 'HMPDD_6_stddev', 'HMPDD_7_stddev', 'HMPDD_8_stddev', 'HMPDD_9_stddev',
'HMPDD_10_stddev', 'HMPDD_11_stddev', 'HMPDD_12_stddev', 'gender']
'''headers for FORMANT features'''
header_f = ["video", "question", "starttime", "endtime", 'formant1_mean', 'formant2_mean', 'formant3_mean',
'formant4_mean', 'formant5_mean', 'formant1_stddev', 'formant2_stddev', 'formant3_stddev',
'formant4_stddev', 'formant5_stddev', 'gender']
questionType_DND = {}
questionType_PN = {}
questionAnswers = {}
'''
Reads DND questions and PN questions.
Retrieves acknowledgements, follow-ups, intimate and non-intimate questions and stores them in global variables
'''
def readHelperData():
global followUp, ack, nonIntimate, intimate, questionType_PN, questionType_DND
utterrances = pd.read_csv('data/misc/IdentifyingFollowUps.csv')
disc_nondisc = pd.read_csv('data/misc/DND_Annotations.csv')
pos_neg = pd.read_csv('data/misc/PN_Annotations.csv')
# Discriminative/Non-discriminative annotations
for i in xrange(len(disc_nondisc)):
question = disc_nondisc.iloc[i]['Questions']
qType = disc_nondisc.iloc[i]['Annotations']
questionType_DND[question] = qType
# Positive/Negative annotations
for i in xrange(len(pos_neg)):
question = pos_neg.iloc[i]['Questions']
qType = pos_neg.iloc[i]['Annotations']
questionType_PN[question] = qType
for item in utterrances.itertuples():
if item[3] == "#follow_up" and item[1] not in followUp:
followUp[item[1]] = item[2]
elif item[3] == "#ack" and item[1] not in ack:
ack[item[1]] = item[2]
elif item[3] == "#non_int" and item[1] not in nonIntimate:
nonIntimate[item[1]] = item[2]
elif item[3] == "#int" and item[1] not in intimate:
intimate[item[1]] = item[2]
'''
Reads transcripts and captures the start and end times of the answers to the most frequent intimate questions, as well as the start and end times of follow-up questions that follow them
'''
def readTranscript():
global featureList
transcriptFiles = glob(sys.argv[1] + '[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
for i in range(0, len(transcriptFiles)):
t = pd.read_csv(transcriptFiles[i], delimiter=',|\t', engine='python')
t = t.fillna("")
captureStarted = False
startTime = 0.0
endTime = 0.0
prevQuestion = ""
participantNo = transcriptFiles[i][-18:-15]
for j in xrange(len(t)):
question = re.search(".*\((.*)\)$", t.iloc[j]['value'])
if question is not None:
question = question.group(1)
else:
question = t.iloc[j]['value']
question = question.strip()
if t.iloc[j]['speaker'] == 'Ellie':
endTime = t.iloc[j]['start_time']
if question in nonIntimate and captureStarted:
if (participantNo, prevQuestion) not in featureList:
featureList[(participantNo, prevQuestion)] = [startTime, endTime]
else:
featureList[(participantNo, prevQuestion)][1] = endTime
captureStarted = False
elif question in intimate and question in questionType_DND and captureStarted:
endTime = t.iloc[j]['start_time']
if (participantNo, prevQuestion) not in featureList:
featureList[(participantNo, prevQuestion)] = [startTime, endTime]
else:
featureList[(participantNo, prevQuestion)][1] = endTime
startTime = t.iloc[j]['start_time']
endTime = t.iloc[j]['stop_time']
prevQuestion = question
elif question in intimate and question in questionType_DND and not captureStarted:
startTime = t.iloc[j]['start_time']
endTime = t.iloc[j]['stop_time']
prevQuestion = question
captureStarted = True
elif question in intimate and question not in questionType_DND and captureStarted:
endTime = t.iloc[j]['start_time']
if (participantNo, prevQuestion) not in featureList:
featureList[(participantNo, prevQuestion)] = [startTime, endTime]
else:
featureList[(participantNo, prevQuestion)][1] = endTime
captureStarted = False
elif question in followUp or question in ack and captureStarted:
endTime = t.iloc[j]['stop_time']
elif t.iloc[j]['speaker'] == 'Participant' and captureStarted:
# endTime=t.iloc[j]['stop_time']
continue
'''
Generates features from FORMANT files using the start and end times for each frequent intimate question from the DND list.
Features are generated by taking the mean and std dev of all the features for every question in every video.
'''
def readFORMANT_DND():
print 'FORMANT DND'
groupByQuestion = {}
gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
dFile = open('data/disc_nondisc/discriminative_FORMANT.csv', 'w')
ndFile = open('data/disc_nondisc/nondiscriminative_FORMANT.csv', 'w')
dWriter = csv.writer(dFile)
ndWriter = csv.writer(ndFile)
dWriter.writerow(header_f)
ndWriter.writerow(header_f)
for item in featureList:
if item[0] not in groupByQuestion:
groupByQuestion[item[0]] = [(item[1], featureList[item])]
else:
groupByQuestion[item[0]].append((item[1], featureList[item]))
for item in groupByQuestion:
fileName = sys.argv[1] + item + '_P/' + item + '_FORMANT.csv'
f = pd.read_csv(fileName, delimiter=',|\t', engine='python')
print item
for instance in groupByQuestion[item]:
startTime = instance[1][0]
endTime = instance[1][1]
startFrame = startTime * 100
endFrame = endTime * 100
features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
vector = instance[1][:]
vector += features_mean
vector += features_stddev
vector.insert(0, item)
vector.insert(1, instance[0])
vector.append(gender[item])
vector = np.asarray(vector)
if questionType_DND[instance[0]] == 'D':
dWriter.writerow(vector)
else:
ndWriter.writerow(vector)
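# Editor's note (illustrative): the transcript start/end times are in seconds;
# the "* 100" conversion above assumes the FORMANT/COVAREP files are sampled at
# 100 frames per second (10 ms per row), so f.ix[startFrame:endFrame] selects
# the rows uttered during that answer before the mean/std are taken.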
'''
Generates features from FORMANT files using the start and end times for each frequent intimate question from the PN list.
Features are generated by taking the mean and std dev of all the features for every question in every video.
'''
def readFORMANT_PN():
print 'FORMANT PN'
groupByQuestion = {}
gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
pFile = open('data/pos_neg/positive_FORMANT.csv', 'w')
nFile = open('data/pos_neg/negative_FORMANT.csv', 'w')
pWriter = csv.writer(pFile)
nWriter = csv.writer(nFile)
pWriter.writerow(header_f)
nWriter.writerow(header_f)
for item in featureList:
if item[0] not in groupByQuestion:
groupByQuestion[item[0]] = [(item[1], featureList[item])]
else:
groupByQuestion[item[0]].append((item[1], featureList[item]))
for item in groupByQuestion:
fileName = sys.argv[1] + item + '_P/' + item + '_FORMANT.csv'
f = pd.read_csv(fileName, delimiter=',|\t', engine='python')
print item
for instance in groupByQuestion[item]:
startTime = instance[1][0]
endTime = instance[1][1]
startFrame = startTime * 100
endFrame = endTime * 100
features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
vector = instance[1][:]
vector += features_mean
vector += features_stddev
vector.insert(0, item)
vector.insert(1, instance[0])
vector.append(gender[item])
vector = np.asarray(vector)
if questionType_PN[instance[0]] == 'P':
pWriter.writerow(vector)
else:
nWriter.writerow(vector)
'''
Generates features from COVAREP files using the start and end times for each frequent intimate question from the DND list.
Features are generated by taking the mean and std dev of all the features for every question in every video.
'''
def readCOVAREP_DND():
print 'COVAREP DND'
groupByQuestion = {}
gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
dFile = open('data/disc_nondisc/discriminative_COVAREP.csv', 'w')
ndFile = open('data/disc_nondisc/nondiscriminative_COVAREP.csv', 'w')
dWriter = csv.writer(dFile)
ndWriter = csv.writer(ndFile)
dWriter.writerow(header)
ndWriter.writerow(header)
for item in featureList:
if item[0] not in groupByQuestion:
groupByQuestion[item[0]] = [(item[1], featureList[item])]
else:
groupByQuestion[item[0]].append((item[1], featureList[item]))
for item in groupByQuestion:
fileName = sys.argv[1] + item + '_P/' + item + '_COVAREP.csv'
f = pd.read_csv(fileName, delimiter=',|\t', engine='python')
print item
for instance in groupByQuestion[item]:
startTime = instance[1][0]
endTime = instance[1][1]
startFrame = startTime * 100
endFrame = endTime * 100
features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
vector = instance[1][:]
vector += features_mean
vector += features_stddev
vector.insert(0, item)
vector.insert(1, instance[0])
vector.append(gender[item])
vector = np.asarray(vector)
if questionType_DND[instance[0]] == 'D':
dWriter.writerow(vector)
else:
ndWriter.writerow(vector)
'''
Generates features from COVAREP files using the start and end times for each frequent intimate question from the PN list.
Features are generated by taking the mean and std dev of all the features for every question in every video.
'''
def readCOVAREP_PN():
print 'COVAREP PN'
groupByQuestion = {}
gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
pFile = open('data/pos_neg/positive_COVAREP.csv', 'w')
nFile = open('data/pos_neg/negative_COVAREP.csv', 'w')
pWriter = csv.writer(pFile)
nWriter = csv.writer(nFile)
pWriter.writerow(header)
nWriter.writerow(header)
for item in featureList:
if item[0] not in groupByQuestion:
groupByQuestion[item[0]] = [(item[1], featureList[item])]
else:
groupByQuestion[item[0]].append((item[1], featureList[item]))
for item in groupByQuestion:
fileName = sys.argv[1] + item + '_P/' + item + '_COVAREP.csv'
f = pd.read_csv(fileName, delimiter=',|\t', engine='python')
print item
for instance in groupByQuestion[item]:
startTime = instance[1][0]
endTime = instance[1][1]
startFrame = startTime * 100
endFrame = endTime * 100
features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
vector = instance[1][:]
vector += features_mean
vector += features_stddev
vector.insert(0, item)
vector.insert(1, instance[0])
vector.append(gender[item])
vector = np.asarray(vector)
if questionType_PN[instance[0]] == 'P':
pWriter.writerow(vector)
else:
nWriter.writerow(vector)
if __name__ == "__main__":
readHelperData()
readTranscript()
readFORMANT_DND()
readFORMANT_PN()
readCOVAREP_DND()
readCOVAREP_PN()
|
mit
|
mlperf/training_results_v0.7
|
Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-512/lingvo/core/plot.py
|
1
|
17763
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for generating image summaries using matplotlib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import traceback
import REDACTED.transformer_lingvo.lingvo.compat as tf
from REDACTED.transformer_lingvo.lingvo.core import py_utils
from matplotlib.backends import backend_agg
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import six
from six.moves import range
from six.moves import zip
def ToUnicode(text):
if not isinstance(text, six.text_type):
text = six.ensure_text(text, 'utf-8')
return text
def AddPlot(unused_fig,
axes,
data,
title=u'',
xlabel=u'',
ylabel=u'',
fontsize='small',
xlim=None,
ylim=None,
suppress_xticks=False,
suppress_yticks=False):
"""Convenience function to add a plot."""
axes.plot(data)
axes.set_title(ToUnicode(title), size=fontsize)
axes.set_xlabel(ToUnicode(xlabel), size=fontsize)
axes.set_ylabel(ToUnicode(ylabel), size=fontsize)
if xlim:
axes.set_xlim(xlim)
if ylim:
axes.set_ylim(ylim)
if suppress_xticks:
axes.set_xticks([])
if suppress_yticks:
axes.set_yticks([])
def AddImage(fig,
axes,
data,
cmap='bone_r',
clim=None,
show_colorbar=True,
title=u'',
xlabel=u'',
ylabel=u'',
fontsize='small',
origin='lower',
suppress_xticks=False,
suppress_yticks=False,
aspect='auto',
vmin=None,
vmax=None):
"""Convenience function to plot data as an image on the given axes."""
image = axes.imshow(
data,
cmap=cmap,
origin=origin,
aspect=aspect,
interpolation='nearest',
vmin=vmin,
vmax=vmax)
if show_colorbar:
fig.colorbar(image)
if clim is not None:
image.set_clim(clim)
axes.set_title(ToUnicode(title), size=fontsize)
axes.set_xlabel(ToUnicode(xlabel), size=fontsize)
axes.set_ylabel(ToUnicode(ylabel), size=fontsize)
if suppress_xticks:
axes.set_xticks([])
if suppress_yticks:
axes.set_yticks([])
def AddScatterPlot(unused_fig,
axes,
xs,
ys,
title=u'',
xlabel=u'',
ylabel=u'',
fontsize='small',
xlim=None,
ylim=None,
suppress_xticks=False,
suppress_yticks=False,
**kwargs):
"""Convenience function to add a scatter plot."""
# For 3D axes, check to see whether zlim is specified and apply it.
if 'zlim' in kwargs:
zlim = kwargs.pop('zlim')
if zlim:
axes.set_zlim(zlim)
axes.scatter(xs, ys, **kwargs)
axes.set_title(ToUnicode(title), size=fontsize)
axes.set_xlabel(ToUnicode(xlabel), size=fontsize)
axes.set_ylabel(ToUnicode(ylabel), size=fontsize)
if xlim:
axes.set_xlim(xlim)
if ylim:
axes.set_ylim(ylim)
if suppress_xticks:
axes.set_xticks([])
if suppress_yticks:
axes.set_yticks([])
_SubplotMetadata = collections.namedtuple('_SubplotMetadata',
['tensor_list', 'plot_func'])
class MatplotlibFigureSummary(object):
"""Helper to minimize boilerplate in creating a summary with several subplots.
Typical usage::
>>> fig_helper = plot.MatplotlibFigureSummary(
... 'summary_name', shared_subplot_kwargs={'xlabel': 'Time'})
>>> fig_helper.AddSubplot([tensor1], title='tensor1')
>>> fig_helper.AddSubplot([tensor2], title='tensor2', ylabel='Frequency')
>>> image_summary = fig_helper.Finalize()
Can also be used as a context manager if the caller does not need the return
value from Finalize(), e.g.
>>> with plot.MatplotlibFigureSummary('figure') as fig:
... fig.AddSubplot([tensor1])
"""
def __init__(self,
name,
figsize=(8, 10),
max_outputs=3,
subplot_grid_shape=None,
gridspec_kwargs=None,
plot_func=AddImage,
shared_subplot_kwargs=None):
"""Creates a new MatplotlibFigureSummary object.
Args:
name: A string name for the generated summary.
figsize: A 2D tuple containing the overall figure (width, height)
dimensions in inches.
max_outputs: The maximum number of images to generate.
subplot_grid_shape: A 2D tuple containing the height and width dimensions
of the subplot grid. height * width must be >= the number of subplots.
Defaults to (num_subplots, 1), i.e. a vertical stack of plots.
gridspec_kwargs: A dict of extra keyword args to use when initializing the
figure's gridspec, as supported by matplotlib.gridspec.GridSpec.
plot_func: A function shared across all subplots used to populate a single
subplot. See the docstring for AddSubplot for details.
shared_subplot_kwargs: A dict of extra keyword args to pass to the plot
function for all subplots. This is useful for specifying properties
such as 'clim' which should be consistent across all subplots.
"""
self._name = name
self._figsize = figsize
self._max_outputs = max_outputs
self._subplot_grid_shape = subplot_grid_shape
self._gridspec_kwargs = gridspec_kwargs if gridspec_kwargs else {}
self._plot_func = plot_func
self._shared_subplot_kwargs = (
shared_subplot_kwargs if shared_subplot_kwargs else {})
self._subplots = []
def __enter__(self):
return self
def __exit__(self, unused_exc_type, unused_exc_value, unused_tb):
self.Finalize()
def AddSubplot(self, tensor_list, plot_func=None, **kwargs):
r"""Adds a subplot from tensors using plot_fun to populate the subplot axes.
Args:
tensor_list: A list of tensors to be realized as numpy arrays and passed
as arguments to plot_func. The first dimension of each tensor in the
list corresponds to batch, and must be the same size for each tensor.
plot_func: A function with signature f(fig, axes, data1, data2, ...,
datan, \*\*kwargs) that will be called with the realized data from
tensor_list to plot data on axes in fig. This function is called
independently on each element of the batch. Overrides plot_func passed
in to the constructor.
**kwargs: A dict of additional non-tensor keyword args to pass to
        plot_func when generating the plot, overriding any
shared_subplot_kwargs. Useful for e.g. specifying a subplot's title.
"""
merged_kwargs = dict(self._shared_subplot_kwargs, **kwargs)
if plot_func is None:
plot_func = self._plot_func
plot_func = functools.partial(plot_func, **merged_kwargs)
self._subplots.append(_SubplotMetadata(tensor_list, plot_func))
def Finalize(self):
"""Finishes creation of the overall figure, returning the image summary."""
subplot_grid_shape = self._subplot_grid_shape
if subplot_grid_shape is None:
subplot_grid_shape = (len(self._subplots), 1)
    # AddMatplotlibFigureSummary (due to restrictions of py_func) only supports
    # a flattened list of tensors, so we must do some bookkeeping to maintain a
    # mapping from each _SubplotMetadata object to flattened_tensors.
subplot_slices = []
flattened_tensors = []
for subplot in self._subplots:
start = len(flattened_tensors)
subplot_slices.append((start, start + len(subplot.tensor_list)))
flattened_tensors.extend(subplot.tensor_list)
def PlotFunc(fig, *numpy_data_list):
gs = gridspec.GridSpec(*subplot_grid_shape, **self._gridspec_kwargs)
for n, subplot in enumerate(self._subplots):
axes = fig.add_subplot(gs[n])
start, end = subplot_slices[n]
subplot_data = numpy_data_list[start:end]
subplot.plot_func(fig, axes, *subplot_data)
func = functools.partial(_RenderMatplotlibFigures, self._figsize,
self._max_outputs, PlotFunc)
batch_sizes = [tf.shape(t)[0] for t in flattened_tensors]
num_tensors = len(flattened_tensors)
with tf.control_dependencies([
tf.assert_equal(
batch_sizes, [batch_sizes[0]] * num_tensors, summarize=num_tensors)
]):
rendered = tf.py_func(
func, flattened_tensors, tf.uint8, name='RenderMatplotlibFigures')
return tf.summary.image(self._name, rendered, max_outputs=self._max_outputs)
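# Editor's sketch, not part of the original module: a hypothetical helper
# showing how MatplotlibFigureSummary is typically driven, assuming
# `spectrogram` is a [batch, time, frequency] tensor and `loss_per_step` is a
# [batch, time] tensor built in a TF1-style graph.
def _ExampleFigureSummarySketch(spectrogram, loss_per_step):
  fig_helper = MatplotlibFigureSummary(
      'example_figure', figsize=(8, 6), max_outputs=2,
      shared_subplot_kwargs={'xlabel': 'Time'})
  # The default plot_func (AddImage) renders the 2-D slice for each example.
  fig_helper.AddSubplot([spectrogram], title='spectrogram', ylabel='Frequency')
  # plot_func can be overridden per subplot, here with the 1-D AddPlot helper.
  fig_helper.AddSubplot([loss_per_step], plot_func=AddPlot, title='loss')
  return fig_helper.Finalize()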
def _RenderOneMatplotlibFigure(fig, plot_func, *numpy_data_list):
fig.clear()
plot_func(fig, *numpy_data_list)
fig.canvas.draw()
ncols, nrows = fig.canvas.get_width_height()
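  # Editor's note: fig.canvas.get_width_height() returns (width, height) and
  # tostring_rgb() yields a row-major RGB buffer, so the reshape below must be
  # (height, width, 3).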
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
return image.reshape(nrows, ncols, 3)
def _RenderMatplotlibFigures(figsize, max_outputs, plot_func, *numpy_data_list):
r"""Renders a figure containing several subplots using matplotlib.
This is an internal implementation detail of MatplotlibFigureSummary.Finalize
and should not be called directly.
The unconventional function signature is used to work around the behavior of
`tf.py_func` which always passes in different tensors as positional arguments.
Args:
figsize: A 2D tuple containing the overall figure (width, height) dimensions
in inches.
max_outputs: The maximum number of images to generate.
plot_func: A function with signature f(fig, data1, data2, ..., datan) that
will be called with \*numpy_data_list to plot data in fig.
*numpy_data_list: A list of numpy matrices to plot specified as separate
arguments.
Returns:
    A numpy 4D array of type np.uint8 which can be used to generate an image
    summary (e.g. via `tf.summary.image`) when converted to a tf tensor.
"""
batch_size = numpy_data_list[0].shape[0]
max_outputs = min(max_outputs, batch_size)
images = []
# Use plt.Figure instead of plt.figure to avoid a memory leak (matplotlib
# keeps global references to every figure created with plt.figure). When not
# using plt.figure we have to create a canvas manually.
fig = plt.Figure(figsize=figsize, dpi=100, facecolor='white')
backend_agg.FigureCanvasAgg(fig)
for b in range(max_outputs):
data = [numpy_data[b] for numpy_data in numpy_data_list]
try:
images.append(_RenderOneMatplotlibFigure(fig, plot_func, *data))
except Exception as e: # pylint: disable=broad-except
tf.logging.warning('Error rendering example %d using matplotlib: %s\n%s',
b, e, traceback.format_exc())
if len(images) == max_outputs:
break
plt.close(fig)
# Pad with dummy black images in case there were too many rendering errors.
while len(images) < max_outputs:
image_shape = (1, 1, 1)
if images:
image_shape = images[0].shape
images.append(np.ones(image_shape, dtype=np.uint8))
return np.array(images)
def FigureToSummary(name, fig):
"""Create tf.Summary proto from matplotlib.figure.Figure.
Args:
name: Summary name.
fig: A matplotlib figure object.
Returns:
A `tf.Summary` proto containing the figure rendered to an image.
"""
canvas = backend_agg.FigureCanvasAgg(fig)
fig.canvas.draw()
ncols, nrows = fig.canvas.get_width_height()
png_file = six.BytesIO()
canvas.print_figure(png_file)
png_str = png_file.getvalue()
return tf.Summary(value=[
tf.Summary.Value(
tag='%s/image' % name,
image=tf.Summary.Image(
height=nrows,
width=ncols,
colorspace=3,
encoded_image_string=png_str))
])
def Image(name, figsize, image, setter=None, **kwargs):
"""Plot an image in numpy and generates tf.Summary proto for it.
Args:
name: Image summary name.
figsize: A 2D tuple containing the overall figure (width, height) dimensions
in inches.
image: A 2D/3D numpy array in the format accepted by pyplot.imshow.
setter: A callable taking (fig, axes). Useful to fine-tune layout of the
figure, xlabel, xticks, etc.
**kwargs: Additional arguments to AddImage.
Returns:
    A `tf.Summary` proto containing one image visualizing `image`.
"""
assert image.ndim in (2, 3), '%s' % image.shape
fig = plt.Figure(figsize=figsize, dpi=100, facecolor='white')
axes = fig.add_subplot(1, 1, 1)
# Default show_colorbar to False if not explicitly specified.
show_colorbar = kwargs.pop('show_colorbar', False)
# Default origin to 'upper' if not explicitly specified.
origin = kwargs.pop('origin', 'upper')
AddImage(
fig, axes, image, origin=origin, show_colorbar=show_colorbar, **kwargs)
if setter:
setter(fig, axes)
return FigureToSummary(name, fig)
def Scatter(name, figsize, xs, ys, setter=None, **kwargs):
"""Plot a scatter plot in numpy and generates tf.Summary proto for it.
Args:
name: Scatter plot summary name.
figsize: A 2D tuple containing the overall figure (width, height) dimensions
in inches.
xs: A set of x points to plot.
ys: A set of y points to plot.
setter: A callable taking (fig, axes). Useful to fine-tune layout of the
figure, xlabel, xticks, etc.
**kwargs: Additional arguments to AddScatterPlot.
Returns:
    A `tf.Summary` proto containing one image visualizing the scatter plot.
"""
fig = plt.Figure(figsize=figsize, dpi=100, facecolor='white')
# If z data is provided, use 3d projection.
#
# This requires the mplot3d toolkit (e.g., from mpl_toolkits import mplot3d)
# to be registered in the program.
if 'zs' in kwargs:
axes = fig.add_subplot(111, projection='3d')
else:
axes = fig.add_subplot(1, 1, 1)
AddScatterPlot(fig, axes, xs, ys, **kwargs)
if setter:
setter(fig, axes)
return FigureToSummary(name, fig)
Matrix = Image # pylint: disable=invalid-name
def Curve(name, figsize, xs, ys, setter=None, **kwargs):
"""Plot curve(s) to a `tf.Summary` proto.
Args:
name: Image summary name.
figsize: A 2D tuple containing the overall figure (width, height) dimensions
in inches.
xs: x values for matplotlib.pyplot.plot.
ys: y values for matplotlib.pyplot.plot.
    setter: A callable taking (fig, axes). Useful to fine-tune the layout of the
figure, xlabel, xticks, etc.
**kwargs: Extra args for matplotlib.pyplot.plot.
Returns:
    A `tf.Summary` proto containing the line plot.
"""
fig = plt.Figure(figsize=figsize, dpi=100, facecolor='white')
axes = fig.add_subplot(1, 1, 1)
axes.plot(xs, ys, '.-', **kwargs)
if setter:
setter(fig, axes)
return FigureToSummary(name, fig)
def AddMultiCurveSubplot(fig,
tensors,
paddings,
labels,
xlabels=None,
**kwargs):
"""Adds a multi curve subplot to Matplotlib figure.
Plots one line for each entry in tensors and assigns a plot label legend.
Args:
fig: The Matplotlib figure.
tensors: List of tensors of shape [batch, length]
paddings: Paddings for 'tensors' with shape [batch, length] with 0. in valid
positions and 1. in invalid.
labels: A list of tensor names (strings) of the same length as 'tensors'.
xlabels: A string tensor of shape [batch] with an xlabel per batch.
**kwargs: With optional, title, xlabel, ylabel, fontsize.
"""
data = []
row_labels = []
for t, l in zip(tensors, labels):
if t is not None:
data.append(py_utils.ApplyPadding(paddings, t))
row_labels.append(l)
shape = py_utils.GetShape(data[0], 2)
data = tf.reshape(tf.concat(data, -1), [shape[0], len(data), shape[1]])
args = [data, py_utils.LengthsFromPaddings(paddings)]
if xlabels is not None:
args.append(xlabels)
fig.AddSubplot(
args, plot_func=_AddMultiCurveRowPlots, row_labels=row_labels, **kwargs)
def _AddMultiCurveRowPlots(fig,
axes,
data,
length,
x_label_override=None,
row_labels=None,
title=u'',
xlabel=u'',
ylabel=u'',
fontsize='small'):
"""Add a plot per row in data and cut the plot by the length."""
del fig
colors = ['b-', 'r-', 'g-', 'm-', 'y-']
for row in range(data.shape[0]):
label = row_labels[row] if row_labels else '{}'.format(row)
axes.plot(data[row, :length], colors[row % len(colors)], label=label)
axes.set_xlim([0, length])
axes.legend()
axes.set_title(ToUnicode(title), size=fontsize)
if x_label_override:
axes.set_xlabel(ToUnicode(x_label_override), size='x-small', wrap=True)
else:
axes.set_xlabel(ToUnicode(xlabel), size=fontsize)
axes.set_ylabel(ToUnicode(ylabel), size=fontsize)
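# Editor's sketch, not in the original file: AddMultiCurveSubplot expects `fig`
# to be a MatplotlibFigureSummary helper rather than a raw matplotlib figure.
# A hypothetical use, assuming `loss` and `accuracy` are [batch, time] tensors
# with a matching `paddings` tensor:
def _ExampleMultiCurveSketch(loss, accuracy, paddings):
  fig = MatplotlibFigureSummary('training_curves', figsize=(10, 4))
  AddMultiCurveSubplot(
      fig, [loss, accuracy], paddings, labels=['loss', 'accuracy'],
      title='Per-step metrics', xlabel='Step')
  return fig.Finalize()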
|
apache-2.0
|
adrn/streams
|
scripts/spitzer_targets.py
|
1
|
20740
|
# coding: utf-8
""" Select targets for Spitzer """
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os, sys
from datetime import datetime, timedelta
# Third-party
import astropy.coordinates as coord
import astropy.units as u
from astropy.io import ascii
from astropy.io.misc import fnpickle, fnunpickle
from scipy.interpolate import interp1d
from astropy.table import Table, Column, join, vstack
import matplotlib.pyplot as plt
import numpy as np
# Project
from streams.util import project_root
from streams.coordinates import distance_to_sgr_plane
from streams.io import add_sgr_coordinates
from streams.io.lm10 import particle_table
notes_path = os.path.join(project_root, "text", "notes",
"spitzer_target_selection")
def orphan():
""" We'll select the high probability members from Branimir's
Orphan sample.
"""
filename = "branimir_orphan.txt"
output_file = "orphan.txt"
d = ascii.read(os.path.join(project_root, "data", "catalog", filename))
high = d[d["membership_probability"] == "high"]
high.keep_columns(["ID", "RA", "Dec", "magAvg", "period", "rhjd0"])
high.rename_column("magAvg", "rMagAvg")
ascii.write(high, \
os.path.join(project_root, "data", "spitzer_targets", output_file))
def tbl_to_xyz(tbl):
g = coord.ICRS(np.array(tbl["ra"])*u.deg, np.array(tbl["dec"])*u.deg,
distance=np.array(tbl["dist"])*u.kpc).galactic
return g.x-8*u.kpc, g.y, g.z
def tbl_to_gc_dist(tbl):
x,y,z = tbl_to_xyz(tbl)
return np.sqrt(x**2 + y**2 + z**2)
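# Editor's note, not in the original script: tbl_to_xyz subtracts 8 kpc from
# the Galactic x coordinate (the assumed solar offset), so tbl_to_gc_dist
# returns an approximate Galactocentric distance rather than a heliocentric
# one.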
def sgr_rv(d, lm10, Nbins=30, sigma_cut=3.):
""" Select stars (d) that match in RV to the LM10 particles """
# for each arm (trailing, leading)
rv_funcs = {}
median_rvs = {}
Lambda_bins = {}
rv_scatters = {}
lmflag_idx = {}
for lmflag in [-1,1]:
wrap = lm10[lm10['Lmflag'] == lmflag]
bins = np.linspace(wrap["Lambda"].min(), wrap["Lambda"].max(), Nbins)
median_rv = []
rv_scatter = []
for ii in range(Nbins-1):
binL = bins[ii]
binR = bins[ii+1]
idx = (wrap["Lambda"] > binL) & (wrap["Lambda"] < binR)
median_rv.append(np.median(wrap["vgsr"][idx]))
rv_scatter.append(np.std(wrap["vgsr"][idx]))
# plt.clf()
# plt.hist(wrap["vgsr"][idx])
# plt.title("{0} - {1}".format(binL, binR))
# plt.xlim(-250, 250)
# plt.savefig(os.path.join(notes_path, "{0}_{1}.png".format(lmflag, int(binL))))
Lambda_bins[lmflag] = (bins[1:]+bins[:-1])/2.
median_rvs[lmflag] = np.array(median_rv)
rv_scatters[lmflag] = np.array(rv_scatter)
rv_func = interp1d(Lambda_bins[lmflag],
median_rvs[lmflag],
kind='cubic', bounds_error=False)
_idx = np.zeros_like(d["Lambda"]).astype(bool)
for ii in range(Nbins-1):
lbin = bins[ii]
rbin = bins[ii+1]
ix = (d["Lambda"] >= lbin) | (d["Lambda"] < rbin)
pred_rv = rv_func(d["Lambda"])
ix &= np.fabs(d["Vgsr"] - pred_rv) < np.sqrt(10**2 + rv_scatters[lmflag][ii]**2)
_idx |= ix
lmflag_idx[lmflag] = _idx
fig,axes = plt.subplots(1,2,figsize=(15,6))
for ii,lmflag in enumerate([1,-1]):
ax = axes[ii]
wrap = lm10[lm10['Lmflag'] == lmflag]
ax.plot(wrap["Lambda"], wrap["vgsr"],
marker=',', linestyle='none', alpha=0.25)
ax.plot(Lambda_bins[lmflag], median_rvs[lmflag], "k")
ax.plot(Lambda_bins[lmflag],
median_rvs[lmflag]+sigma_cut*rv_scatters[lmflag], c='g')
ax.plot(Lambda_bins[lmflag],
median_rvs[lmflag]-sigma_cut*rv_scatters[lmflag], c='g')
selected_d = d[lmflag_idx[lmflag]]
not_selected_d = d[~lmflag_idx[lmflag]]
ax.plot(selected_d["Lambda"], selected_d["Vgsr"],
marker='.', linestyle='none', alpha=0.75, c='#CA0020', ms=6)
ax.plot(not_selected_d["Lambda"], not_selected_d["Vgsr"],
marker='.', linestyle='none', alpha=0.75, c='#2B8CBE', ms=5)
ax.set_xlim(0,360)
ax.set_xlabel(r"$\Lambda$ [deg]")
if ii == 0:
ax.set_ylabel(r"$v_{\rm gsr}$ [km/s]")
if lmflag == 1:
ax.set_title("Leading", fontsize=20, fontweight='normal')
elif lmflag == -1:
ax.set_title("Trailing", fontsize=20, fontweight='normal')
fig.tight_layout()
fig.savefig(os.path.join(notes_path, "vgsr_selection.pdf"))
return lmflag_idx
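# Editor's note, not in the original script: sgr_rv above and sgr_dist below
# both return a dict keyed by Lmflag (+1 leading arm, -1 trailing arm) whose
# values are boolean masks over the rows of `d`, e.g. d[lmflag_idx[1]] picks
# out the leading-arm candidates.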
def sgr_dist(d, lm10, Nbins=30, sigma_cut=3.):
""" Select stars (d) that match in distance to the LM10 particles """
# for each arm (trailing, leading)
lmflag_idx = {}
median_dists = {}
Lambda_bins = {}
dist_scatters = {}
for lmflag in [-1,1]:
wrap = lm10[lm10['Lmflag'] == lmflag]
dist = wrap["dist"]
bins = np.linspace(wrap["Lambda"].min(), wrap["Lambda"].max(), Nbins)
median_dist = []
dist_scatter = []
for ii,binL in enumerate(bins):
if ii == Nbins-1: break
binR = bins[ii+1]
idx = (wrap["Lambda"] > binL) & (wrap["Lambda"] < binR)
m = np.median(dist[idx])
s = np.std(dist[idx])
if 300 > binL > 225 and lmflag == 1:
m -= 5.
s *= 1.
elif 90 > binL > 40 and lmflag == 1:
s /= 2.
dist_scatter.append(s)
median_dist.append(m)
Lambda_bins[lmflag] = (bins[1:]+bins[:-1])/2.
median_dists[lmflag] = np.array(median_dist)
dist_scatters[lmflag] = np.array(dist_scatter)
dist_func = interp1d(Lambda_bins[lmflag],
median_dists[lmflag],
kind='cubic', bounds_error=False)
scatter_func = interp1d(Lambda_bins[lmflag],
dist_scatters[lmflag],
kind='cubic', bounds_error=False)
_idx = np.zeros_like(d["Lambda"]).astype(bool)
for ii in range(Nbins-1):
lbin = bins[ii]
rbin = bins[ii+1]
ix = (d["Lambda"] >= lbin) | (d["Lambda"] < rbin)
pred_dist = dist_func(d["Lambda"])
pred_scat = scatter_func(d["Lambda"])
ix &= np.fabs(d["dist"] - pred_dist) < sigma_cut*pred_scat
#dist_scatters[lmflag][ii]
_idx |= ix
lmflag_idx[lmflag] = _idx
fig,axes = plt.subplots(1,2,figsize=(15,6))
for ii,lmflag in enumerate([1,-1]):
ax = axes[ii]
wrap = lm10[lm10['Lmflag'] == lmflag]
ax.plot(wrap["Lambda"], wrap["dist"],
marker=',', linestyle='none', alpha=0.25)
ax.plot(Lambda_bins[lmflag], median_dists[lmflag], "k")
ax.plot(Lambda_bins[lmflag],
median_dists[lmflag]+sigma_cut*dist_scatters[lmflag], c='g')
ax.plot(Lambda_bins[lmflag],
median_dists[lmflag]-sigma_cut*dist_scatters[lmflag], c='g')
selected_d = d[lmflag_idx[lmflag]]
not_selected_d = d[~lmflag_idx[lmflag]]
ax.plot(selected_d["Lambda"], selected_d["dist"],
marker='.', linestyle='none', alpha=0.75, c='#CA0020', ms=6)
ax.plot(not_selected_d["Lambda"], not_selected_d["dist"],
marker='.', linestyle='none', alpha=0.75, c='#2B8CBE', ms=5)
ax.set_xlim(0,360)
ax.set_xlabel(r"$\Lambda$ [deg]")
if ii == 0:
ax.set_ylabel(r"$d_{\odot}$ [kpc]")
if lmflag == 1:
ax.set_title("Leading", fontsize=20, fontweight='normal')
elif lmflag == -1:
ax.set_title("Trailing", fontsize=20, fontweight='normal')
fig.tight_layout()
fig.savefig(os.path.join(notes_path, "dist_selection.pdf"))
return lmflag_idx
def select_only(ixx, N):
w, = np.where(ixx)
ix = np.zeros_like(ixx).astype(bool)
np.random.shuffle(w)
try:
w = w[:N]
ix[w] = True
except:
ix = np.ones_like(ixx).astype(bool)
return ix
def integration_time(d):
d = np.array(d)
f = d/10. #kpc
return (3.*f**2*12*u.minute).to(u.hour)
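# Worked example (editor's note, not in the original script): a target at
# d = 20 kpc has f = 2, giving 3 * 2**2 * 12 min = 144 min = 2.4 hr of
# integration time per star.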
def sgr(overwrite=False, seed=42):
np.random.seed(seed)
lm10_cache = os.path.join(project_root, "data", "spitzer_targets",
"lm10_cache.pickle")
if os.path.exists(lm10_cache) and overwrite:
os.remove(lm10_cache)
if not os.path.exists(lm10_cache):
# select particle data from the LM10 simulation
lm10 = particle_table(N=0, expr="(Pcol>-1) & (Pcol<8) & "\
"(abs(Lmflag)==1) & (dist<100)")
fnpickle(np.array(lm10), lm10_cache)
else:
lm10 = Table(fnunpickle(lm10_cache))
# read in the Catalina RR Lyrae data
spatial_data = ascii.read(os.path.join(project_root,
"data/catalog/Catalina_all_RRLyr.txt"))
velocity_data = ascii.read(os.path.join(project_root,
"data/catalog/Catalina_vgsr_RRLyr.txt"))
catalina = join(spatial_data, velocity_data, join_type='outer', keys="ID")
catalina.rename_column("RAdeg", "ra")
catalina.rename_column("DEdeg", "dec")
catalina.rename_column("dh", "dist")
# add Sgr coordinates to the Catalina data
catalina = add_sgr_coordinates(catalina)
# add Galactocentric distance to the Catalina and LM10 data
cat_gc_dist = tbl_to_gc_dist(catalina)
lm10_gc_dist = tbl_to_gc_dist(lm10)
catalina.add_column(Column(cat_gc_dist, name="gc_dist"))
lm10.add_column(Column(lm10_gc_dist, name="gc_dist"))
# 1) Select stars < 20 kpc from the orbital plane of Sgr
sgr_catalina = catalina[np.fabs(catalina["Z_sgr"]) < 20.]
x,y,z = tbl_to_xyz(sgr_catalina)
# 2) Stars with D > 15 kpc from the Galactic center
sgr_catalina = sgr_catalina[sgr_catalina["gc_dist"] > 15]
sgr_catalina_rv = sgr_catalina[~sgr_catalina["Vgsr"].mask]
print("{0} CSS RRLs have radial velocities.".format(len(sgr_catalina_rv)))
# plot X-Z plane, data and lm10 particles
fig,axes = plt.subplots(1,3,figsize=(15,6), sharex=True, sharey=True)
x,y,z = tbl_to_xyz(catalina)
axes[0].set_title("All RRL", fontsize=20)
axes[0].plot(x, z, marker='.', alpha=0.2, linestyle='none')
axes[0].set_xlabel("$X_{gc}$ kpc")
axes[0].set_ylabel("$Z_{gc}$ kpc")
x,y,z = tbl_to_xyz(sgr_catalina)
axes[1].set_title(r"RRL $|Z-Z_{sgr}|$ $<$ $20$ kpc", fontsize=20)
axes[1].plot(x, z, marker='.', alpha=0.2, linestyle='none')
axes[2].plot(lm10["x"], lm10["z"], marker='.',
alpha=0.2, linestyle='none')
axes[2].set_title("LM10", fontsize=20)
axes[2].set_xlim(-60, 40)
axes[2].set_ylim(-60, 60)
fig.tight_layout()
fig.savefig(os.path.join(notes_path, "catalina_all.pdf"))
# plot Lambda-dist plane, data and lm10 particles
fig,ax = plt.subplots(1,1,figsize=(6,6),
subplot_kw=dict(projection="polar"))
ax.set_theta_direction(-1)
ax.plot(np.radians(lm10["Lambda"]), lm10["dist"],
marker='.', alpha=0.2, linestyle='none')
ax.plot(np.radians(sgr_catalina["Lambda"]), sgr_catalina["dist"],
marker='.', alpha=0.75, linestyle='none', ms=6)
fig.tight_layout()
fig.savefig(os.path.join(notes_path, "rrl_over_lm10.pdf"))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Select on radial velocities
rv_selection_cache = os.path.join(project_root, "data",
"spitzer_targets", "rv.pickle")
if os.path.exists(rv_selection_cache) and overwrite:
os.remove(rv_selection_cache)
if not os.path.exists(rv_selection_cache):
lmflag_rv_idx = sgr_rv(sgr_catalina_rv, lm10, Nbins=40, sigma_cut=3.)
fnpickle(lmflag_rv_idx, rv_selection_cache)
else:
lmflag_rv_idx = fnunpickle(rv_selection_cache)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Select on distance
_selection_cache = os.path.join(project_root, "data",
"spitzer_targets", "dist.pickle")
if os.path.exists(_selection_cache) and overwrite:
os.remove(_selection_cache)
if not os.path.exists(_selection_cache):
lmflag_dist_idx = sgr_dist(sgr_catalina_rv, lm10,
Nbins=40, sigma_cut=3.)
fnpickle(lmflag_dist_idx, _selection_cache)
else:
lmflag_dist_idx = fnunpickle(_selection_cache)
################################################################
# Make X-Z plot
fig,ax = plt.subplots(1,1,figsize=(6,6))
x,y,z = tbl_to_xyz(lm10)
ax.plot(x, z, marker=',', alpha=0.2,
linestyle='none')
for lmflag in [1,-1]:
ix = lmflag_dist_idx[lmflag] & lmflag_rv_idx[lmflag]
x,y,z = tbl_to_xyz(sgr_catalina_rv[ix])
ax.plot(x, z, marker='.', alpha=0.75,
linestyle='none', ms=6, label="Lmflag={0}".format(lmflag))
ax.legend(loc='lower right',\
prop={'size':12})
ax.set_title("RV-selected CSS RRLs", fontsize=20)
ax.set_xlabel(r"$X_{\rm gc}$ kpc")
ax.set_ylabel(r"$Z_{\rm gc}$ kpc")
fig.tight_layout()
fig.savefig(os.path.join(notes_path, "selected_xz.pdf"))
##############################################################
# Finalize two samples:
# - 1 with 10 stars in the nearby trailing wrap
# - 1 without these stars
L = sgr_catalina_rv["Lambda"]
B = sgr_catalina_rv["Beta"]
D = sgr_catalina_rv["dist"]
X,Y = D*np.cos(np.radians(L)),D*np.sin(np.radians(L))
lead_ix = lmflag_dist_idx[1] & lmflag_rv_idx[1] & (np.fabs(B) < 40)
trail_ix = lmflag_dist_idx[-1] & lmflag_rv_idx[-1] & (np.fabs(B) < 40)
trail_ix[L < 180] &= B[L < 180] > -5
trail_ix = trail_ix & ( ((L > 230) & (L < 315)) | (L < 180) )
#trail_ix &= np.logical_not((L > 50) & (L < 100) & (sgr_catalina_rv["dist"] < 40))
# draw a box around some possible bifurcation members
bif_ix = (L > 200) & (L < 230) & (B < 2) & (B > -10) & lead_ix
# deselect stars possibly associated with the bifurcation
no_bif = (L > 180) & (L < 225) & (B < 20) & (B > 2)
no_bif |= (L > 225) & (L < 360) & (B < 17) & (B > 2)
no_bif |= ((L <= 180) & (B < 15) & (B > -8) & (L > 50))
lead_ix &= no_bif
print(sum(bif_ix), "bifurcation stars")
print(sum(lead_ix), "leading arm stars")
print(sum(trail_ix), "trailing arm stars ")
# select 3 clumps in the leading arm
Nclump = 11
ix_lead_clumps = np.zeros_like(lead_ix).astype(bool)
for clump in [(215,17), (245,25), (260,40)]:
l,d = clump
x,y = d*np.cos(np.radians(l)),d*np.sin(np.radians(l))
# find N stars closest to the clump
clump_dist = np.sqrt((X-x)**2 + (Y-y)**2)
xxx = np.sort(clump_dist[lead_ix])[Nclump]
this_ix = lead_ix & (clump_dist <= xxx)
print("lead",sum(this_ix))
ix_lead_clumps |= this_ix #select_only(this_ix, 10)
ix_lead_clumps &= lead_ix
# all southern leading
lll = ((L > 45) & (L < 180) & lead_ix)
print("southern leading", sum(lll))
ix_lead_clumps |= lll
ix_trail_clumps = np.zeros_like(trail_ix).astype(bool)
for clump in [(260,19)]:
l,d = clump
x,y = d*np.cos(np.radians(l)),d*np.sin(np.radians(l))
# find N stars closest to the clump
clump_dist = np.sqrt((X-x)**2 + (Y-y)**2)
xxx = np.sort(clump_dist[trail_ix])[10]
this_ix = trail_ix & (clump_dist <= xxx)
#bonus_trail_ix = trail_ix & (clump_dist > xxx)
ix_trail_clumps |= this_ix #select_only(this_ix, 10)
ix_trail_clumps &= trail_ix
# all trailing southern
ttt = (L > 45) & (L < 180) & (trail_ix)
print("southern trailing", sum(ttt))
ix_trail_clumps |= ttt
i1 = integration_time(sgr_catalina_rv[bif_ix]["dist"])
i2 = integration_time(sgr_catalina_rv[ix_lead_clumps]["dist"])
i3 = integration_time(sgr_catalina_rv[ix_trail_clumps]["dist"])
print("final num bifurcation", len(sgr_catalina_rv[bif_ix]))
print("final num leading arm", len(sgr_catalina_rv[ix_lead_clumps]))
print("final num trailing arm",len(sgr_catalina_rv[ix_trail_clumps]))
print("final total", sum(ix_trail_clumps) + sum(ix_lead_clumps) + sum(bif_ix))
print("final total", sum(ix_trail_clumps | ix_lead_clumps | bif_ix))
targets = sgr_catalina_rv[ix_trail_clumps | ix_lead_clumps | bif_ix]
bonus_targets = sgr_catalina_rv[(lead_ix | trail_ix) & \
~(ix_trail_clumps | ix_lead_clumps | bif_ix)]
# print()
# print("bifurcation", np.sum(i1))
# print("leading",np.sum(i2))
# print("trailing",np.sum(i3))
# print("Total:",np.sum(integration_time(targets["dist"])))
tot_time = 0.
for t in targets:
Vmag = t["<Vmag>"]
if Vmag < 16.6:
tot_time += 1.286
elif 16.6 < Vmag < 16.8:
tot_time += 1.31
elif 16.8 < Vmag < 17.2:
tot_time += 1.83
elif 17.2 < Vmag < 17.8:
tot_time += 2.52
elif Vmag > 17.8:
tot_time += 4.34
print ("total time: ", tot_time)
output_file = "sgr.txt"
output = targets.copy()
output.rename_column("Eta", "hjd0")
output.rename_column("<Vmag>", "VMagAvg")
output.keep_columns(["ID", "ra", "dec", "VMagAvg", "Period", "hjd0"])
ascii.write(output,
os.path.join(project_root, "data", "spitzer_targets", output_file), Writer=ascii.Basic)
output_file = "sgr_bonus.txt"
output = bonus_targets.copy()
output.rename_column("Eta", "hjd0")
output.rename_column("<Vmag>", "VMagAvg")
output.keep_columns(["ID", "ra", "dec", "VMagAvg", "Period", "hjd0"])
ascii.write(output,
os.path.join(project_root, "data", "spitzer_targets", output_file), Writer=ascii.Basic)
# ----------------------------------
# Make Lambda-D plot
fig,ax = plt.subplots(1,1,figsize=(6,6),
subplot_kw=dict(projection="polar"))
ax.set_theta_direction(-1)
ax.plot(np.radians(lm10["Lambda"]), lm10["dist"],
marker=',', alpha=0.2, linestyle='none')
d = sgr_catalina_rv[lead_ix]
ax.plot(np.radians(d["Lambda"]), d["dist"], marker='.',
alpha=0.75, linestyle='none', ms=8, c="#CA0020", label="leading")
d = sgr_catalina_rv[trail_ix]
ax.plot(np.radians(d["Lambda"]), d["dist"], marker='.',
alpha=0.75, linestyle='none', ms=8, c="#5E3C99", label="trailing")
ax.plot(np.radians(targets["Lambda"]), targets["dist"], marker='.',
alpha=0.7, linestyle='none', ms=10, c="#31A354", label="targets",
mfc='none', mec='k', mew=1.5)
ax.legend(loc="lower right")
plt.setp(ax.get_legend().get_texts(), fontsize='12')
ax.set_ylim(0,65)
fig.tight_layout()
fig.savefig(os.path.join(notes_path, "xz.pdf"))
# ---------------------
# Make Lambda-Beta plot
fig,ax = plt.subplots(1,1,figsize=(12,5))
ax.plot(lm10["Lambda"], lm10["Beta"], marker=',', alpha=0.2,
linestyle='none')
dd_bif = sgr_catalina_rv[bif_ix]
ax.plot(dd_bif["Lambda"], dd_bif["Beta"], marker='.', alpha=0.75,
linestyle='none', ms=6, c="#31A354", label="bifurcation")
d = sgr_catalina_rv[lead_ix]
ax.plot(d["Lambda"], d["Beta"], marker='.',
alpha=0.75, linestyle='none', ms=8, c="#CA0020", label="leading")
d = sgr_catalina_rv[trail_ix]
ax.plot(d["Lambda"], d["Beta"], marker='.',
alpha=0.75, linestyle='none', ms=8, c="#5E3C99", label="trailing")
ax.plot(targets["Lambda"], targets["Beta"], marker='.',
alpha=0.7, linestyle='none', ms=10, c="#31A354", label="targets",
mfc='none', mec='k', mew=1.5)
ax.legend(loc="lower right")
plt.setp(ax.get_legend().get_texts(), fontsize='12')
ax.set_title("RV-selected CSS RRLs", fontsize=20)
ax.set_xlabel(r"$\Lambda$ [deg]")
ax.set_ylabel(r"$B$ [deg]")
ax.set_xlim(360, 0)
ax.set_ylim(-45, 45)
fig.tight_layout()
fig.savefig(os.path.join(notes_path, "LB.pdf"))
if __name__ == "__main__":
from argparse import ArgumentParser
# Define parser object
parser = ArgumentParser(description="")
parser.add_argument("-o", "--overwrite", action="store_true",
dest="overwrite", default=False)
args = parser.parse_args()
#orphan()
sgr(args.overwrite)
|
mit
|
abhishekgahlot/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
7
|
24079
|
"""
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal)
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.datasets.samples_generator import make_classification
from sklearn.metrics import f1_score
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
"""
Test parameters on classes that make use of libsvm.
"""
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
"""Check consistency on dataset iris."""
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that no other thread calls
    # this wrapper, and hence `srand`, concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
"""
Test whether SVCs work on a single sample given as a 1-d array
"""
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
"""
SVC with a precomputed kernel.
We test it with a toy dataset and with iris.
"""
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
"""
Test Support Vector Regression
"""
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.)):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
"""
Test OneClassSVM
"""
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
"""
Test OneClassSVM decision function
"""
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
"""
Make sure some tweaking of parameters works.
We change clf.dual_coef_ at run time and expect .predict() to change
accordingly. Notice that this is not trivial since it involves a lot
of C/Python copying in the libsvm bindings.
The success of this test ensures that the mapping between libsvm and
the python classifier is complete.
"""
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[.25, -.25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf.dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
"""
Predict probabilities using SVC
This uses cross validation, so we use a slightly bigger testing set.
"""
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
"""
Test decision_function
Sanity check, test that decision_function implemented in python
returns the same as the one in libsvm
"""
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_weight():
"""
Test class weights
"""
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
"""
Test weights on individual samples
"""
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
"""Test class weights for imbalanced data"""
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="auto"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('auto', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
        # check that the score is better when class_weight='auto' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='auto')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred)
<= metrics.f1_score(y, y_pred_balanced))
def test_bad_input():
"""
Test that it gives proper exception on deficient input
"""
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
"""
Test possible parameter combinations in LinearSVC
"""
# generate list of possible parameter combinations
params = [(dual, loss, penalty) for dual in [True, False]
for loss in ['l1', 'l2', 'lr'] for penalty in ['l1', 'l2']]
X, y = make_classification(n_samples=5, n_features=5)
for dual, loss, penalty in params:
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if (loss == 'l1' and penalty == 'l1') or (
loss == 'l1' and penalty == 'l2' and not dual) or (
penalty == 'l1' and dual):
assert_raises(ValueError, clf.fit, X, y)
else:
clf.fit(X, y)
def test_linearsvc():
"""
Test basic routines using LinearSVC
"""
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='l1', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
"""Test LinearSVC with crammer_singer multi-class svm"""
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
"""Test Crammer-Singer formulation in the binary case"""
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
"""
Test that LinearSVC gives plausible predictions on the iris dataset
Also, test symbolic class names (classes_).
"""
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
"""
Test that dense liblinear honours intercept_scaling param
"""
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='l2',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
"""Check that primal coef modification are not silently ignored"""
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_inheritance():
# check that SVC classes can do inheritance
class ChildSVC(svm.SVC):
def __init__(self, foo=0):
self.foo = foo
svm.SVC.__init__(self)
clf = ChildSVC()
clf.fit(iris.data, iris.target)
clf.predict(iris.data[-1])
clf.decision_function(iris.data[-1])
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0)
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
"""Test that warnings are raised if model does not converge"""
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
"""Test that SVR(kernel="linear") has coef_ with the right sign."""
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for klass in [svm.SVR, svm.NuSVR]:
svr = klass(kernel="linear").fit(X, y)
assert_array_almost_equal(svr.predict(X).reshape(-1, 1),
np.dot(X, svr.coef_.T) + svr.intercept_)
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
TianpeiLuke/GPy
|
GPy/testing/rv_transformation_tests.py
|
2
|
3523
|
# Written by Ilias Bilionis
"""
Test if hyperparameters in models are properly transformed.
"""
import unittest
import numpy as np
import scipy.stats as st
import GPy
class TestModel(GPy.core.Model):
"""
A simple GPy model with one parameter.
"""
def __init__(self):
GPy.core.Model.__init__(self, 'test_model')
theta = GPy.core.Param('theta', 1.)
self.link_parameter(theta)
def log_likelihood(self):
return 0.
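# Editor's note, not in the original test: the comparison in _test_trans below
# relies on the change-of-variables identity
#   p_phi(phi) = p_theta(f(phi)) * |df/dphi|,
# where theta = f(phi) is the constraint transform. GPy folds the log-Jacobian
# of the transform into the model objective, so exp(-objective(phi)) should
# agree with a kernel density estimate built from transformed prior samples.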
class RVTransformationTestCase(unittest.TestCase):
def _test_trans(self, trans):
m = TestModel()
prior = GPy.priors.LogGaussian(.5, 0.1)
m.theta.set_prior(prior)
m.theta.unconstrain()
m.theta.constrain(trans)
# The PDF of the transformed variables
        p_phi = lambda phi: np.exp(-m._objective_grads(phi)[0])
# To the empirical PDF of:
theta_s = prior.rvs(100000)
phi_s = trans.finv(theta_s)
# which is essentially a kernel density estimation
kde = st.gaussian_kde(phi_s)
# We will compare the PDF here:
phi = np.linspace(phi_s.min(), phi_s.max(), 100)
# The transformed PDF of phi should be this:
pdf_phi = np.array([p_phi(p) for p in phi])
# UNCOMMENT TO SEE GRAPHICAL COMPARISON
#import matplotlib.pyplot as plt
#fig, ax = plt.subplots()
#ax.hist(phi_s, normed=True, bins=100, alpha=0.25, label='Histogram')
#ax.plot(phi, kde(phi), '--', linewidth=2, label='Kernel Density Estimation')
#ax.plot(phi, pdf_phi, ':', linewidth=2, label='Transformed PDF')
#ax.set_xlabel(r'transformed $\theta$', fontsize=16)
#ax.set_ylabel('PDF', fontsize=16)
#plt.legend(loc='best')
#plt.show(block=True)
# END OF PLOT
# The following test cannot be very accurate
self.assertTrue(np.linalg.norm(pdf_phi - kde(phi)) / np.linalg.norm(kde(phi)) <= 1e-1)
# Check the gradients at a few random points
        for i in range(10):
m.theta = theta_s[i]
self.assertTrue(m.checkgrad(verbose=True))
def test_Logexp(self):
self._test_trans(GPy.constraints.Logexp())
self._test_trans(GPy.constraints.Exponent())
if __name__ == '__main__':
unittest.main()
quit()
m = TestModel()
prior = GPy.priors.LogGaussian(0., .9)
m.theta.set_prior(prior)
# The following should return the PDF in terms of the transformed quantities
p_phi = lambda phi: np.exp(-m._objective_grads(phi)[0])
# Let's look at the transformation phi = log(exp(theta - 1))
trans = GPy.constraints.Exponent()
m.theta.constrain(trans)
# Plot the transformed probability density
phi = np.linspace(-8, 8, 100)
fig, ax = plt.subplots()
# Let's draw some samples of theta and transform them so that we see
# which one is right
theta_s = prior.rvs(10000)
# Transform it to the new variables
phi_s = trans.finv(theta_s)
# And draw their histogram
ax.hist(phi_s, normed=True, bins=100, alpha=0.25, label='Empirical')
# This is to be compared to the PDF of the model expressed in terms of these new
# variables
ax.plot(phi, [p_phi(p) for p in phi], label='Transformed PDF', linewidth=2)
ax.set_xlim(-3, 10)
ax.set_xlabel(r'transformed $\theta$', fontsize=16)
ax.set_ylabel('PDF', fontsize=16)
plt.legend(loc='best')
# Now let's test the gradients
m.checkgrad(verbose=True)
# And show the plot
plt.show(block=True)
|
bsd-3-clause
|
mementum/backtrader
|
backtrader/plot/finance.py
|
1
|
19346
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils.py3 import range, zip
import matplotlib.collections as mcol
import matplotlib.colors as mcolors
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
from .utils import shade_color
class CandlestickPlotHandler(object):
legend_opens = [0.50, 0.50, 0.50]
legend_highs = [1.00, 1.00, 1.00]
legend_lows = [0.00, 0.00, 0.00]
legend_closes = [0.80, 0.00, 1.00]
def __init__(self,
ax, x, opens, highs, lows, closes,
colorup='k', colordown='r',
edgeup=None, edgedown=None,
tickup=None, tickdown=None,
width=1, tickwidth=1,
edgeadjust=0.05, edgeshading=-10,
alpha=1.0,
label='_nolegend',
fillup=True,
filldown=True,
**kwargs):
        # Manage the up/down bar colors
r, g, b = mcolors.colorConverter.to_rgb(colorup)
self.colorup = r, g, b, alpha
r, g, b = mcolors.colorConverter.to_rgb(colordown)
self.colordown = r, g, b, alpha
# Manage the edge up/down colors for the bars
if edgeup:
r, g, b = mcolors.colorConverter.to_rgb(edgeup)
self.edgeup = ((r, g, b, alpha),)
else:
self.edgeup = shade_color(self.colorup, edgeshading)
if edgedown:
r, g, b = mcolors.colorConverter.to_rgb(edgedown)
self.edgedown = ((r, g, b, alpha),)
else:
self.edgedown = shade_color(self.colordown, edgeshading)
# Manage the up/down tick colors
if tickup:
r, g, b = mcolors.colorConverter.to_rgb(tickup)
self.tickup = ((r, g, b, alpha),)
else:
self.tickup = self.edgeup
if tickdown:
r, g, b = mcolors.colorConverter.to_rgb(tickdown)
self.tickdown = ((r, g, b, alpha),)
else:
self.tickdown = self.edgedown
self.barcol, self.tickcol = self.barcollection(
x, opens, highs, lows, closes,
width, tickwidth, edgeadjust,
label=label,
fillup=fillup, filldown=filldown,
**kwargs)
# add collections to the axis and return them
ax.add_collection(self.tickcol)
ax.add_collection(self.barcol)
# Update the axis
ax.update_datalim(((0, min(lows)), (len(opens), max(highs))))
ax.autoscale_view()
# Add self as legend handler for this object
mlegend.Legend.update_default_handler_map({self.barcol: self})
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
x0 = handlebox.xdescent
y0 = handlebox.ydescent
width = handlebox.width / len(self.legend_opens)
height = handlebox.height
# Generate the x axis coordinates (handlebox based)
xs = [x0 + width * (i + 0.5) for i in range(len(self.legend_opens))]
barcol, tickcol = self.barcollection(
xs,
self.legend_opens, self.legend_highs,
self.legend_lows, self.legend_closes,
width=width, tickwidth=2,
scaling=height, bot=y0)
barcol.set_transform(handlebox.get_transform())
handlebox.add_artist(barcol)
tickcol.set_transform(handlebox.get_transform())
handlebox.add_artist(tickcol)
return barcol, tickcol
def barcollection(self,
xs,
opens, highs, lows, closes,
width, tickwidth=1, edgeadjust=0,
label='_nolegend',
scaling=1.0, bot=0,
fillup=True, filldown=True,
**kwargs):
# Prepack different zips of the series values
oc = lambda: zip(opens, closes) # NOQA: E731
xoc = lambda: zip(xs, opens, closes) # NOQA: E731
iohlc = lambda: zip(xs, opens, highs, lows, closes) # NOQA: E731
colorup = self.colorup if fillup else 'None'
colordown = self.colordown if filldown else 'None'
colord = {True: colorup, False: colordown}
colors = [colord[o < c] for o, c in oc()]
edgecolord = {True: self.edgeup, False: self.edgedown}
edgecolors = [edgecolord[o < c] for o, c in oc()]
tickcolord = {True: self.tickup, False: self.tickdown}
tickcolors = [tickcolord[o < c] for o, c in oc()]
delta = width / 2 - edgeadjust
def barbox(i, open, close):
# delta seen as closure
left, right = i - delta, i + delta
open = open * scaling + bot
close = close * scaling + bot
return (left, open), (left, close), (right, close), (right, open)
barareas = [barbox(i, o, c) for i, o, c in xoc()]
def tup(i, open, high, close):
high = high * scaling + bot
open = open * scaling + bot
close = close * scaling + bot
return (i, high), (i, max(open, close))
tickrangesup = [tup(i, o, h, c) for i, o, h, l, c in iohlc()]
def tdown(i, open, low, close):
low = low * scaling + bot
open = open * scaling + bot
close = close * scaling + bot
return (i, low), (i, min(open, close))
tickrangesdown = [tdown(i, o, l, c) for i, o, h, l, c in iohlc()]
# Extra variables for the collections
useaa = 0, # use tuple here
lw = 0.5, # and here
tlw = tickwidth,
# Bar collection for the candles
barcol = mcol.PolyCollection(
barareas,
facecolors=colors,
edgecolors=edgecolors,
antialiaseds=useaa,
linewidths=lw,
label=label,
**kwargs)
        # LineCollections have a higher zorder than PolyCollections; to
        # ensure the edges of the bars are not overwritten by the lines,
        # we need to put the bars slightly over the LineCollections
kwargs['zorder'] = barcol.get_zorder() * 0.9999
# Up/down ticks from the body
tickcol = mcol.LineCollection(
tickrangesup + tickrangesdown,
colors=tickcolors,
linewidths=tlw,
antialiaseds=useaa,
**kwargs)
# return barcol, tickcol
return barcol, tickcol
def plot_candlestick(ax,
x, opens, highs, lows, closes,
colorup='k', colordown='r',
edgeup=None, edgedown=None,
tickup=None, tickdown=None,
width=1, tickwidth=1.25,
edgeadjust=0.05, edgeshading=-10,
alpha=1.0,
label='_nolegend',
fillup=True,
filldown=True,
**kwargs):
chandler = CandlestickPlotHandler(
ax, x, opens, highs, lows, closes,
colorup, colordown,
edgeup, edgedown,
tickup, tickdown,
width, tickwidth,
edgeadjust, edgeshading,
alpha,
label,
fillup,
filldown,
**kwargs)
    # Return the collections. barcol goes first because it is the larger
    # one, has the dominant zorder and defines the legend
return chandler.barcol, chandler.tickcol
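# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of how plot_candlestick above could be driven with plain
# Python lists. The helper name and the synthetic data below are invented for
# demonstration only and assume matplotlib is importable.
def _demo_plot_candlestick():
    import random
    import matplotlib.pyplot as plt

    random.seed(1)
    n = 30
    closes = [100.0]
    for _ in range(n - 1):
        closes.append(closes[-1] + random.uniform(-1.0, 1.0))
    opens = [closes[0]] + closes[:-1]
    highs = [max(o, c) + random.uniform(0.0, 0.5) for o, c in zip(opens, closes)]
    lows = [min(o, c) - random.uniform(0.0, 0.5) for o, c in zip(opens, closes)]
    x = list(range(n))

    fig, ax = plt.subplots()
    plot_candlestick(ax, x, opens, highs, lows, closes,
                     colorup='g', colordown='r', width=0.8, label='Price')
    ax.legend()
    return fig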
class VolumePlotHandler(object):
legend_vols = [0.5, 1.0, 0.75]
legend_opens = [0, 1, 0]
legend_closes = [1, 0, 1]
def __init__(self,
ax, x, opens, closes, volumes,
colorup='k', colordown='r',
edgeup=None, edgedown=None,
edgeshading=-5, edgeadjust=0.05,
width=1, alpha=1.0,
**kwargs):
# Manage the up/down colors
r, g, b = mcolors.colorConverter.to_rgb(colorup)
self.colorup = r, g, b, alpha
r, g, b = mcolors.colorConverter.to_rgb(colordown)
self.colordown = r, g, b, alpha
# Prepare the edge colors
if not edgeup:
self.edgeup = shade_color(self.colorup, edgeshading)
else:
r, g, b = mcolors.colorConverter.to_rgb(edgeup)
self.edgeup = r, g, b, alpha
if not edgedown:
self.edgedown = shade_color(self.colordown, edgeshading)
else:
r, g, b = mcolors.colorConverter.to_rgb(edgedown)
self.edgedown = r, g, b, alpha
corners = (0, 0), (len(closes), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
self.barcol = self.barcollection(
x, opens, closes, volumes,
width=width, edgeadjust=edgeadjust,
**kwargs)
# add to axes
ax.add_collection(self.barcol)
# Add a legend handler for this object
mlegend.Legend.update_default_handler_map({self.barcol: self})
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
x0 = handlebox.xdescent
y0 = handlebox.ydescent
width = handlebox.width / len(self.legend_vols)
height = handlebox.height
# Generate the x axis coordinates (handlebox based)
xs = [x0 + width * (i + 0.5) for i in range(len(self.legend_vols))]
barcol = self.barcollection(
xs, self.legend_opens, self.legend_closes, self.legend_vols,
width=width, vscaling=height, vbot=y0)
barcol.set_transform(handlebox.get_transform())
handlebox.add_artist(barcol)
return barcol
def barcollection(self,
x, opens, closes, vols,
width, edgeadjust=0,
vscaling=1.0, vbot=0,
**kwargs):
# Prepare the data
openclose = lambda: zip(opens, closes) # NOQA: E731
# Calculate bars colors
colord = {True: self.colorup, False: self.colordown}
colors = [colord[open < close] for open, close in openclose()]
edgecolord = {True: self.edgeup, False: self.edgedown}
edgecolors = [edgecolord[open < close] for open, close in openclose()]
# bar width to the sides
delta = width / 2 - edgeadjust
# small auxiliary func to return the bar coordinates
def volbar(i, v):
left, right = i - delta, i + delta
v = vbot + v * vscaling
return (left, vbot), (left, v), (right, v), (right, vbot)
barareas = [volbar(i, v) for i, v in zip(x, vols)]
barcol = mcol.PolyCollection(
barareas,
facecolors=colors,
edgecolors=edgecolors,
antialiaseds=(0,),
linewidths=(0.5,),
**kwargs)
return barcol
def plot_volume(
ax, x, opens, closes, volumes,
colorup='k', colordown='r',
edgeup=None, edgedown=None,
edgeshading=-5, edgeadjust=0.05,
width=1, alpha=1.0,
**kwargs):
vhandler = VolumePlotHandler(
ax, x, opens, closes, volumes,
colorup, colordown,
edgeup, edgedown,
edgeshading, edgeadjust,
width, alpha,
**kwargs)
return vhandler.barcol,
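# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of plot_volume above with invented open/close/volume lists.
# The helper name and the data are hypothetical and assume matplotlib is
# importable.
def _demo_plot_volume():
    import random
    import matplotlib.pyplot as plt

    random.seed(2)
    n = 30
    opens = [random.uniform(9.0, 11.0) for _ in range(n)]
    closes = [o + random.uniform(-0.5, 0.5) for o in opens]
    volumes = [random.uniform(1000.0, 5000.0) for _ in range(n)]
    x = list(range(n))

    fig, ax = plt.subplots()
    plot_volume(ax, x, opens, closes, volumes,
                colorup='g', colordown='r', width=0.8)
    return fig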
class OHLCPlotHandler(object):
legend_opens = [0.50, 0.50, 0.50]
legend_highs = [1.00, 1.00, 1.00]
legend_lows = [0.00, 0.00, 0.00]
legend_closes = [0.80, 0.20, 0.90]
def __init__(self,
ax, x, opens, highs, lows, closes,
colorup='k', colordown='r',
width=1, tickwidth=0.5,
alpha=1.0,
label='_nolegend',
**kwargs):
        # Manage the up/down bar colors
r, g, b = mcolors.colorConverter.to_rgb(colorup)
self.colorup = r, g, b, alpha
r, g, b = mcolors.colorConverter.to_rgb(colordown)
self.colordown = r, g, b, alpha
bcol, ocol, ccol = self.barcollection(
x, opens, highs, lows, closes,
width=width, tickwidth=tickwidth,
label=label,
**kwargs)
self.barcol = bcol
self.opencol = ocol
self.closecol = ccol
# add collections to the axis and return them
ax.add_collection(self.barcol)
ax.add_collection(self.opencol)
ax.add_collection(self.closecol)
# Update the axis
ax.update_datalim(((0, min(lows)), (len(opens), max(highs))))
ax.autoscale_view()
# Add self as legend handler for this object
mlegend.Legend.update_default_handler_map({self.barcol: self})
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
x0 = handlebox.xdescent
y0 = handlebox.ydescent
width = handlebox.width / len(self.legend_opens)
height = handlebox.height
# Generate the x axis coordinates (handlebox based)
xs = [x0 + width * (i + 0.5) for i in range(len(self.legend_opens))]
barcol, opencol, closecol = self.barcollection(
xs,
self.legend_opens, self.legend_highs,
self.legend_lows, self.legend_closes,
width=1.5, tickwidth=2,
scaling=height, bot=y0)
barcol.set_transform(handlebox.get_transform())
handlebox.add_artist(barcol)
# opencol.set_transform(handlebox.get_transform())
handlebox.add_artist(opencol)
# closecol.set_transform(handlebox.get_transform())
handlebox.add_artist(closecol)
return barcol, opencol, closecol
def barcollection(self,
xs,
opens, highs, lows, closes,
width, tickwidth,
label='_nolegend',
scaling=1.0, bot=0,
**kwargs):
# Prepack different zips of the series values
ihighlow = lambda: zip(xs, highs, lows) # NOQA: E731
iopen = lambda: zip(xs, opens) # NOQA: E731
iclose = lambda: zip(xs, closes) # NOQA: E731
openclose = lambda: zip(opens, closes) # NOQA: E731
colord = {True: self.colorup, False: self.colordown}
colors = [colord[open < close] for open, close in openclose()]
# Extra variables for the collections
useaa = 0,
lw = width,
tlw = tickwidth,
# Calculate the barranges
def barrange(i, high, low):
return (i, low * scaling + bot), (i, high * scaling + bot)
barranges = [barrange(i, high, low) for i, high, low in ihighlow()]
barcol = mcol.LineCollection(
barranges,
colors=colors,
linewidths=lw,
antialiaseds=useaa,
label=label,
**kwargs)
def tickopen(i, open):
open = open * scaling + bot
return (i - tickwidth, open), (i, open)
openticks = [tickopen(i, open) for i, open in iopen()]
opencol = mcol.LineCollection(
openticks,
colors=colors,
antialiaseds=useaa,
linewidths=tlw,
label='_nolegend',
**kwargs)
def tickclose(i, close):
close = close * scaling + bot
return (i, close), (i + tickwidth, close)
closeticks = [tickclose(i, close) for i, close in iclose()]
closecol = mcol.LineCollection(
closeticks,
colors=colors,
antialiaseds=useaa,
linewidths=tlw,
label='_nolegend',
**kwargs)
        # return the body bars and the open/close tick collections
return barcol, opencol, closecol
def plot_ohlc(ax, x, opens, highs, lows, closes,
colorup='k', colordown='r',
width=1.5, tickwidth=0.5,
alpha=1.0,
label='_nolegend',
**kwargs):
handler = OHLCPlotHandler(
ax, x, opens, highs, lows, closes,
colorup, colordown,
width, tickwidth,
alpha,
label,
**kwargs)
return handler.barcol, handler.opencol, handler.closecol
class LineOnClosePlotHandler(object):
legend_closes = [0.00, 0.66, 0.33, 1.00]
def __init__(self,
ax, x, closes, color='k',
width=1, alpha=1.0,
label='_nolegend',
**kwargs):
self.color = color
self.alpha = alpha
self.loc, = self.barcollection(
x, closes,
width=width,
label=label,
**kwargs)
# add collections to the axis and return them
ax.add_line(self.loc)
# Update the axis
ax.update_datalim(((x[0], min(closes)), (x[-1], max(closes))))
ax.autoscale_view()
# Add self as legend handler for this object
mlegend.Legend.update_default_handler_map({self.loc: self})
def legend_artist(self, legend, orig_handle, fontsize, handlebox):
x0 = handlebox.xdescent
y0 = handlebox.ydescent
width = handlebox.width / len(self.legend_closes)
height = handlebox.height
# Generate the x axis coordinates (handlebox based)
xs = [x0 + width * (i + 0.5) for i in range(len(self.legend_closes))]
linecol, = self.barcollection(
xs, self.legend_closes,
width=1.5,
scaling=height, bot=y0)
linecol.set_transform(handlebox.get_transform())
handlebox.add_artist(linecol)
return linecol,
def barcollection(self,
xs, closes,
width,
label='_nolegend',
scaling=1.0, bot=0,
**kwargs):
# Prepack different zips of the series values
scaled = [close * scaling + bot for close in closes]
loc = mlines.Line2D(
xs, scaled,
color=self.color,
lw=width,
label=label,
alpha=self.alpha,
**kwargs)
return loc,
def plot_lineonclose(ax, x, closes,
color='k',
width=1.5,
alpha=1.0,
label='_nolegend',
**kwargs):
handler = LineOnClosePlotHandler(
ax, x, closes,
color=color, width=width,
alpha=alpha, label=label,
**kwargs)
return handler.loc,
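# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch that overlays plot_ohlc and plot_lineonclose above on one
# axes and lets the custom legend handlers registered by the plot handlers be
# picked up by ax.legend(). The helper name and the synthetic data are
# invented for demonstration only and assume matplotlib is importable.
def _demo_plot_ohlc_and_line():
    import random
    import matplotlib.pyplot as plt

    random.seed(3)
    n = 40
    closes = [50.0]
    for _ in range(n - 1):
        closes.append(closes[-1] + random.uniform(-1.0, 1.0))
    opens = [closes[0]] + closes[:-1]
    highs = [max(o, c) + random.uniform(0.0, 0.4) for o, c in zip(opens, closes)]
    lows = [min(o, c) - random.uniform(0.0, 0.4) for o, c in zip(opens, closes)]
    x = list(range(n))

    fig, ax = plt.subplots()
    plot_ohlc(ax, x, opens, highs, lows, closes,
              width=1.5, tickwidth=0.5, label='OHLC')
    plot_lineonclose(ax, x, closes, color='b', width=1.0, label='Close')
    ax.legend()
    return fig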
|
gpl-3.0
|
bert9bert/statsmodels
|
statsmodels/genmod/generalized_estimating_equations.py
|
1
|
98363
|
"""
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
KY Liang and S Zeger. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
S Zeger and KY Liang. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
A Rotnitzky and NP Jewell (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
LA Mancl, TA DeRouen (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from __future__ import division
from statsmodels.compat.python import range, lzip, zip
import numpy as np
from scipy import stats
import pandas as pd
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
DomainWarning,
IterationLimitWarning,
ValueWarning)
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
lhs : ndarray
A q x p matrix which is the left hand side of the
constraint lhs * param = rhs. The number of constraints is
q >= 1 and p is the dimension of the parameter vector.
rhs : ndarray
A 1-dimensional vector of length q which is the right hand
side of the constraint equation.
exog : ndarray
           The n x p exogenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = np.atleast_1d(rhs.squeeze())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
# [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = np.hstack((self.lhs0, self.lhs1))
# param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = np.dot(exog, self.param0)
self.orig_exog = exog
self.exog_fulltrans = np.dot(exog, self.lhsf)
def offset_increment(self):
"""
Returns a vector that should be added to the offset vector to
accommodate the constraint.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
Returns the full exog matrix before it was reduced to
satisfy the constraint.
"""
return self.orig_exog
def unpack_param(self, params):
"""
Converts the parameter vector `params` from reduced to full
coordinates.
"""
return self.param0 + np.dot(self.lhs0, params)
def unpack_cov(self, bcov):
"""
Converts the covariance matrix `bcov` from reduced to full
coordinates.
"""
return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
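# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch, with invented numbers, of how a linear equality constraint
# L * params = R can be encoded with ParameterConstraint above; the same
# (L, R) tuple can be passed as `constraint` to the GEE class below. Here the
# hypothetical constraint forces the second and third coefficients to be equal.
def _demo_parameter_constraint():
    p = 4                                   # number of exog columns (invented)
    lhs = np.zeros((1, p))
    lhs[0, 1], lhs[0, 2] = 1.0, -1.0        # encodes params[1] - params[2] = 0
    rhs = np.zeros(1)
    exog = np.random.normal(size=(50, p))   # invented design matrix

    pc = ParameterConstraint(lhs, rhs, exog)

    # The reduced design spans the constrained model space (p - 1 columns);
    # unpack_param maps reduced coordinates back to the full space, and any
    # unpacked vector satisfies the constraint.
    assert pc.reduced_exog().shape == (50, p - 1)
    full = pc.unpack_param(np.random.normal(size=p - 1))
    assert np.allclose(np.dot(lhs, full), rhs)
    return pc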
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
endog : array-like
1d array of endogenous values (i.e. responses, outcomes,
dependent variables, or 'Y' values).
exog : array-like
2d array of exogeneous values (i.e. covariates, predictors,
independent variables, regressors, or 'X' values). A `nobs x
k` array where `nobs` is the number of observations and `k` is
the number of regressors. An intercept is not included by
default and should be added by the user. See
`statsmodels.tools.add_constant`.
groups : array-like
A 1d array of length `nobs` containing the group labels.
time : array-like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
%(family_doc)s
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.cov_struct.CovStruct for more
information.
offset : array-like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array-like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
update_dep : bool
If true, the dependence parameters are optimized, otherwise
they are held fixed at their starting values.
weights : array-like
An array of weights to use in the analysis. The weights must
be constant within each group. These correspond to
probability weights (pweights) in Stata.
%(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
+ ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian | x x x
inv Gaussian | x x x
binomial | x x x x x x x x x
      Poisson      |   x    x                        x
neg binomial | x x x x
gamma | x x x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
    DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
where N is the total sample size, and g is the average group size.
Examples
--------
%(example)s
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
        distribution use `family=sm.families.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.family.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_nominal_family_doc = """\
The default value `None` uses a multinomial logit family
specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
maxiter : integer
The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : integer
The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : integer
No dependence structure updates occur before this
iteration number.
cov_type : string
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
The scale parameter is estimated as the sum of squared
Pearson residuals divided by `N - ddof_scale`, where N
is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
where N is the total sample size and g is the average group
size.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
`params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Returns
-------
**Attributes**
cov_params_default : ndarray
default covariance of the parameter estimates. Is chosen among one
of the following three based on `cov_type`
cov_robust : ndarray
covariance of the parameter estimates that is robust
cov_naive : ndarray
covariance of the parameter estimates that is not robust to
correlation or variance misspecification
cov_robust_bc : ndarray
covariance of the parameter estimates that is robust and bias
reduced
converged : bool
indicator for convergence of the optimization.
True if the norm of the score is smaller than a threshold
cov_type : string
string indicating whether a "robust", "naive" or "bias_reduced"
covariance is used as default
fit_history : dict
Contains information about the iterations.
fittedvalues : array
Linear predicted values for the fitted model.
dot(exog, params)
model : class instance
Pointer to GEE model instance that called `fit`.
normalized_cov_params : array
See GEE docstring
params : array
The coefficients of the fitted model. Note that
interpretation of the coefficients often depends on the
distribution family and the data.
scale : float
The estimate of the scale / dispersion for the model fit.
See GEE.fit for more information.
score_norm : float
norm of the score at the end of the iterative estimation.
bse : array
The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
>>> print(result.summary())
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_nominal_example = """
Fit a nominal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("nominal")
>>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.NominalGEE.from_formula("y ~ x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.nominal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
class GEE(base.Model):
__doc__ = (
" Estimation of marginal regression models using Generalized\n"
" Estimating Equations (GEE).\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example})
cached_means = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if family is not None:
if not isinstance(family.link, tuple(family.safe_links)):
import warnings
msg = ("The {0} link function does not respect the "
"domain of the {1} family.")
warnings.warn(msg.format(family.link.__class__.__name__,
family.__class__.__name__),
DomainWarning)
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
groups = np.array(groups) # in case groups is pandas
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
**kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the offset and exposure
self._offset_exposure = None
if offset is not None:
self._offset_exposure = self.offset.copy()
self.offset = offset
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
if self._offset_exposure is not None:
self._offset_exposure += np.log(exposure)
else:
self._offset_exposure = np.log(exposure)
self.exposure = exposure
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the constraint must have "
"the same number of columns as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = (
self.constraint.offset_increment().copy())
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = np.unique(self.groups, return_inverse=True)
se = pd.Series(index=np.arange(len(ix)))
gb = se.groupby(ix).groups
dk = [(lb, np.asarray(gb[k])) for k, lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.weights_li = [x[0] for x in self.weights_li]
self.weights_li = np.asarray(self.weights_li)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[np.arange(len(y), dtype=np.float64)[:, None]
for y in self.endog_li]
self.time = np.concatenate(self.time_li)
if self._offset_exposure is not None:
self.offset_li = self.cluster_list(self._offset_exposure)
else:
self.offset_li = None
if constraint is not None:
self.constraint.exog_fulltrans_li = \
self.cluster_list(self.constraint.exog_fulltrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# The following are column based, not on rank see #1928
self.df_model = self.exog.shape[1] - 1 # assumes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
# Override to allow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array-like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array-like
The data for the model.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of the data to used when
fitting the model.
time : array-like or string
The time values, used for dependence structures involving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : array-like or string
The offset values, added to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : array-like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is added to the offset (if any). If a string, this is the
name of a variable in `data` that contains the offset
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : GEE model instance
Notes
------
`data` must define __getitem__ with the keys in the formula
terms args and kwargs are passed on to the model
instantiation. E.g., a numpy structured or rec array, a
dictionary, or a pandas DataFrame.
This method currently does not correctly handle missing
values, so missing values should be explicitly dropped from
the DataFrame before calling this method.
""" % {'missing_param_doc': base._missing_param_doc}
if type(groups) == str:
groups = data[groups]
if type(time) == str:
time = data[time]
if type(offset) == str:
offset = data[offset]
if type(exposure) == str:
exposure = data[exposure]
model = super(GEE, cls).from_formula(formula, data=data, subset=subset,
groups=groups, time=time,
offset=offset,
exposure=exposure,
*args, **kwargs)
return model
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
def estimate_scale(self):
"""
Returns an estimate of the scale parameter at the current
parameter value.
"""
if isinstance(self.family, (families.Binomial, families.Poisson,
_Multinomial)):
return 1.
endog = self.endog_li
cached_means = self.cached_means
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
fsum = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
f = self.weights_li[i] if self.weights is not None else 1.
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
scale += f * np.sum(resid ** 2)
fsum += f * len(endog[i])
scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))
return scale
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array-like
The exogeneous data at which the derivative is computed.
lin_pred : array-like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be added to
`lin_pred` prior to calling this function.
"""
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : array-like
Values of the independent variables at which the derivative
is calculated.
params : array-like
Parameter values at which the derivative is calculated.
offset_exposure : array-like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = np.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverse_deriv(lin_pred)
dmat = np.outer(idl, params)
return dmat
def _update_mean_params(self):
"""
Returns
-------
update : array-like
The update vector such that params + update is the next
iterate when solving the score equations.
score : array-like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
cached_means = self.cached_means
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
score += f * np.dot(dmat.T, vinv_resid)
update = np.linalg.solve(bmat, score)
self._fit_history["cov_adjust"].append(
self.cov_struct.cov_adjust)
return update, score
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = np.dot(exog[i], mean_params)
if offset is not None:
lpr += offset[i]
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : array-like
The robust, or sandwich estimate of the covariance, which
is meaningful even if the working covariance structure is
incorrectly specified.
cov_naive : array-like
The model-based estimate of the covariance, which is
meaningful if the covariance structure is correctly
specified.
cmat : array-like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat, resid))
if rslt is None:
return None, None, None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
dvinv_resid = f * np.dot(dmat.T, vinv_resid)
cmat += np.outer(dvinv_resid, dvinv_resid)
scale = self.estimate_scale()
bmati = np.linalg.inv(bmat)
cov_naive = bmati * scale
cov_robust = np.dot(bmati, np.dot(cmat, bmati))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
return cov_robust, cov_naive, cmat
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen.
def _bc_covmat(self, cov_naive):
cov_naive = cov_naive / self.scaling_factor
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
scale = self.estimate_scale()
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat,))
if rslt is None:
return None
vinv_d = rslt[0]
vinv_d /= scale
hmat = np.dot(vinv_d, cov_naive)
hmat = np.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (aresid,))
if rslt is None:
return None
srt = rslt[0]
srt = f * np.dot(dmat.T, srt) / scale
bcm += np.outer(srt, srt)
cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))
cov_robust_bc *= self.scaling_factor
return cov_robust_bc
def predict(self, params, exog=None, offset=None,
exposure=None, linear=False):
"""
Return predicted values for a marginal regression model fit
using GEE.
Parameters
----------
params : array-like
Parameters / coefficients of a marginal regression model.
exog : array-like, optional
Design / exogenous data. If exog is None, model exog is
used.
offset : array-like, optional
Offset for exog if provided. If offset is None, model
offset is used.
exposure : array-like, optional
Exposure for exog, if exposure is None, model exposure is
used. Only allowed if link function is the logarithm.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link
function at the linear predicted values.
Returns
-------
An array of fitted values
Notes
-----
Using log(V) as the offset is equivalent to using V as the
exposure. If exposure U and offset V are both provided, then
log(U) + V is added to the linear predictor.
"""
# TODO: many paths through this, not well covered in tests
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
# This is the combined offset and exposure
_offset = 0.
# Using model exog
if exog is None:
exog = self.exog
if not isinstance(self.family.link, families.links.Log):
# Don't need to worry about exposure
if offset is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure.copy()
else:
_offset = offset
else:
if offset is None and exposure is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure
elif offset is None and exposure is not None:
_offset = np.log(exposure)
if hasattr(self, "offset"):
_offset = _offset + self.offset
elif offset is not None and exposure is None:
_offset = offset
if hasattr(self, "exposure"):
_offset = offset + np.log(self.exposure)
else:
_offset = offset + np.log(exposure)
# exog is provided: this is simpler than above because we
# never use model exog or exposure if exog is provided.
else:
if offset is not None:
_offset = _offset + offset
if exposure is not None:
_offset += np.log(exposure)
lin_pred = _offset + np.dot(exog, params)
if not linear:
return self.family.link.inverse(lin_pred)
return lin_pred
def _starting_params(self):
# TODO: use GLM to get Poisson starting values
return np.zeros(self.exog.shape[1])
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.):
# Docstring attached below
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = {'params': [],
'score': [],
'dep_params': [],
'cov_adjust': []}
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
mean_params = self._starting_params()
else:
start_params = np.asarray(start_params)
mean_params = start_params.copy()
self.update_cached_means(mean_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(maxiter):
update, score = self._update_mean_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
mean_params += update
self.update_cached_means(mean_params)
# L2 norm of the change in mean structure parameters at
# this iteration.
del_params = np.sqrt(np.sum(score ** 2))
self._fit_history['params'].append(mean_params.copy())
self._fit_history['score'].append(score)
self._fit_history['dep_params'].append(
self.cov_struct.dep_params)
# Don't exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep is False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(mean_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if mean_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
bc_cov = None
if cov_type == "bias_reduced":
bc_cov = self._bc_covmat(ncov)
if self.constraint is not None:
x = mean_params.copy()
mean_params, bcov = self._handle_constraint(mean_params, bcov)
if mean_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
y, ncov = self._handle_constraint(x, ncov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
if bc_cov is not None:
y, bc_cov = self._handle_constraint(x, bc_cov)
if x is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type=cov_type,
cov_robust=bcov,
cov_naive=ncov,
cov_robust_bc=bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we don't want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, mean_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
delattr(self, "_fit_history")
results.score_norm = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.maxiter = maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_norm", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"maxiter"]
return GEEResultsWrapper(results)
fit.__doc__ = _gee_fit_doc
def _handle_constraint(self, mean_params, bcov):
"""
Expand the parameter estimate `mean_params` and covariance matrix
`bcov` to the coordinate system of the unconstrained model.
Parameters
----------
mean_params : array-like
A parameter vector estimate for the reduced model.
bcov : array-like
The covariance matrix of mean_params.
Returns
-------
mean_params : array-like
The input parameter vector mean_params, expanded to the
coordinate system of the full model
bcov : array-like
The input covariance matrix bcov, expanded to the
coordinate system of the full model
"""
# The number of variables in the full model
red_p = len(mean_params)
full_p = self.constraint.lhs.shape[1]
mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]
# Get the score vector under the full model.
save_exog_li = self.exog_li
self.exog_li = self.constraint.exog_fulltrans_li
import copy
save_cached_means = copy.deepcopy(self.cached_means)
self.update_cached_means(mean_params0)
_, score = self._update_mean_params()
if score is None:
warnings.warn("Singular matrix encountered in GEE score test",
ConvergenceWarning)
return None, None
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = score[red_p:] / scale
amat = np.linalg.inv(ncov1)
bmat_11 = cmat[0:red_p, 0:red_p]
bmat_22 = cmat[red_p:, red_p:]
bmat_12 = cmat[0:red_p, red_p:]
amat_11 = amat[0:red_p, 0:red_p]
amat_12 = amat[0:red_p, red_p:]
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
self.score_test_results = {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
mean_params = self.constraint.unpack_param(mean_params)
bcov = self.constraint.unpack_cov(bcov)
self.exog_li = save_exog_li
self.cached_means = save_cached_means
self.exog = self.constraint.restore_exog()
return mean_params, bcov
def _update_assoc(self, params):
"""
Update the association parameters
"""
self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects, returns dF(XB) / dX where F(.)
is the fitted mean.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# This form should be appropriate for group 1 probit, logit,
# logistic, cloglog, heckprob, xtprobit.
offset_exposure = None
if exog is None:
exog = self.exog
offset_exposure = self._offset_exposure
margeff = self.mean_deriv_exog(exog, params, offset_exposure)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
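# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch contrasting the three covariance estimates discussed in the
# GEE notes via the standard_errors() helper defined on GEEResults below.
# `result` is assumed to come from a prior fit with cov_type="bias_reduced"
# so that all three estimates are available.
def _demo_compare_standard_errors(result):
    return pd.DataFrame({
        "robust": result.standard_errors("robust"),
        "naive": result.standard_errors("naive"),
        "bias_reduced": result.standard_errors("bias_reduced"),
    }, index=result.model.exog_names)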
class GEEResults(base.LikelihoodModelResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model "
"using GEE.\n" + _gee_results_doc)
def __init__(self, model, params, cov_params, scale,
cov_type='robust', use_t=False, **kwds):
super(GEEResults, self).__init__(
model, params, normalized_cov_params=cov_params,
scale=scale)
# not added by super
self.df_resid = model.df_resid
self.df_model = model.df_model
self.family = model.family
attr_kwds = kwds.pop('attr_kwds', {})
self.__dict__.update(attr_kwds)
# we don't do this if the cov_type has already been set
# subclasses can set it through attr_kwds
if not (hasattr(self, 'cov_type') and
hasattr(self, 'cov_params_default')):
self.cov_type = cov_type # keep alias
covariance_type = self.cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `cov_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if cov_type == "robust":
cov = self.cov_robust
elif cov_type == "naive":
cov = self.cov_naive
elif cov_type == "bias_reduced":
cov = self.cov_robust_bc
self.cov_params_default = cov
else:
if self.cov_type != cov_type:
raise ValueError('cov_type in argument is different from '
'already attached cov_type')
def standard_errors(self, cov_type="robust"):
"""
This is a convenience function that returns the standard
errors for any covariance type. The value of `bse` is the
standard errors for whichever covariance type is specified as
an argument to `fit` (defaults to "robust").
Parameters
----------
cov_type : string
One of "robust", "naive", or "bias_reduced". Determines
the covariance used to compute standard errors. Defaults
to "robust".
"""
# Check covariance_type
covariance_type = cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `covariance_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if covariance_type == "robust":
return np.sqrt(np.diag(self.cov_robust))
elif covariance_type == "naive":
return np.sqrt(np.diag(self.cov_naive))
elif covariance_type == "bias_reduced":
if self.cov_robust_bc is None:
raise ValueError(
"GEE: `bias_reduced` covariance not available")
return np.sqrt(np.diag(self.cov_robust_bc))
# Need to override to allow for different covariance types.
@cache_readonly
def bse(self):
return self.standard_errors(self.cov_type)
@cache_readonly
def resid(self):
"""
Returns the residuals, the endogeneous data minus the fitted
values from the model.
"""
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_split(self):
"""
Returns the residuals, the endogeneous data minus the fitted
values from the model. The residuals are returned as a list
of arrays containing the residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.resid[ii])
return sresid
@cache_readonly
def resid_centered(self):
"""
Returns the residuals centered within each group.
"""
cresid = self.resid.copy()
for v in self.model.group_labels:
ii = self.model.group_indices[v]
cresid[ii] -= cresid[ii].mean()
return cresid
@cache_readonly
def resid_centered_split(self):
"""
Returns the residuals centered within each group. The
residuals are returned as a list of arrays containing the
centered residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.centered_resid[ii])
return sresid
# FIXME: alias to be removed, temporary backwards compatibility
split_resid = resid_split
centered_resid = resid_centered
split_centered_resid = resid_centered_split
@cache_readonly
def resid_response(self):
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_pearson(self):
val = self.model.endog - self.fittedvalues
val = val / np.sqrt(self.family.variance(self.fittedvalues))
return val
@cache_readonly
def resid_working(self):
val = self.resid_response
val = val / self.family.link.deriv(self.fittedvalues)
return val
@cache_readonly
def resid_anscombe(self):
return self.family.resid_anscombe(self.model.endog, self.fittedvalues)
@cache_readonly
def resid_deviance(self):
return self.family.resid_dev(self.model.endog, self.fittedvalues)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values from the model.
"""
return self.model.family.link.inverse(np.dot(self.model.exog,
self.params))
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
plot_added_variable.__doc__ = _plot_added_variable_doc % {
'extra_params_doc': ''}
def plot_partial_residuals(self, focus_exog, ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
'extra_params_doc': ''}
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
'extra_params_doc': ''}
def conf_int(self, alpha=.05, cols=None, cov_type=None):
"""
Returns confidence intervals for the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. i.e., The
default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
cov_type : string
The covariance type used for computing standard errors;
must be one of 'robust', 'naive', and 'bias reduced'.
See `GEE` for details.
Notes
-----
The confidence interval is based on the Gaussian distribution.
"""
# super doesn't allow to specify cov_type and method is not
# implemented,
# FIXME: remove this method here
if cov_type is None:
bse = self.bse
else:
bse = self.standard_errors(cov_type=cov_type)
params = self.params
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = params[cols] - q * bse[cols]
upper = params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the GEE regression results
Parameters
-----------
yname : string, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` for ## in p the number of regressors
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
cov_type : string
The covariance type used to compute the standard errors;
one of 'robust' (the usual robust sandwich-type covariance
estimate), 'naive' (ignores dependence), and 'bias
reduced' (the Mancl/DeRouen estimate).
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Generalized']),
('', ['Estimating Equations']),
('Family:', [self.model.family.__class__.__name__]),
('Dependence structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Covariance type: ', [self.cov_type, ])
]
NY = [len(y) for y in self.model.endog_li]
top_right = [('No. Observations:', [sum(NY)]),
('No. clusters:', [len(self.model.endog_li)]),
('Min. cluster size:', [min(NY)]),
('Max. cluster size:', [max(NY)]),
('Mean cluster size:', ["%.1f" % np.mean(NY)]),
('Num. iterations:', ['%d' %
len(self.fit_history['params'])]),
('Scale:', ["%.3f" % self.scale]),
('Time:', None),
]
# The skew of the residuals
skew1 = stats.skew(self.resid)
kurt1 = stats.kurtosis(self.resid)
skew2 = stats.skew(self.centered_resid)
kurt2 = stats.kurtosis(self.centered_resid)
diagn_left = [('Skew:', ["%12.4f" % skew1]),
('Centered skew:', ["%12.4f" % skew2])]
diagn_right = [('Kurtosis:', ["%12.4f" % kurt1]),
('Centered kurtosis:', ["%12.4f" % kurt2])
]
if title is None:
title = self.model.__class__.__name__ + ' ' +\
"Regression Results"
# Override the dataframe names if xname is provided as an
# argument.
if xname is not None:
xna = xname
else:
xna = self.model.exog_names
# Create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=self.model.endog_names, xname=xna,
title=title)
smry.add_table_params(self, yname=yname, xname=xna,
alpha=alpha, use_t=False)
smry.add_table_2cols(self, gleft=diagn_left,
gright=diagn_right, yname=yname,
xname=xna, title="")
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is 'all'
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
            - 'eydx' - estimate semielasticity -- d(lny)/dx
            Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array-like, optional
            Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the
            zero-indexed column number as the key and the value at which that
            variable is held as the dictionary value. Default is None for all
            independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
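        Examples
        --------
        Illustrative sketch, assuming ``res`` is a fitted GEEResults
        instance:
        >>> marg = res.get_margeff(at='overall', method='dydx')
        >>> print(marg.summary())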
"""
if self.model.constraint is not None:
warnings.warn("marginal effects ignore constraints",
ValueWarning)
return GEEMargins(self, (at, method, atexog, dummy, count))
def plot_isotropic_dependence(self, ax=None, xpoints=10,
min_n=50):
"""
Create a plot of the pairwise products of within-group
residuals against the corresponding time differences. This
plot can be used to assess the possible form of an isotropic
covariance structure.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
xpoints : scalar or array-like
If scalar, the number of points equally spaced points on
the time difference axis used to define bins for
calculating local means. If an array, the specific points
that define the bins.
min_n : integer
The minimum sample size in a bin for the mean residual
product to be included on the plot.
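        Examples
        --------
        Illustrative sketch, assuming ``res`` is a fitted GEEResults
        instance for a model that was given a `time` argument:
        >>> fig = res.plot_isotropic_dependence()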
"""
from statsmodels.graphics import utils as gutils
resid = self.model.cluster_list(self.resid)
time = self.model.cluster_list(self.model.time)
# All within-group pairwise time distances (xdt) and the
# corresponding products of scaled residuals (xre).
xre, xdt = [], []
for re, ti in zip(resid, time):
ix = np.tril_indices(re.shape[0], 0)
re = re[ix[0]] * re[ix[1]] / self.scale ** 2
xre.append(re)
dists = np.sqrt(((ti[ix[0], :] - ti[ix[1], :]) ** 2).sum(1))
xdt.append(dists)
xre = np.concatenate(xre)
xdt = np.concatenate(xdt)
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# Convert to a correlation
ii = np.flatnonzero(xdt == 0)
v0 = np.mean(xre[ii])
xre /= v0
# Use the simple average to smooth, since fancier smoothers
# that trim and downweight outliers give biased results (we
# need the actual mean of a skewed distribution).
if np.isscalar(xpoints):
xpoints = np.linspace(0, max(xdt), xpoints)
dg = np.digitize(xdt, xpoints)
dgu = np.unique(dg)
hist = np.asarray([np.sum(dg == k) for k in dgu])
ii = np.flatnonzero(hist >= min_n)
dgu = dgu[ii]
dgy = np.asarray([np.mean(xre[dg == k]) for k in dgu])
dgx = np.asarray([np.mean(xdt[dg == k]) for k in dgu])
ax.plot(dgx, dgy, '-', color='orange', lw=5)
ax.set_xlabel("Time difference")
ax.set_ylabel("Product of scaled residuals")
return fig
def sensitivity_params(self, dep_params_first,
dep_params_last, num_steps):
"""
Refits the GEE model using a sequence of values for the
dependence parameters.
Parameters
----------
dep_params_first : array-like
The first dep_params in the sequence
dep_params_last : array-like
The last dep_params in the sequence
num_steps : int
The number of dep_params in the sequence
Returns
-------
results : array-like
The GEEResults objects resulting from the fits.
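        Examples
        --------
        Illustrative sketch, assuming ``res`` was fit with an
        exchangeable dependence structure (whose `dep_params` is a
        scalar):
        >>> fits = res.sensitivity_params(0., 0.9, 5)
        >>> effects = [f.params[0] for f in fits]  # first coefficient per fit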
"""
model = self.model
import copy
cov_struct = copy.deepcopy(self.model.cov_struct)
# We are fixing the dependence structure in each run.
update_dep = model.update_dep
model.update_dep = False
dep_params = []
results = []
for x in np.linspace(0, 1, num_steps):
dp = x * dep_params_last + (1 - x) * dep_params_first
dep_params.append(dp)
model.cov_struct = copy.deepcopy(cov_struct)
model.cov_struct.dep_params = dp
rslt = model.fit(start_params=self.params,
ctol=self.ctol,
params_niter=self.params_niter,
first_dep_update=self.first_dep_update,
cov_type=self.cov_type)
results.append(rslt)
model.update_dep = update_dep
return results
# FIXME: alias to be removed, temporary backwards compatibility
params_sensitivity = sensitivity_params
class GEEResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'centered_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GEEResultsWrapper, GEEResults)
class OrdinalGEE(GEE):
__doc__ = (
" Estimation of ordinal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_ordinal_family_doc,
'example': _gee_ordinal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
if family is None:
family = families.Binomial()
else:
if not isinstance(family, families.Binomial):
raise ValueError("ordinal GEE must use a Binomial family")
if cov_struct is None:
cov_struct = cov_structs.OrdinalIndependence()
endog, exog, groups, time, offset = self.setup_ordinal(
endog, exog, groups, time, offset)
super(OrdinalGEE, self).__init__(endog, exog, groups, time,
family, cov_struct, missing,
offset, dep_data, constraint)
def setup_ordinal(self, endog, exog, groups, time, offset):
"""
Restructure ordinal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
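        For example, if the observed outcomes take the values 1, 2 and 3,
        each observation is expanded into two binary rows, I(y > 1) and
        I(y > 2), with a separate intercept column for each threshold.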
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
nrows = ncut * len(endog)
exog_out = np.zeros((nrows, exog.shape[1]),
dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
intercepts = np.zeros((nrows, ncut), dtype=np.float64)
groups_out = np.zeros(nrows, dtype=groups.dtype)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
exog_out[jrow, :] = exog_row
endog_out[jrow] = (int(endog_value > thresh))
intercepts[jrow, thresh_ix] = 1
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
exog_out = np.concatenate((intercepts, exog_out), axis=1)
# exog column names, including intercepts
xnames = ["I(y>%.1f)" % v for v in endog_cuts]
if type(self.exog_orig) == pd.DataFrame:
xnames.extend(self.exog_orig.columns)
else:
xnames.extend(["x%d" % k for k in range(1, exog.shape[1] + 1)])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve the endog name if there is one
if type(self.endog_orig) == pd.Series:
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(OrdinalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to an OrdinalGEEResults
ord_rslt = OrdinalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# for k in rslt._props:
# setattr(ord_rslt, k, getattr(rslt, k))
return OrdinalGEEResultsWrapper(ord_rslt)
fit.__doc__ = _gee_fit_doc
class OrdinalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for an ordinal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in an ordinal model,
        for specified values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array-like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
        Examples
        --------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ev = [{"sex": 1}, {"sex": 0}]
        >>> rslt.plot_distribution(exog_values=ev)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
exog_means = self.model.exog.mean(0)
ix_icept = [i for i, x in enumerate(self.model.exog_names) if
x.startswith("I(")]
for ev in exog_values:
for k in ev.keys():
if k not in self.model.exog_names:
raise ValueError("%s is not a variable in the model"
% k)
# Get the fitted probability for each level, at the given
# covariate values.
pr = []
for j in ix_icept:
xp = np.zeros_like(self.params)
xp[j] = 1.
for i, vn in enumerate(self.model.exog_names):
if i in ix_icept:
continue
# User-specified value
if vn in ev:
xp[i] = ev[vn]
# Mean value
else:
xp[i] = exog_means[i]
p = 1 / (1 + np.exp(-np.dot(xp, self.params)))
pr.append(p)
pr.insert(0, 1)
pr.append(0)
pr = np.asarray(pr)
prd = -np.diff(pr)
ax.plot(self.model.endog_values, prd, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_ylim(0, 1)
return fig
class OrdinalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(OrdinalGEEResultsWrapper, OrdinalGEEResults)
class NominalGEE(GEE):
__doc__ = (
" Estimation of nominal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_nominal_family_doc,
'example': _gee_nominal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
endog, exog, groups, time, offset = self.setup_nominal(
endog, exog, groups, time, offset)
if family is None:
family = _Multinomial(self.ncut + 1)
if cov_struct is None:
cov_struct = cov_structs.NominalIndependence()
super(NominalGEE, self).__init__(
endog, exog, groups, time, family, cov_struct, missing,
offset, dep_data, constraint)
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def setup_nominal(self, endog, exog, groups, time, offset):
"""
Restructure nominal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
self.ncut = ncut
nrows = len(endog_cuts) * exog.shape[0]
ncols = len(endog_cuts) * exog.shape[1]
exog_out = np.zeros((nrows, ncols), dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
groups_out = np.zeros(nrows, dtype=np.float64)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
u = np.zeros(len(endog_cuts), dtype=np.float64)
u[thresh_ix] = 1
exog_out[jrow, :] = np.kron(u, exog_row)
endog_out[jrow] = (int(endog_value == thresh))
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
# exog names
if type(self.exog_orig) == pd.DataFrame:
xnames_in = self.exog_orig.columns
else:
xnames_in = ["x%d" % k for k in range(1, exog.shape[1] + 1)]
xnames = []
for tr in endog_cuts:
xnames.extend(["%s[%.1f]" % (v, tr) for v in xnames_in])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve endog name if there is one
if type(self.endog_orig) == pd.Series:
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array-like
The exogeneous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
lin_pred : array-like
The values of the linear predictor, length must be multiple
of `ncut`.
Returns
-------
The derivative of the expected endog with respect to the
parameters.
"""
expval = np.exp(lin_pred)
# Reshape so that each row contains all the indicators
# corresponding to one multinomial observation.
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
# The normalizing constant for the multinomial probabilities.
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
# The multinomial probabilities
mprob = expval / denom
# First term of the derivative: denom * expval' / denom^2 =
# expval' / denom.
dmat = mprob[:, None] * exog
# Second term of the derivative: -expval * denom' / denom^2
ddenom = expval[:, None] * exog
dmat -= mprob[:, None] * ddenom / denom[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog for the
multinomial model, used in analyzing marginal effects.
Parameters
----------
exog : array-like
The exogeneous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
        params : array-like
            The model parameters; the linear predictor is formed as
            ``np.dot(exog, params)``.
Returns
-------
The value of the derivative of the expected endog with respect
to exog.
Notes
-----
        offset_exposure must be set to None for the multinomial family.
"""
if offset_exposure is not None:
warnings.warn("Offset/exposure ignored for the multinomial family",
ValueWarning)
lpr = np.dot(exog, params)
expval = np.exp(lpr)
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
bmat0 = np.outer(np.ones(exog.shape[0]), params)
# Masking matrix
qmat = []
for j in range(self.ncut):
ee = np.zeros(self.ncut, dtype=np.float64)
ee[j] = 1
qmat.append(np.kron(ee, np.ones(len(params) // self.ncut)))
qmat = np.array(qmat)
qmat = np.kron(np.ones((exog.shape[0] // self.ncut, 1)), qmat)
bmat = bmat0 * qmat
dmat = expval[:, None] * bmat / denom[:, None]
expval_mb = np.kron(expval_m, np.ones((self.ncut, 1)))
expval_mb = np.kron(expval_mb, np.ones((1, self.ncut)))
dmat -= expval[:, None] * (bmat * expval_mb) / denom[:, None] ** 2
return dmat
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(NominalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
if rslt is None:
warnings.warn("GEE updates did not converge",
ConvergenceWarning)
return None
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to a NominalGEEResults
nom_rslt = NominalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# for k in rslt._props:
# setattr(nom_rslt, k, getattr(rslt, k))
return NominalGEEResultsWrapper(nom_rslt)
fit.__doc__ = _gee_fit_doc
class NominalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for a nominal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
        Plot the fitted probabilities of endog in a nominal model,
        for specified values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array-like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
        Examples
        --------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ex = [{"sex": 1}, {"sex": 0}]
        >>> rslt.plot_distribution(exog_values=ex)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
link = self.model.family.link.inverse
ncut = self.model.family.ncut
k = int(self.model.exog.shape[1] / ncut)
exog_means = self.model.exog.mean(0)[0:k]
exog_names = self.model.exog_names[0:k]
exog_names = [x.split("[")[0] for x in exog_names]
params = np.reshape(self.params,
(ncut, len(self.params) // ncut))
for ev in exog_values:
exog = exog_means.copy()
for k in ev.keys():
if k not in exog_names:
raise ValueError("%s is not a variable in the model"
% k)
ii = exog_names.index(k)
exog[ii] = ev[k]
lpr = np.dot(params, exog)
pr = link(lpr)
pr = np.r_[pr, 1 - pr.sum()]
ax.plot(self.model.endog_values, pr, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_xticks(self.model.endog_values)
ax.set_xticklabels(self.model.endog_values)
ax.set_ylim(0, 1)
return fig
class NominalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(NominalGEEResultsWrapper, NominalGEEResults)
class _MultinomialLogit(Link):
"""
The multinomial logit transform, only for use with GEE.
Notes
-----
The data are assumed coded as binary indicators, where each
observed multinomial value y is coded as I(y == S[0]), ..., I(y ==
S[-1]), where S is the set of possible response labels, excluding
    the largest one. Therefore, functions in this class should only
    be called with vector arguments whose length is a multiple of |S|
    = ncut, which is an argument to be provided when initializing the
class.
call and derivative use a private method _clean to trim p by 1e-10
so that p is in (0, 1)
"""
def __init__(self, ncut):
self.ncut = ncut
def inverse(self, lpr):
"""
Inverse of the multinomial logit transform, which gives the
expected values of the data as a function of the linear
predictors.
Parameters
----------
lpr : array-like (length must be divisible by `ncut`)
The linear predictors
Returns
-------
prob : array
Probabilities, or expected values
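        Examples
        --------
        A small illustrative check: with two thresholds, zero linear
        predictors give equal probabilities of 1/3 for each explicitly
        coded category.
        >>> link = _MultinomialLogit(ncut=2)
        >>> np.allclose(link.inverse(np.zeros(2)), 1. / 3)
        True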
"""
expval = np.exp(lpr)
denom = 1 + np.reshape(expval, (len(expval) // self.ncut,
self.ncut)).sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
prob = expval / denom
return prob
class _Multinomial(families.Family):
"""
Pseudo-link function for fitting nominal multinomial models with
GEE. Not for use outside the GEE class.
"""
links = [_MultinomialLogit, ]
variance = varfuncs.binary
safe_links = [_MultinomialLogit, ]
def __init__(self, nlevels):
"""
Parameters
----------
nlevels : integer
The number of distinct categories for the multinomial
distribution.
"""
self.initialize(nlevels)
def initialize(self, nlevels):
self.ncut = nlevels - 1
self.link = _MultinomialLogit(self.ncut)
from statsmodels.discrete.discrete_margins import (
_get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,
_check_at_is_all, _transform_names, _check_discrete_args,
_get_dummy_index, _get_count_index)
class GEEMargins(object):
"""
Estimated marginal effects for a regression model fit with GEE.
Parameters
----------
results : GEEResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
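    Examples
    --------
    Normally constructed indirectly through ``GEEResults.get_margeff``;
    a direct construction (illustrative, with ``res`` a fitted
    GEEResults instance) would be:
    >>> marg = GEEMargins(res, ('overall', 'dydx', None, False, False))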
"""
def __init__(self, results, args, kwargs={}):
self._cache = resettable_cache()
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = resettable_cache()
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
frame : DataFrames
A DataFrame summarizing the marginal effects.
"""
_check_at_is_all(self.margeff_options)
from pandas import DataFrame
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0) != 0 # True if not a constant
exog_names = self.results.model.exog_names
var_names = [name for i, name in enumerate(exog_names) if ind[i]]
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
return DataFrame(table, columns=names, index=var_names)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return stats.norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = stats.norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
def summary(self, alpha=.05):
"""
Returns a summary table for marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
Summary : SummaryTable
A SummaryTable instance
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = results.model
title = model.__class__.__name__ + " Marginal Effects"
method = self.margeff_options['method']
top_left = [('Dep. Variable:', [model.endog_names]),
('Method:', [method]),
('At:', [self.margeff_options['at']]), ]
from statsmodels.iolib.summary import (Summary, summary_params,
table_extend)
exog_names = model.exog_names[:] # copy
smry = Summary()
const_idx = model.data.const_idx
if const_idx is not None:
exog_names.pop(const_idx)
J = int(getattr(model, "J", 1))
if J > 1:
yname, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
else:
yname = model.endog_names
yname_list = [yname]
smry.add_table_2cols(self, gleft=top_left, gright=[],
yname=yname, xname=exog_names, title=title)
# NOTE: add_table_params is not general enough yet for margeff
# could use a refactor with getattr instead of hard-coded params
# tvalues etc.
table = []
conf_int = self.conf_int(alpha)
margeff = self.margeff
margeff_se = self.margeff_se
tvalues = self.tvalues
pvalues = self.pvalues
if J > 1:
for eq in range(J):
restup = (results, margeff[:, eq], margeff_se[:, eq],
tvalues[:, eq], pvalues[:, eq], conf_int[:, :, eq])
tble = summary_params(restup, yname=yname_list[eq],
xname=exog_names, alpha=alpha,
use_t=False,
skip_header=True)
tble.title = yname_list[eq]
# overwrite coef with method name
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|',
'[%3.1f%% Conf. Int.]' % (100 - alpha * 100)]
tble.insert_header_row(0, header)
# from IPython.core.debugger import Pdb; Pdb().set_trace()
table.append(tble)
table = table_extend(table, keep_headers=True)
else:
restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
table = summary_params(restup, yname=yname, xname=exog_names,
alpha=alpha, use_t=False, skip_header=True)
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100 - alpha * 100)]
table.insert_header_row(0, header)
smry.tables.append(table)
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
self._reset() # always reset the cache when this is called
# TODO: if at is not all or overall, we can also put atexog values
# in summary table head
method = method.lower()
at = at.lower()
_check_margeff_args(at, method)
self.margeff_options = dict(method=method, at=at)
results = self.results
model = results.model
params = results.params
exog = model.exog.copy() # copy because values are changed
effects_idx = exog.var(0) != 0
const_idx = model.data.const_idx
if dummy:
_check_discrete_args(at, method)
dummy_idx, dummy = _get_dummy_index(exog, const_idx)
else:
dummy_idx = None
if count:
_check_discrete_args(at, method)
count_idx, count = _get_count_index(exog, const_idx)
else:
count_idx = None
# get the exogenous variables
exog = _get_margeff_exog(exog, at, atexog, effects_idx)
# get base marginal effects, handled by sub-classes
effects = model._derivative_exog(params, exog, method,
dummy_idx, count_idx)
effects = _effects_at(effects, at)
if at == 'all':
self.margeff = effects[:, effects_idx]
else:
# Set standard error of the marginal effects by Delta method.
margeff_cov, margeff_se = margeff_cov_with_se(
model, params, exog, results.cov_params(), at,
model._derivative_exog, dummy_idx, count_idx,
method, 1)
# don't care about at constant
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
self.margeff_se = margeff_se[effects_idx]
self.margeff = effects[effects_idx]
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
0.21/_downloads/adc71f8bf709dad236827dbaa76100e9/plot_decoding_time_generalization_conditions.py
|
4
|
3617
|
"""
=========================================================================
Decoding sensor space data with generalization across time and conditions
=========================================================================
This example runs the analysis described in [1]_. It illustrates how one can
fit a linear classifier to identify a discriminatory topography at a given time
instant and subsequently assess whether this linear model can accurately
predict all of the time samples of a second set of conditions.
References
----------
.. [1] King & Dehaene (2014) 'Characterizing the dynamics of mental
representations: the Temporal Generalization method', Trends In
Cognitive Sciences, 18(4), 203-210. doi: 10.1016/j.tics.2014.01.002.
"""
# Authors: Jean-Remi King <[email protected]>
# Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import GeneralizingEstimator
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
events_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
picks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels
raw.filter(1., 30., fir_design='firwin') # Band pass filtering signals
events = mne.read_events(events_fname)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2,
'Visual/Left': 3, 'Visual/Right': 4}
tmin = -0.050
tmax = 0.400
# decimate to make the example faster to run, but then use verbose='error' in
# the Epochs constructor to suppress warning about decimation causing aliasing
decim = 2
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax,
proj=True, picks=picks, baseline=None, preload=True,
reject=dict(mag=5e-12), decim=decim, verbose='error')
###############################################################################
# We will train the classifier on all left visual vs auditory trials
# and test on all right visual vs auditory trials.
clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
time_gen = GeneralizingEstimator(clf, scoring='roc_auc', n_jobs=1,
verbose=True)
# Fit classifiers on the epochs where the stimulus was presented to the left.
# Note that the experimental condition y indicates auditory or visual
time_gen.fit(X=epochs['Left'].get_data(),
y=epochs['Left'].events[:, 2] > 2)
###############################################################################
# Score on the epochs where the stimulus was presented to the right.
scores = time_gen.score(X=epochs['Right'].get_data(),
y=epochs['Right'].events[:, 2] > 2)
###############################################################################
# Plot
fig, ax = plt.subplots(1)
im = ax.matshow(scores, vmin=0, vmax=1., cmap='RdBu_r', origin='lower',
extent=epochs.times[[0, -1, 0, -1]])
ax.axhline(0., color='k')
ax.axvline(0., color='k')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Generalization across time and condition')
plt.colorbar(im, ax=ax)
plt.show()
|
bsd-3-clause
|
Julio-Anjos/Bighybrid
|
tools/count_elements-mra.py
|
4
|
4417
|
#!/usr/bin/python
import sys
import matplotlib.pyplot as plt; plt.rcdefaults()
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
plt.rcParams.update({'font.size': 12})
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
from matplotlib.colors import colorConverter
class Task:
MAP = 0
REDUCE = 1
SHUFFLE = 2
def __init__(self, t, start=0, end=0):
self._start = start
self._end = end
self._type = t
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def type(self):
return self._type
@start.setter
def start(self, v):
self._start = v
@end.setter
def end(self, v):
self._end = v
@type.setter
def type(self, v):
self._type = v
if len(sys.argv) < 3:
print 'Please provide input and output files'
exit(-1)
events = []
tasks = {}
finput = open(sys.argv[1], 'r')
# First line seems to be labels
finput.readline()
for line in finput:
fields = line.strip().split(',')
taskid = fields[0]
desc = fields[1]
time = float(fields[3])
operation = fields[4]
if 'START' in operation:
if '_REDUCE' in desc:
shuffle = Task(Task.SHUFFLE, start=time)
tasks[taskid + '_r'] = shuffle
else:
task = Task(Task.MAP, start=time)
tasks[taskid] = task
elif 'END' in operation:
if '_REDUCE' in desc:
shuffle = tasks[taskid + '_r']
shuffle.end = float(fields[5])
reduce = Task(Task.REDUCE, start=float(fields[5]), end=time)
tasks[taskid] = reduce
events.append(shuffle.end)
else:
map = tasks[taskid]
map.end = time
events.append(time)
finput.close()
foutput = open(sys.argv[2], 'w')
foutput.write("time,n_maps,n_reduces,n_shuffles\n")
maps = [0]
reduces = [0]
shuffles = [0]
times = [0]
last_time = 0
events.sort()
for time in events:
map = 0
reduce = 0
shuffle = 0
for task in tasks.values():
if task.start <= time and task.end >= time:
# if cmp(task.start, time) <= 0 and cmp(task.end, time) >= 0:
if task.type == Task.MAP:
map += 1
elif task.type == Task.REDUCE:
reduce += 1
else:
shuffle += 1
foutput.write( '%.5f,%d,%d,%d\n' % (time, map, reduce, shuffle) )
times.append(time)
maps.append(map)
reduces.append(reduce)
shuffles.append(shuffle)
last_time = time
foutput.close()
times.append(last_time + 0.01)
maps.append(0)
reduces.append(0)
shuffles.append(0)
# fig, ax = plt.subplots()
# ax.plot(np.array(times), maps, 'r-', alpha=0.5, linewidth=2)
# ax.plot(np.array(times), reduces, 'b-', alpha=0.5, linewidth=2)
# ax.plot(np.array(times), shuffles, 'g-', alpha=0.5, linewidth=2)
# ax.legend(['maps', 'reduces', 'shuffles'])
# ax.set_ylim(0,15)
# ax.set_ylabel("Number of elements")
# ax.set_xlabel("Time")
# code required to create a 3d, filled line graph
fig = plt.figure()
ax = fig.gca(projection='3d')
cc = lambda arg: colorConverter.to_rgba(arg)
verts = []
zs = [0.1, 0.2, 0.3]
verts.append(list(zip(times, reduces)))
verts.append(list(zip(times, maps)))
verts.append(list(zip(times, shuffles)))
poly = PolyCollection(verts, offsets=None, facecolors = [ cc('#48d1cc'), cc('r'), cc('#f0ffff')], closed=False)
poly.set_alpha(0.75)
ax.add_collection3d(poly, zs=zs, zdir='y')
ax.invert_zaxis()
ax.view_init(elev=15., azim=280)
ax.set_xlabel('Time (s)')
ax.set_xlim3d(0, last_time)
ax.set_ylim3d(0.1, 0.3)
ax.set_zlabel('\# Tasks')
ax.set_zlim3d(0, 12)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_yaxis().set_ticks([])
reduce_proxy = plt.Rectangle((0, 0), 1, 1, fc=cc('#48d1cc'))
map_proxy = plt.Rectangle((0, 0), 1, 1, fc=cc('r'))
shuffle_proxy = plt.Rectangle((0, 0), 1, 1, fc=cc('#f0ffff'))
ax.legend([map_proxy,shuffle_proxy, reduce_proxy],['map', 'shuffle', 'reduce'], ncol=3, loc='upper center')
#plt.show()
plt.savefig('graph.png')
|
gpl-3.0
|
nanophotonics/nplab
|
nplab/experiment/fiber_raman/experiment.py
|
1
|
4712
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import range
import numpy as np
import matplotlib.pyplot as plt
from nplab.instrument.spectrometer.acton_2300i import Acton
from nplab.instrument.camera.Picam.pixis import Pixis
from .Pacton import Pacton
from nplab import datafile as df
from .spectrum_aligner_ir import grating_300gmm as get_wavelength_map #grating_300gmm
mapper = get_wavelength_map()
def SetSensorTemperatureSetPoint(self,temperature):
param_name = "PicamParameter_SensorTemperatureSetPoint"
return self.set_parameter(parameter_name=param_name,parameter_value=temperature)
def nm_to_raman_shift(laser_wavelength,wavelength):
raman_shift = 1e7*(1.0/laser_wavelength - 1.0/wavelength)
return raman_shift
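# Quick sanity check (illustrative numbers only): with a 785 nm laser and a
# scattered wavelength of 850 nm, the Stokes shift is
# 1e7 * (1/785. - 1/850.) ~= 974 cm^-1.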
def initialize_datafile(path):
"""
This function defines the format of the group structure
Parameters are path on filesystem
"""
f = df.DataFile(path, 'a')
return f
def initialize_measurement(acton_port, exposure_time = 100):
print("Starting..")
print("Pixis...")
p = Pixis(debug=1)
p.StartUp()
print("Acton...")
act = Acton(port=acton_port, debug=1)
print("Done...")
pacton = Pacton(pixis=p,acton=act)
print("Measuring...")
p.SetExposureTime(exposure_time)
# print pacton.acton.read_grating()
pacton.acton.set_grating(1)
# print "New grating",pacton.acton.read_grating_name()
return pacton
# Manually find the COM port of the Acton spectrometer - set exposure time (default)
def single_shot(pacton, file, center_wavelength = 0, show_ = True):
data_group = file.require_group("zero_wavelength_images")
img = pacton.get_image(center_wavelength,debug=0)
exptime = pacton.pixis.GetExposureTime()
attrs = {"exposure_time":exptime,"center_wavelength":center_wavelength}
data_group.create_dataset("image_%d",data=img, attrs = attrs)
data_group.file.flush()
if show_ == True:
#exercise: put in if statement, only run when suitable flag (ie. input parameter is set to true)
fig, ax = plt.subplots(1)
ax.imshow(img, cmap='gray')
plt.show()
def get_spectrum(pacton,file,y_roi,center_wavelength,exposure_time,show_= False, debug = 0, laser_wavelength = None):
if debug > 0:
print("y_roi",y_roi)
print("center_wavelength",center_wavelength)
print("exposure_time",exposure_time)
spectrum_group = file.require_group("spectrum")
#run calibration to get rid of low pixel value high intensity peaks due to camera response
#get spectrum, drop interpolated wavelengths - they are wrong!
[ymin,ymax] = y_roi
xmin,xmax = [0,1024] #default to be over entire range!
roi = [xmin,xmax,ymin,ymax]
if debug > 0: print("Fiddling with exposure...")
previous_exposure_time = pacton.pixis.GetExposureTime()
pacton.pixis.SetExposureTime(exposure_time)
if debug > 0: print("Getting spectrum...")
spectrum,_ = pacton.get_spectrum(center_wavelength=center_wavelength,roi=roi)
pacton.pixis.SetExposureTime(previous_exposure_time)
pixel_indices = np.arange(0,1014)
if debug > 0: print("Starting wavelength map...")
wavelengths = [mapper(center_wavelength,i) for i in pixel_indices]
if laser_wavelength is not None:
raman_shifts = [nm_to_raman_shift(laser_wavelength=laser_wavelength,wavelength=wl) for wl in wavelengths]
attrs = {
"wavelengths":wavelengths,
"raman_shift": raman_shifts,
"center_wavelength":center_wavelength,
"roi":roi,
"exposure_time[ms]": exposure_time
}
if debug > 0: print("writing data...")
spectrum_group.create_dataset("series_%d",data=spectrum,attrs=attrs)
if show_ == True:
if laser_wavelength is None:
fig, ax =plt.subplots(1)
ax.plot(wavelengths,spectrum)
ax.set_xlabel("wavelength [nm]")
ax.set_ylabel("intensity [a.u.]")
elif laser_wavelength is not None:
fig, [ax1,ax2] = plt.subplots(2)
ax1.plot(wavelengths,spectrum)
ax2.plot(raman_shifts,spectrum)
ax1.set_xlabel("wavelength [nm]")
ax1.set_ylabel("intensity [a.u.]")
ax2.set_xlabel("raman shift [$cm^{-1}$]")
ax2.set_ylabel("intensity [a.u.]")
plt.show()
def experiment(pacton, file, functions,argss,kwargss):
for f, args,kwargs in zip(functions,argss,kwargss):
f(pacton,file,*args,**kwargs)
return 0
if __name__ == "__main__":
file = initialize_datafile('C:\\Users\\Hera\\Desktop\\New folder\\20190315\\spectra\\ECDMCCenter840_vc1percent.hdf5')
pacton = initialize_measurement('COM5', 100)
pacton.get_pixel_response_calibration_spectrum()
# experiment(pacton,file, [single_shot],[()],[({"show_":True})])
for i in range(30):
get_spectrum(pacton,file,y_roi=[514,600],center_wavelength=840,exposure_time=10000,laser_wavelength=785,debug=0)
print(i)
#single_shot(pacton,file,show_ = True)
|
gpl-3.0
|
olologin/scikit-learn
|
sklearn/datasets/tests/test_20news.py
|
280
|
3045
|
"""Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
|
bsd-3-clause
|
gfyoung/pandas
|
pandas/tests/window/moments/test_moments_rolling_quantile.py
|
2
|
5037
|
from functools import partial
import numpy as np
import pytest
from pandas import DataFrame, Series, concat, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = int(per / 1.0 * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = idx / (values.shape[0] - 1)
qhig = (idx + 1) / (values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
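# Worked example of the interpolation above (illustrative, not part of the
# tests): for a = [0, 1, 2, 3] and per = 0.5, idx = 1, qlow = 1/3 and
# qhig = 2/3, so the result is 1 + (2 - 1) * (0.5 - 1/3) / (2/3 - 1/3) = 1.5,
# the conventional median of the four values.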
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_series(series, q):
compare_func = partial(scoreatpercentile, per=q)
result = series.rolling(50).quantile(q)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_frame(raw, frame, q):
compare_func = partial(scoreatpercentile, per=q)
result = frame.rolling(50).quantile(q)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_time_rule_series(series, q):
compare_func = partial(scoreatpercentile, per=q)
win = 25
ser = series[::2].resample("B").mean()
series_result = ser.rolling(window=win, min_periods=10).quantile(q)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_time_rule_frame(raw, frame, q):
compare_func = partial(scoreatpercentile, per=q)
win = 25
frm = frame[::2].resample("B").mean()
frame_result = frm.rolling(window=win, min_periods=10).quantile(q)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_nans(q):
compare_func = partial(scoreatpercentile, per=q)
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = obj.rolling(50, min_periods=30).quantile(q)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = obj.rolling(20, min_periods=15).quantile(q)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(np.random.randn(20))
result = obj2.rolling(10, min_periods=5).quantile(q)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
result0 = obj.rolling(20, min_periods=0).quantile(q)
result1 = obj.rolling(20, min_periods=1).quantile(q)
tm.assert_almost_equal(result0, result1)
@pytest.mark.parametrize("minp", [0, 99, 100])
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_min_periods(series, minp, q):
result = series.rolling(len(series) + 1, min_periods=minp).quantile(q)
expected = series.rolling(len(series), min_periods=minp).quantile(q)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_center(q):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = obj.rolling(20, center=True).quantile(q)
expected = (
concat([obj, Series([np.NaN] * 9)])
.rolling(20)
.quantile(q)[9:]
.reset_index(drop=True)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_center_reindex_series(series, q):
    # shifted index
s = [f"x{x:d}" for x in range(12)]
series_xp = (
series.reindex(list(series.index) + s)
.rolling(window=25)
.quantile(q)
.shift(-12)
.reindex(series.index)
)
series_rs = series.rolling(window=25, center=True).quantile(q)
tm.assert_series_equal(series_xp, series_rs)
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_center_reindex_frame(frame, q):
    # shifted index
s = [f"x{x:d}" for x in range(12)]
frame_xp = (
frame.reindex(list(frame.index) + s)
.rolling(window=25)
.quantile(q)
.shift(-12)
.reindex(frame.index)
)
frame_rs = frame.rolling(window=25, center=True).quantile(q)
tm.assert_frame_equal(frame_xp, frame_rs)
|
bsd-3-clause
|
napjon/ds-nd
|
p5-introml/tools/startup.py
|
7
|
1048
|
#!/usr/bin/python
print
print "checking for nltk"
try:
import nltk
except ImportError:
print "you should install nltk before continuing"
print "checking for numpy"
try:
import numpy
except ImportError:
print "you should install numpy before continuing"
print "checking for sklearn"
try:
import sklearn
except:
print "you should install sklearn before continuing"
print
print "downloading the Enron dataset (this may take a while)"
print "to check on progress, you can cd up one level, then execute <ls -lthr>"
print "Enron dataset should be last item on the list, along with its current size"
print "download will complete at about 423 MB"
import urllib
url = "https://www.cs.cmu.edu/~./enron/enron_mail_20150507.tgz"
urllib.urlretrieve(url, filename="../enron_mail_20150507.tgz")
print "download complete!"
print
print "unzipping Enron dataset (this may take a while)"
import tarfile
import os
os.chdir("..")
tfile = tarfile.open("enron_mail_20150507.tgz", "r:gz")
tfile.extractall(".")
print "you're ready to go!"
|
mit
|
caneGuy/spark
|
python/pyspark/sql/tests/test_dataframe.py
|
4
|
34680
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pydoc
import time
import unittest
from pyspark.sql import SparkSession, Row
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException, IllegalArgumentException
from pyspark.testing.sqlutils import ReusedSQLTestCase, SQLTestUtils, have_pyarrow, have_pandas, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
class DataFrameTests(ReusedSQLTestCase):
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegexp(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
# add tests for SPARK-23647 (test more types for hint)
def test_extended_hint_types(self):
from pyspark.sql import DataFrame
df = self.spark.range(10e10).toDF("id")
such_a_nice_list = ["itworks1", "itworks2", "itworks3"]
hinted_df = df.hint("my awesome hint", 1.2345, "what", such_a_nice_list)
logical_plan = hinted_df._jdf.queryExecution().logical()
self.assertEqual(1, logical_plan.toString().count("1.2345"))
self.assertEqual(1, logical_plan.toString().count("what"))
self.assertEqual(3, logical_plan.toString().count("itworks"))
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, Spark-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_cache(self):
spark = self.spark
with self.tempView("tab1", "tab2"):
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]}, columns=["d", "ts"])
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test with schema will accept pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from pandas.util.testing import assert_frame_equal
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
def test_to_local_iterator(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator()
self.assertEqual(expected, list(it))
# Test DataFrame with empty partition
df = self.spark.range(3, numPartitions=4)
it = df.toLocalIterator()
expected = df.collect()
self.assertEqual(expected, list(it))
def test_to_local_iterator_prefetch(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator(prefetchPartitions=True)
self.assertEqual(expected, list(it))
def test_to_local_iterator_not_fully_consumed(self):
# SPARK-23961: toLocalIterator throws exception when not fully consumed
# Create a DataFrame large enough so that write to socket will eventually block
df = self.spark.range(1 << 20, numPartitions=2)
it = df.toLocalIterator()
self.assertEqual(df.take(1)[0], next(it))
with QuietTest(self.sc):
it = None # remove iterator from scope, socket is closed when cleaned up
# Make sure normal df operations still work
result = []
for i, row in enumerate(df.toLocalIterator()):
result.append(row)
if i == 7:
break
self.assertEqual(df.take(8), result)
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because they use 'spark.sql.queryExecutionListeners', which is
# static and immutable. This can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
if __name__ == "__main__":
from pyspark.sql.tests.test_dataframe import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
public-ink/public-ink
|
server/appengine/lib/matplotlib/testing/__init__.py
|
10
|
3767
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from contextlib import contextmanager
from matplotlib.cbook import is_string_like, iterable
from matplotlib import rcParams, rcdefaults, use
def _is_list_like(obj):
"""Returns whether the obj is iterable and not a string"""
return not is_string_like(obj) and iterable(obj)
# stolen from pandas
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
raise a warning. Defaults to ``Warning``, the base class of all warnings
(basically a wrapper around ``warnings.catch_warnings``).
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
.. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
# make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not _is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
assert not extra_warnings, ("Caused unexpected warning(s): %r."
% extra_warnings)
def set_font_settings_for_testing():
rcParams['font.family'] = 'DejaVu Sans'
rcParams['text.hinting'] = False
rcParams['text.hinting_factor'] = 8
def setup():
# The baseline images are created in this locale, so we should use
# it during all of the tests.
import locale
import warnings
from matplotlib.backends import backend_agg, backend_pdf, backend_svg
try:
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
except locale.Error:
warnings.warn(
"Could not set locale to English/United States. "
"Some date-related tests may fail")
use('Agg', warn=False) # use Agg backend for these tests
# These settings *must* be hardcoded for running the comparison
# tests and are not necessarily the default values as specified in
# rcsetup.py
rcdefaults() # Start with all defaults
set_font_settings_for_testing()
|
gpl-3.0
|
iismd17/scikit-learn
|
sklearn/neighbors/tests/test_kde.py
|
208
|
5556
|
import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
|
bsd-3-clause
|
mtrbean/scipy
|
scipy/stats/kde.py
|
18
|
17306
|
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
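For example, with ``n = 1000`` points in ``d = 2`` dimensions both rules
reduce to ``1000**(-1./6.) ≈ 0.316``, since ``(d + 2) / 4.`` equals 1 when
``d`` is 2.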
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=np.float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
diff = self.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
sum_cov)) / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
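Examples
--------
A minimal sketch, assuming a small 1-D dataset; integrating over the whole
real line gives one:
>>> import numpy as np
>>> from scipy import stats
>>> kde = stats.gaussian_kde([-1., 0., 1.])
>>> float(kde.integrate_box_1d(-np.inf, np.inf))
1.0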
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
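Examples
--------
A minimal sketch, assuming a 2-D dataset of 100 points; the sample keeps
the dimensionality of the dataset:
>>> import numpy as np
>>> from scipy import stats
>>> kde = stats.gaussian_kde(np.random.randn(2, 100))
>>> kde.resample(size=10).shape
(2, 10)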
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
|
bsd-3-clause
|
robcarver17/pysystemtrade
|
sysexecution/orders/list_of_orders.py
|
1
|
2634
|
import numpy as np
import datetime
import pandas as pd
from sysexecution.trade_qty import listOfTradeQuantity, tradeQuantity
class listOfFillDatetime(list):
def final_fill_datetime(self):
valid_dates = [dt for dt in self if dt is not None]
return max(valid_dates)
class listOfOrders(list):
def as_pd(self) -> pd.DataFrame:
date_list = [order.fill_datetime for order in self]
key_list = [order.key for order in self]
trade_list = [order.trade for order in self]
fill_list = [order.fill for order in self]
id_list = [order.order_id for order in self]
price_list = [order.filled_price for order in self]
pd_df = pd.DataFrame(
dict(
fill_datetime=date_list,
key=key_list,
trade=trade_list,
fill=fill_list,
price=price_list),
index=id_list,
)
return pd_df
def list_of_filled_price(self) -> list:
list_of_filled_price = [
order.filled_price for order in self]
return list_of_filled_price
def average_fill_price(self) -> float:
def _nan_for_none(x):
if x is None:
return np.nan
else:
return x
list_of_filled_price = self.list_of_filled_price()
list_of_filled_price = [_nan_for_none(x) for x in list_of_filled_price]
average_fill_price = np.nanmean(list_of_filled_price)
if np.isnan(average_fill_price):
return None
return average_fill_price
def list_of_filled_datetime(self) -> listOfFillDatetime:
list_of_filled_datetime = listOfFillDatetime(
[order.fill_datetime for order in self]
)
return list_of_filled_datetime
def final_fill_datetime(self) -> datetime.datetime:
list_of_filled_datetime = self.list_of_filled_datetime()
final_fill_datetime = list_of_filled_datetime.final_fill_datetime()
return final_fill_datetime
def list_of_filled_qty(self) -> listOfTradeQuantity:
list_of_filled_qty = [order.fill for order in self]
list_of_filled_qty = listOfTradeQuantity(list_of_filled_qty)
return list_of_filled_qty
def total_filled_qty(self) -> tradeQuantity:
list_of_filled_qty = self.list_of_filled_qty()
return list_of_filled_qty.total_filled_qty()
def all_zero_fills(self) -> bool:
list_of_filled_qty = self.list_of_filled_qty()
zero_fills = [fill.equals_zero() for fill in list_of_filled_qty]
return all(zero_fills)
|
gpl-3.0
|
open-craft/edx-analytics-pipeline
|
edx/analytics/tasks/reports/enrollments.py
|
2
|
11885
|
"""Enrollment related reports"""
import csv
from datetime import timedelta, date
import luigi
import luigi.hdfs
import numpy
import pandas
from edx.analytics.tasks.util.tsv import read_tsv
from edx.analytics.tasks.url import ExternalURL, get_target_from_url, url_path_join
from edx.analytics.tasks.course_enroll import CourseEnrollmentChangesPerDay
from edx.analytics.tasks.mapreduce import MapReduceJobTaskMixin
from edx.analytics.tasks.util.opaque_key_util import get_org_id_for_course
DEFAULT_NUM_WEEKS = 52
DEFAULT_NUM_DAYS = 28
class CourseEnrollmentCountMixin(MapReduceJobTaskMixin):
""" Provides common parameters used in executive report tasks """
name = luigi.Parameter()
src = luigi.Parameter(
is_list=True,
config_path={'section': 'enrollment-reports', 'name': 'src'},
)
include = luigi.Parameter(is_list=True, default=('*',))
weeks = luigi.IntParameter(default=DEFAULT_NUM_WEEKS)
days = luigi.Parameter(default=DEFAULT_NUM_DAYS)
offsets = luigi.Parameter(default=None)
history = luigi.Parameter(default=None)
date = luigi.DateParameter(default=date.today())
statuses = luigi.Parameter(default=None)
manifest = luigi.Parameter(default=None)
manifest_path = luigi.Parameter(default=None)
destination_directory = luigi.Parameter(default=None)
destination = luigi.Parameter(config_path={'section': 'enrollment-reports', 'name': 'destination'})
credentials = luigi.Parameter(
config_path={'section': 'database-import', 'name': 'credentials'}
)
blacklist = luigi.Parameter(config_path={'section': 'enrollment-reports', 'name': 'blacklist'})
"""Provides methods useful for generating reports using course enrollment counts."""
def read_course_date_count_tsv(self, input_file):
"""Read TSV file with hard-coded column names into a pandas DataFrame."""
names = ['course_id', 'date', 'count']
# Not assuming any encoding, course_id will be read as plain string
data = read_tsv(input_file, names)
data.date = pandas.to_datetime(data.date)
return data
def initialize_daily_count(self, course_date_count_data):
"""
Reorganize a course-date-count data table to index by date.
Args:
course_date_count_data: Pandas dataframe with one row per course_id and
columns for the date and count.
Returns:
Pandas dataframe with one column per course_id, and
indexed rows for the date. Counts are set to zero for
dates that are missing.
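For example, rows for one course dated 2014-01-01 and 2014-01-03 produce a
single column for that course indexed from 2014-01-01 through 2014-01-03,
with the missing 2014-01-02 count set to zero.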
"""
data = course_date_count_data.pivot(
index='date',
columns='course_id',
values='count',
)
# Complete the range of data to include all days between
# the dates of the first and last events.
date_range = pandas.date_range(min(data.index), max(data.index))
data = data.reindex(date_range)
data = data.fillna(0)
return data
def add_offsets_to_daily_count(self, count_by_day, offsets):
"""
Add offsets to a dataframe in-place.
Args:
count_by_day: Pandas dataframe with one column per course_id, and
indexed rows for the date.
offsets: Pandas dataframe with one row per course_id and
columns for the date and count of the offset.
"""
for _, (course_id, date, count) in offsets.iterrows():
if course_id in count_by_day.columns:
# The offsets are computed to the beginning of that day. We
# add them to the counts by the end of that day to
# get the correct count for the day.
count_by_day.loc[date, course_id] += count
else:
# We have an offset for the course, but no current
# counts. Create a course entry, set the offset, and set
# all subsequent counts to zero.
count_by_day.loc[date, course_id] = count
count_by_day.loc[count_by_day.index > date, course_id] = 0
# Flag values before the offset day with NaN,
# since they are not "available".
not_available = count_by_day.index < date
count_by_day.loc[not_available, course_id] = numpy.NaN
def calculate_total_enrollment(self, count_by_day, offsets=None):
"""
Accumulate enrollment changes per day to find total enrollment per day.
Args:
count_by_day: Pandas dataframe with one column per course_id, and
indexed rows for the date. Counts are net changes in enrollment
during the day for each course.
offsets: Pandas dataframe with one row per course_id and
columns for the date and count of the offset. The offset
for a course is used to provide total enrollment counts
at a point in time right before the timeframe covered by count_by_day.
"""
if offsets is not None:
self.add_offsets_to_daily_count(count_by_day, offsets)
# Calculate the cumulative sum per day of the input.
# Entries with NaN stay NaN.
# At this stage only the data prior to the offset should contain NaN.
cumulative_sum = count_by_day.cumsum()
return cumulative_sum
def select_weekly_values(self, daily_values, start, weeks):
"""
Sample daily values on a weekly basis.
Args:
daily_values: Pandas dataframe with one column per course_id, and
indexed rows for the date.
start: last day to request.
weeks: number of weeks to sample (including the last day)
"""
# List the dates of the last day of each week requested.
days = [start - timedelta(i * 7) for i in reversed(xrange(0, weeks))]
# Sample the cumulative data on the requested days.
# Result is NaN if there is no data available for that date.
results = daily_values.loc[days]
return results
class EnrollmentsByWeek(luigi.Task, CourseEnrollmentCountMixin):
"""Calculates cumulative enrollments per week per course.
Parameters:
source: Location of daily enrollments per date. The format is a hadoop
tsv file, with fields course_id, date and count.
destination: Location of the resulting report. The output format is an
Excel CSV file with course_id and one column per requested week.
offsets: Location of seed values for each course. The format is a
hadoop tsv file, with fields course_id, date and offset.
date: End date of the last week requested.
weeks: Number of weeks from the end date to request.
Output:
Excel CSV file with one row per course. The columns are
the cumulative enrollment counts for each week requested.
"""
def requires(self):
results = {
'source': CourseEnrollmentChangesPerDay(
name=self.name,
src=self.src,
dest=self.destination,
include=self.include,
manifest=self.manifest,
mapreduce_engine=self.mapreduce_engine,
lib_jar=self.lib_jar,
n_reduce_tasks=self.n_reduce_tasks
)
}
if self.offsets:
results.update({'offsets': ExternalURL(self.offsets)})
if self.statuses:
results.update({'statuses': ExternalURL(self.statuses)})
return results
def output(self):
return get_target_from_url(url_path_join(self.destination, "weekly_enrollments_{0}.csv".format(self.name)))
def run(self):
# Load the data into pandas dataframes
daily_enrollment_changes = self.read_source()
offsets = self.read_offsets()
daily_enrollment_totals = self.calculate_total_enrollment(daily_enrollment_changes, offsets)
# Sample the cumulative data on the requested days.
# Result is NaN if there is no data available for that date.
weekly_enrollment_totals = self.select_weekly_values(
daily_enrollment_totals,
self.date,
self.weeks
)
statuses = self.read_statuses()
with self.output().open('w') as output_file:
self.save_output(weekly_enrollment_totals, statuses, output_file)
def read_source(self):
"""
Read source into a pandas DataFrame.
Returns:
Pandas dataframe with one column per course_id. Indexed
for the time interval available in the source data.
"""
with self.input()['source'].open('r') as input_file:
course_date_count_data = self.read_course_date_count_tsv(input_file)
data = self.initialize_daily_count(course_date_count_data)
return data
def read_offsets(self):
"""
Read offsets into a pandas DataFrame.
Returns:
Pandas dataframe with one row per course_id and
columns for the date and count of the offset.
Returns None if no offset was specified.
"""
data = None
if self.input().get('offsets'):
with self.input()['offsets'].open('r') as offset_file:
data = self.read_course_date_count_tsv(offset_file)
return data
def read_statuses(self):
"""
Read course statuses into a pandas DataFrame.
Returns:
Pandas dataframe with one row per course_id and
a column for the status. The status should
be either "past", "current" or "new". The index
for the DataFrame is the course_id.
Returns None if no statuses were specified.
"""
data = None
names = ['course_id', 'status']
if self.input().get('statuses'):
with self.input()['statuses'].open('r') as status_file:
data = read_tsv(status_file, names)
data = data.set_index('course_id')
return data
def save_output(self, results, statuses, output_file):
results = results.transpose()
# List of fieldnames for the report
fieldnames = ['status', 'course_id', 'org_id'] + list(results.columns)
writer = csv.DictWriter(output_file, fieldnames)
writer.writerow(dict((k, k) for k in fieldnames)) # Write header
def format_counts(counts_dict):
for k, v in counts_dict.iteritems():
yield k, '-' if numpy.isnan(v) else int(v)
for course_id, series in results.iterrows():
# Course_id is passed throughout these reports as a
# utf8-encoded str, so it must be locally converted to
# unicode before parsing for org.
org_id = get_org_id_for_course(course_id.decode('utf-8'))
values = {
'course_id': course_id,
'status': self.get_status_for_course(course_id, statuses),
'org_id': org_id or '-',
}
by_week_values = format_counts(series.to_dict())
values.update(by_week_values)
writer.writerow(values)
def get_status_for_course(self, course_id, statuses):
'''
Args:
course_id(str): The identifier for the course. Should be formatted
as <org_id>/<name>/<run>.
statuses(pandas.DataFrame): A pandas DataFrame mapping course_ids
to course statuses. It is expected to be indexed on course_id.
Returns:
The course's status as a string.
'''
if statuses is None or course_id not in statuses.index:
return '-'
return statuses.loc[course_id]['status']
|
agpl-3.0
|
Alex-Ian-Hamilton/sunpy
|
sunpy/map/sources/soho.py
|
1
|
8975
|
"""SOHO Map subclass definitions"""
from __future__ import absolute_import, print_function, division
#pylint: disable=W0221,W0222,E1101,E1121
__author__ = "Keith Hughitt"
__email__ = "[email protected]"
import numpy as np
from matplotlib import colors
from astropy.units import Quantity
from astropy.visualization import PowerStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from sunpy.map import GenericMap
from sunpy.sun import constants
from sunpy.sun import sun
from sunpy.cm import cm
__all__ = ['EITMap', 'LASCOMap', 'MDIMap']
def _dsunAtSoho(date, rad_d, rad_1au=None):
"""Determines the distance to the Sun from SOhO following
d_{\sun,Object} =
D_{\sun\earth} \frac{\tan(radius_{1au}[rad])}{\tan(radius_{d}[rad])}
though tan x ~ x for x << 1
d_{\sun,Object} =
D_{\sun\eart} \frac{radius_{1au}[rad]}{radius_{d}[rad]}
since radius_{1au} and radius_{d} are dividing each other we can use [arcsec]
instead.
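For example, an apparent solar radius twice as large as the 1 AU value
implies a distance of roughly 0.5 AU from the Sun.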
---
TODO: Does this apply just to observations on the same Earth-Sun line?
If not it can be moved outside here.
"""
if not rad_1au:
rad_1au = sun.solar_semidiameter_angular_size(date)
dsun = sun.sunearth_distance(date) * constants.au * (rad_1au / rad_d)
# return scalar value not astropy.quantity
return dsun.value
class EITMap(GenericMap):
"""SOHO EIT Image Map.
SOHO EIT is an extreme ultraviolet (EUV) imager able to image the solar
transition region and inner corona in four selected bandpasses,
171 (Fe IX/X), 195 (Fe XII), 284 (Fe XV), and 304 (He II) Angstrom.
SOHO was launched on 2 December 1995 into a sun-synchronous orbit and
primary mission operations for SOHO EIT ended at the end of July 2010.
References
----------
* `SOHO Mission Page <http://sohowww.nascom.nasa.gov>`_
* `SOHO EIT Instrument Page <http://umbra.nascom.nasa.gov/eit/>`_
* `SOHO EIT User Guide <http://umbra.nascom.nasa.gov/eit/eit_guide/>`_
"""
def __init__(self, data, header, **kwargs):
GenericMap.__init__(self, data, header, **kwargs)
# Fill in some missing info
self.meta['detector'] = "EIT"
self.meta['waveunit'] = "Angstrom"
self._fix_dsun()
self._nickname = self.detector
self.plot_settings['cmap'] = cm.get_cmap(self._get_cmap_name())
self.plot_settings['norm'] = ImageNormalize(stretch=PowerStretch(0.5))
@property
def rsun_obs(self):
"""
Returns the solar radius as measured by EIT in arcseconds.
"""
return Quantity(self.meta['solar_r'] * self.meta['cdelt1'], 'arcsec')
def _fix_dsun(self):
self.meta['dsun_obs'] = _dsunAtSoho(self.date, self.rsun_obs)
@classmethod
def is_datasource_for(cls, data, header, **kwargs):
"""Determines if header corresponds to an EIT image"""
return header.get('instrume') == 'EIT'
class LASCOMap(GenericMap):
"""SOHO LASCO Image Map
The Large Angle and Spectrometric COronagraph (LASCO) is a set of three
Lyot-type coronagraphs (C1, C2, and C3) that image the solar corona from
1.1 to 32 solar radii.
The C1 images from 1.1 to 3 solar radii. The C2 telescope images the corona
from 2 to 6 solar radii, overlapping the outer field-of-view of C1 from 2 to
3 solar radii. The C3 telescope extends the field-of-view to 32 solar radii.
SOHO was launched on 2 December 1995 into a sun-synchronous orbit.
References
----------
* `SOHO Mission Page <http://sohowww.nascom.nasa.gov>`_
* `SOHO LASCO Instrument Page <http://lasco-www.nrl.navy.mil>`_
* `SOHO LASCO Fits Header keywords <http://lasco-www.nrl.navy.mil/index.php?p=content/keywords>`_
* `SOHO LASCO User Guide <http://lasco-www.nrl.navy.mil/index.php?p=content/handbook/hndbk>`_
"""
def __init__(self, data, header, **kwargs):
GenericMap.__init__(self, data, header, **kwargs)
self.meta['CUNIT1'] = self.meta['CUNIT1'].lower()
self.meta['CUNIT2'] = self.meta['CUNIT2'].lower()
# Fill in some missing or broken info
datestr = "{date}T{time}".format(date=self.meta.get('date-obs',
self.meta.get('date_obs')
),
time=self.meta.get('time-obs',
self.meta.get('time_obs')
)
)
self.meta['date-obs'] = datestr
# If non-standard Keyword is present, correct it too, for compatibility.
if 'date_obs' in self.meta:
self.meta['date_obs'] = self.meta['date-obs']
self.meta['wavelnth'] = np.nan
self.meta['waveunit'] = 'nm'
self._nickname = self.instrument + "-" + self.detector
self.plot_settings['cmap'] = cm.get_cmap('soholasco{det!s}'.format(det=self.detector[1]))
self.plot_settings['norm'] = ImageNormalize(stretch=PowerStretch(0.5))
@property
def measurement(self):
"""
Returns the type of data taken.
"""
# TODO: This needs to do more than white-light. Should give B, pB, etc.
return "white-light"
@classmethod
def is_datasource_for(cls, data, header, **kwargs):
"""Determines if header corresponds to an LASCO image."""
return header.get('instrume') == 'LASCO'
class MDIMap(GenericMap):
"""
SOHO MDI Image Map
The Michelson Doppler Imager (MDI) is a white light refracting telescope
which feeds sunlight through a series of filters onto a CCD camera. Two
    tunable Michelson interferometers define a 94 mAngstrom bandpass that can be
tuned across the Ni 6768 Angstrom solar absorption line.
MDI measures line-of-sight motion (Dopplergrams), magnetic field
(magnetograms), and brightness images of the full solar disk at several
resolutions (4 arc-second to very low resolution) and a fixed selected
region in higher resolution (1.2 arc-second).
    SOHO was launched on 2 December 1995 into a sun-synchronous orbit and
SOHO MDI ceased normal science observations on 12 April 2011.
References
----------
* `SOHO Mission Page <http://sohowww.nascom.nasa.gov>`_
* `SOHO MDI Instrument Page <http://soi.stanford.edu>`_
* `SOHO MDI Fits Header keywords <http://soi.stanford.edu/sssc/doc/keywords.html>`_
* `SOHO MDI Instrument Paper <http://soi.stanford.edu/sssc/doc/SP_paper_1995/MDI_SP_paper_1995.pdf>`_
"""
def __init__(self, data, header, **kwargs):
GenericMap.__init__(self, data, header, **kwargs)
# Fill in some missing or broken info
self.meta['detector'] = "MDI"
self._fix_dsun()
self.meta['wavelnth'] = np.nan
self.meta['waveunit'] = 'nm'
self._nickname = self.detector + " " + self.measurement
vmin = np.nanmin(self.data)
vmax = np.nanmax(self.data)
if abs(vmin) > abs(vmax):
self.plot_settings['norm'] = colors.Normalize(-vmin, vmin)
else:
self.plot_settings['norm'] = colors.Normalize(-vmax, vmax)
@property
def measurement(self):
"""
Returns the type of data in the map.
"""
return "magnetogram" if self.meta.get('content', " ").find('Mag') != -1 else "continuum"
def _fix_dsun(self):
""" Solar radius in arc-seconds at 1 au
previous value radius_1au = 959.644
radius = constants.average_angular_size
There are differences in the keywords in the test FITS data and in
        the Helioviewer JPEG2000 files. In both files, MDI stores the
        radius of the Sun in image pixels, along with a pixel scale size.
The names of these keywords are different in the FITS versus the
JP2 file. The code below first looks for the keywords relevant to
a FITS file, and then a JPEG2000 file. For more information on
MDI FITS header keywords please go to http://soi.stanford.edu/,
http://soi.stanford.edu/data/ and
http://soi.stanford.edu/magnetic/Lev1.8/ .
"""
scale = self.meta.get('xscale', self.meta.get('cdelt1'))
radius_in_pixels = self.meta.get('r_sun', self.meta.get('radius'))
radius = scale * radius_in_pixels
self.meta['radius'] = radius
if not radius:
# radius = sun.angular_size(self.date)
self.meta['dsun_obs'] = constants.au
else:
self.meta['dsun_obs'] = _dsunAtSoho(self.date, radius)
@classmethod
def is_datasource_for(cls, data, header, **kwargs):
"""Determines if header corresponds to an MDI image"""
return header.get('instrume') == 'MDI' or header.get('camera') == 'MDI'
|
bsd-2-clause
|
keflavich/scikit-image
|
doc/examples/plot_line_hough_transform.py
|
14
|
4465
|
r"""
=============================
Straight line Hough transform
=============================
The Hough transform in its simplest form is a `method to detect straight lines
<http://en.wikipedia.org/wiki/Hough_transform>`__.
In the following example, we construct an image with a line intersection. We
then use the Hough transform to explore a parameter space for straight lines
that may run through the image.
Algorithm overview
------------------
Usually, lines are parameterised as :math:`y = mx + c`, with a gradient
:math:`m` and y-intercept :math:`c`. However, this would mean that :math:`m` goes to
infinity for vertical lines. Instead, we construct a segment
perpendicular to the line, leading to the origin. The line is represented by the
length of that segment, :math:`r`, and the angle it makes with the x-axis,
:math:`\theta`.
The Hough transform constructs a histogram array representing the parameter
space (i.e., an :math:`M \times N` matrix, for :math:`M` different values of the
radius and :math:`N` different values of :math:`\theta`). For each parameter
combination, :math:`r` and :math:`\theta`, we then find the number of non-zero
pixels in the input image that would fall close to the corresponding line, and
increment the array at position :math:`(r, \theta)` appropriately.
We can think of each non-zero pixel "voting" for potential line candidates. The
local maxima in the resulting histogram indicate the parameters of the most
probable lines. In our example, the maxima occur at 45 and 135 degrees,
corresponding to the normal vector angles of each line.
Another approach is the Progressive Probabilistic Hough Transform [1]_. It is
based on the assumption that using a random subset of voting points gives a good
approximation to the actual result, and that lines can be extracted during the
voting process by walking along connected components. This returns the beginning
and end of each line segment, which is useful.
The function `probabilistic_hough_line` has three parameters: a general threshold
that is applied to the Hough accumulator, a minimum line length and the line gap
that influences line merging. In the example below, we find lines longer than 5
pixels with a gap of less than 3 pixels.
References
----------
.. [1] C. Galamhos, J. Matas and J. Kittler,"Progressive probabilistic
Hough transform for line detection", in IEEE Computer Society
Conference on Computer Vision and Pattern Recognition, 1999.
.. [2] Duda, R. O. and P. E. Hart, "Use of the Hough Transformation to
Detect Lines and Curves in Pictures," Comm. ACM, Vol. 15,
pp. 11-15 (January, 1972)
"""
from skimage.transform import (hough_line, hough_line_peaks,
probabilistic_hough_line)
from skimage.feature import canny
from skimage import data
import numpy as np
import matplotlib.pyplot as plt
# Construct test image
image = np.zeros((100, 100))
# Classic straight-line Hough transform
idx = np.arange(25, 75)
image[idx[::-1], idx] = 255
image[idx, idx] = 255
h, theta, d = hough_line(image)
fig, ax = plt.subplots(1, 3, figsize=(8, 4))
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Input image')
ax[0].axis('image')
ax[1].imshow(np.log(1 + h),
extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]),
d[-1], d[0]],
cmap=plt.cm.gray, aspect=1/1.5)
ax[1].set_title('Hough transform')
ax[1].set_xlabel('Angles (degrees)')
ax[1].set_ylabel('Distance (pixels)')
ax[1].axis('image')
ax[2].imshow(image, cmap=plt.cm.gray)
rows, cols = image.shape
for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
y1 = (dist - cols * np.cos(angle)) / np.sin(angle)
ax[2].plot((0, cols), (y0, y1), '-r')
ax[2].axis((0, cols, rows, 0))
ax[2].set_title('Detected lines')
ax[2].axis('image')
# Line finding, using the Probabilistic Hough Transform
image = data.camera()
edges = canny(image, 2, 1, 25)
lines = probabilistic_hough_line(edges, threshold=10, line_length=5, line_gap=3)
fig2, ax = plt.subplots(1, 3, figsize=(8, 3))
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Input image')
ax[0].axis('image')
ax[1].imshow(edges, cmap=plt.cm.gray)
ax[1].set_title('Canny edges')
ax[1].axis('image')
ax[2].imshow(edges * 0)
for line in lines:
p0, p1 = line
ax[2].plot((p0[0], p1[0]), (p0[1], p1[1]))
ax[2].set_title('Probabilistic Hough')
ax[2].axis('image')
plt.show()
|
bsd-3-clause
|
iproduct/course-social-robotics
|
11-dnn-keras/venv/Lib/site-packages/pandas/plotting/_matplotlib/misc.py
|
2
|
13023
|
import random
from typing import TYPE_CHECKING, Dict, List, Optional, Set
import matplotlib.lines as mlines
import matplotlib.patches as patches
import numpy as np
from pandas._typing import Label
from pandas.core.dtypes.missing import notna
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.style import get_standard_colors
from pandas.plotting._matplotlib.tools import create_subplots, set_ticks_props
if TYPE_CHECKING:
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from pandas import DataFrame, Series
def scatter_matrix(
frame: "DataFrame",
alpha=0.5,
figsize=None,
ax=None,
grid=False,
diagonal="hist",
marker=".",
density_kwds=None,
hist_kwds=None,
range_padding=0.05,
**kwds,
):
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = notna(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# GH 14855
kwds.setdefault("edgecolors", "none")
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.0
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in enumerate(df.columns):
for j, b in enumerate(df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == "hist":
ax.hist(values, **hist_kwds)
elif diagonal in ("kde", "density"):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(
df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds
)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n - 1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
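# Example usage of ``scatter_matrix`` (illustrative sketch; assumes a purely
# numeric DataFrame):
#
#     import numpy as np
#     import pandas as pd
#     df = pd.DataFrame(np.random.randn(100, 3), columns=["a", "b", "c"])
#     axes = scatter_matrix(df, alpha=0.4, diagonal="kde")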
def _get_marker_compat(marker):
if marker not in mlines.lineMarkers:
return "o"
return marker
def radviz(
frame: "DataFrame",
class_column,
ax: Optional["Axes"] = None,
color=None,
colormap=None,
**kwds,
) -> "Axes":
import matplotlib.pyplot as plt
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot: Dict[Label, List[List]] = {}
colors = get_standard_colors(
num_colors=len(classes), colormap=colormap, color_type="random", color=color
)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array(
[
(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m)) for i in range(m)]
]
)
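    # Each feature column is given an anchor on the unit circle at angle
    # 2*pi*i/m (the array ``s`` above); every row is then projected onto the
    # weighted average of those anchors, weighted by its normalised feature
    # values (see the loop below).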
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(
to_plot[kls][0],
to_plot[kls][1],
color=colors[i],
label=pprint_thing(kls),
**kwds,
)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor="none"))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor="gray"))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(
xy[0] - 0.025, xy[1] - 0.025, name, ha="right", va="top", size="small"
)
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(
xy[0] - 0.025,
xy[1] + 0.025,
name,
ha="right",
va="bottom",
size="small",
)
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(
xy[0] + 0.025, xy[1] - 0.025, name, ha="left", va="top", size="small"
)
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(
xy[0] + 0.025, xy[1] + 0.025, name, ha="left", va="bottom", size="small"
)
ax.axis("equal")
return ax
def andrews_curves(
frame: "DataFrame",
class_column,
ax: Optional["Axes"] = None,
samples: int = 200,
color=None,
colormap=None,
**kwds,
) -> "Axes":
import matplotlib.pyplot as plt
def function(amplitudes):
def f(t):
x1 = amplitudes[0]
result = x1 / np.sqrt(2.0)
# Take the rest of the coefficients and resize them
# appropriately. Take a copy of amplitudes as otherwise numpy
# deletes the element from amplitudes itself.
coeffs = np.delete(np.copy(amplitudes), 0)
coeffs.resize(int((coeffs.size + 1) / 2), 2)
# Generate the harmonics and arguments for the sin and cos
# functions.
harmonics = np.arange(0, coeffs.shape[0]) + 1
trig_args = np.outer(harmonics, t)
result += np.sum(
coeffs[:, 0, np.newaxis] * np.sin(trig_args)
+ coeffs[:, 1, np.newaxis] * np.cos(trig_args),
axis=0,
)
return result
return f
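    # The closure above evaluates the Andrews curve
    #   f_x(t) = x1/sqrt(2) + x2*sin(t) + x3*cos(t) + x4*sin(2t) + x5*cos(2t) + ...
    # so each row of the frame becomes a smooth curve over t in [-pi, pi];
    # rows with similar feature values trace similar curves.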
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-np.pi, np.pi, samples)
used_legends: Set[str] = set()
color_values = get_standard_colors(
num_colors=len(classes), colormap=colormap, color_type="random", color=color
)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-np.pi, np.pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = f(t)
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(t, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(t, y, color=colors[kls], **kwds)
ax.legend(loc="upper right")
ax.grid()
return ax
def bootstrap_plot(
series: "Series",
fig: Optional["Figure"] = None,
size: int = 50,
samples: int = 500,
**kwds,
) -> "Figure":
import matplotlib.pyplot as plt
# TODO: is the failure mentioned below still relevant?
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array(
[(min(sampling) + max(sampling)) * 0.5 for sampling in samplings]
)
if fig is None:
fig = plt.figure()
x = list(range(samples))
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
plt.tight_layout()
return fig
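# Example usage of ``bootstrap_plot`` (illustrative sketch):
#
#     import numpy as np
#     import pandas as pd
#     s = pd.Series(np.random.uniform(size=100))
#     fig = bootstrap_plot(s, size=50, samples=500)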
def parallel_coordinates(
frame: "DataFrame",
class_column,
cols=None,
ax: Optional["Axes"] = None,
color=None,
use_columns=False,
xticks=None,
colormap=None,
axvlines: bool = True,
axvlines_kwds=None,
sort_labels: bool = False,
**kwds,
) -> "Axes":
import matplotlib.pyplot as plt
if axvlines_kwds is None:
axvlines_kwds = {"linewidth": 1, "color": "black"}
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends: Set[str] = set()
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError("Columns must be numeric to be used as xticks")
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError("xticks specified must be numeric")
elif len(xticks) != ncols:
raise ValueError("Length of xticks must match number of columns")
x = xticks
else:
x = list(range(ncols))
if ax is None:
ax = plt.gca()
color_values = get_standard_colors(
num_colors=len(classes), colormap=colormap, color_type="random", color=color
)
if sort_labels:
classes = sorted(classes)
color_values = sorted(color_values)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, **axvlines_kwds)
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc="upper right")
ax.grid()
return ax
def lag_plot(
series: "Series", lag: int = 1, ax: Optional["Axes"] = None, **kwds
) -> "Axes":
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
import matplotlib.pyplot as plt
kwds.setdefault("c", plt.rcParams["patch.facecolor"])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel(f"y(t + {lag})")
ax.scatter(y1, y2, **kwds)
return ax
def autocorrelation_plot(
series: "Series", ax: Optional["Axes"] = None, **kwds
) -> "Axes":
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = [r(loc) for loc in x]
    z95 = 1.959963984540054  # two-sided 95% critical value of the standard normal
    z99 = 2.5758293035489004  # two-sided 99% critical value of the standard normal
ax.axhline(y=z99 / np.sqrt(n), linestyle="--", color="grey")
ax.axhline(y=z95 / np.sqrt(n), color="grey")
ax.axhline(y=0.0, color="black")
ax.axhline(y=-z95 / np.sqrt(n), color="grey")
ax.axhline(y=-z99 / np.sqrt(n), linestyle="--", color="grey")
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if "label" in kwds:
ax.legend()
ax.grid()
return ax
|
gpl-2.0
|
daodaoliang/neural-network-animation
|
matplotlib/tests/test_coding_standards.py
|
9
|
11641
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from fnmatch import fnmatch
import os
import sys
from nose.tools import assert_equal
from nose.plugins.skip import SkipTest
try:
import pep8
except ImportError:
HAS_PEP8 = False
else:
HAS_PEP8 = pep8.__version__ > '1.4.5'
import matplotlib
EXTRA_EXCLUDE_FILE = os.path.join(os.path.dirname(__file__),
'.pep8_test_exclude.txt')
EXCLUDE_FILES = ['_delaunay.py',
'_image.py',
'_tri.py',
'_backend_agg.py',
'_tkagg.py',
'ft2font.py',
'_cntr.py',
'_png.py',
'_path.py',
'ttconv.py',
'_gtkagg.py',
'_backend_gdk.py',
'pyparsing*',
'_qhull.py',
'_macosx.py']
PEP8_ADDITIONAL_IGNORE = ['E111',
'E112',
'E113',
'E121',
'E122',
'E123',
'E124',
'E125',
'E126',
'E127',
'E128',
'E129',
'E131',
'E265']
EXPECTED_BAD_FILES = ['*/matplotlib/__init__.py',
'*/matplotlib/_cm.py',
'*/matplotlib/_mathtext_data.py',
'*/matplotlib/_pylab_helpers.py',
'*/matplotlib/afm.py',
'*/matplotlib/artist.py',
'*/matplotlib/axis.py',
'*/matplotlib/backend_bases.py',
'*/matplotlib/bezier.py',
'*/matplotlib/cbook.py',
'*/matplotlib/collections.py',
'*/matplotlib/dviread.py',
'*/matplotlib/font_manager.py',
'*/matplotlib/fontconfig_pattern.py',
'*/matplotlib/gridspec.py',
'*/matplotlib/legend.py',
'*/matplotlib/legend_handler.py',
'*/matplotlib/mathtext.py',
'*/matplotlib/mlab.py',
'*/matplotlib/path.py',
'*/matplotlib/patheffects.py',
'*/matplotlib/pylab.py',
'*/matplotlib/pyplot.py',
'*/matplotlib/rcsetup.py',
'*/matplotlib/stackplot.py',
'*/matplotlib/texmanager.py',
'*/matplotlib/transforms.py',
'*/matplotlib/type1font.py',
'*/matplotlib/widgets.py',
'*/matplotlib/testing/decorators.py',
'*/matplotlib/testing/image_util.py',
'*/matplotlib/testing/noseclasses.py',
'*/matplotlib/testing/jpl_units/Duration.py',
'*/matplotlib/testing/jpl_units/Epoch.py',
'*/matplotlib/testing/jpl_units/EpochConverter.py',
'*/matplotlib/testing/jpl_units/StrConverter.py',
'*/matplotlib/testing/jpl_units/UnitDbl.py',
'*/matplotlib/testing/jpl_units/UnitDblConverter.py',
'*/matplotlib/testing/jpl_units/UnitDblFormatter.py',
'*/matplotlib/testing/jpl_units/__init__.py',
'*/matplotlib/tri/triinterpolate.py',
'*/matplotlib/tests/test_axes.py',
'*/matplotlib/tests/test_bbox_tight.py',
'*/matplotlib/tests/test_delaunay.py',
'*/matplotlib/tests/test_dviread.py',
'*/matplotlib/tests/test_image.py',
'*/matplotlib/tests/test_legend.py',
'*/matplotlib/tests/test_lines.py',
'*/matplotlib/tests/test_mathtext.py',
'*/matplotlib/tests/test_rcparams.py',
'*/matplotlib/tests/test_simplification.py',
'*/matplotlib/tests/test_spines.py',
'*/matplotlib/tests/test_streamplot.py',
'*/matplotlib/tests/test_subplots.py',
'*/matplotlib/tests/test_tightlayout.py',
'*/matplotlib/tests/test_transforms.py',
'*/matplotlib/tests/test_triangulation.py',
'*/matplotlib/compat/subprocess.py',
'*/matplotlib/backends/__init__.py',
'*/matplotlib/backends/backend_agg.py',
'*/matplotlib/backends/backend_cairo.py',
'*/matplotlib/backends/backend_cocoaagg.py',
'*/matplotlib/backends/backend_gdk.py',
'*/matplotlib/backends/backend_gtk.py',
'*/matplotlib/backends/backend_gtk3.py',
'*/matplotlib/backends/backend_gtk3cairo.py',
'*/matplotlib/backends/backend_gtkagg.py',
'*/matplotlib/backends/backend_gtkcairo.py',
'*/matplotlib/backends/backend_macosx.py',
'*/matplotlib/backends/backend_mixed.py',
'*/matplotlib/backends/backend_pgf.py',
'*/matplotlib/backends/backend_ps.py',
'*/matplotlib/backends/backend_svg.py',
'*/matplotlib/backends/backend_template.py',
'*/matplotlib/backends/backend_tkagg.py',
'*/matplotlib/backends/backend_wx.py',
'*/matplotlib/backends/backend_wxagg.py',
'*/matplotlib/backends/tkagg.py',
'*/matplotlib/backends/windowing.py',
'*/matplotlib/backends/qt_editor/formlayout.py',
'*/matplotlib/sphinxext/ipython_console_highlighting.py',
'*/matplotlib/sphinxext/ipython_directive.py',
'*/matplotlib/sphinxext/mathmpl.py',
'*/matplotlib/sphinxext/only_directives.py',
'*/matplotlib/sphinxext/plot_directive.py',
'*/matplotlib/projections/__init__.py',
'*/matplotlib/projections/geo.py',
'*/matplotlib/projections/polar.py']
if HAS_PEP8:
class StandardReportWithExclusions(pep8.StandardReport):
        #: A class attribute to store the exception exclusion file patterns.
expected_bad_files = EXPECTED_BAD_FILES
#: A class attribute to store the lines of failing tests.
_global_deferred_print = []
#: A class attribute to store patterns which have seen exceptions.
matched_exclusions = set()
def get_file_results(self):
# If the file had no errors, return self.file_errors
# (which will be 0).
if not self._deferred_print:
return self.file_errors
# Iterate over all of the patterns, to find a possible exclusion.
# If the filename is to be excluded, go ahead and remove the
# counts that self.error added.
for pattern in self.expected_bad_files:
if fnmatch(self.filename, pattern):
self.matched_exclusions.add(pattern)
# invert the error method's counters.
for _, _, code, _, _ in self._deferred_print:
self.counters[code] -= 1
if self.counters[code] == 0:
self.counters.pop(code)
self.messages.pop(code)
self.file_errors -= 1
self.total_errors -= 1
return self.file_errors
# mirror the content of StandardReport, only storing the output to
# file rather than printing. This could be a feature request for
# the PEP8 tool.
self._deferred_print.sort()
for line_number, offset, code, text, _ in self._deferred_print:
self._global_deferred_print.append(
self._fmt % {'path': self.filename,
'row': self.line_offset + line_number,
'col': offset + 1, 'code': code,
'text': text})
return self.file_errors
def assert_pep8_conformance(module=matplotlib, exclude_files=EXCLUDE_FILES,
extra_exclude_file=EXTRA_EXCLUDE_FILE,
pep8_additional_ignore=PEP8_ADDITIONAL_IGNORE):
"""
Tests the matplotlib codebase against the "pep8" tool.
Users can add their own excluded files (should files exist in the
local directory which is not in the repository) by adding a
".pep8_test_exclude.txt" file in the same directory as this test.
The file should be a line separated list of filenames/directories
as can be passed to the "pep8" tool's exclude list.
"""
if not HAS_PEP8:
raise SkipTest('The pep8 tool is required for this test')
# to get a list of bad files, rather than the specific errors, add
# "reporter=pep8.FileReport" to the StyleGuide constructor.
pep8style = pep8.StyleGuide(quiet=False,
reporter=StandardReportWithExclusions)
reporter = pep8style.options.reporter
# Extend the number of PEP8 guidelines which are not checked.
pep8style.options.ignore = (pep8style.options.ignore +
tuple(pep8_additional_ignore))
# Support for egg shared object wrappers, which are not PEP8 compliant,
# nor part of the matplotlib repository.
# DO NOT ADD FILES *IN* THE REPOSITORY TO THIS LIST.
pep8style.options.exclude.extend(exclude_files)
# Allow users to add their own exclude list.
if extra_exclude_file is not None and os.path.exists(extra_exclude_file):
with open(extra_exclude_file, 'r') as fh:
extra_exclude = [line.strip() for line in fh if line.strip()]
pep8style.options.exclude.extend(extra_exclude)
result = pep8style.check_files([os.path.dirname(module.__file__)])
if reporter is StandardReportWithExclusions:
msg = ("Found code syntax errors (and warnings):\n"
"{0}".format('\n'.join(reporter._global_deferred_print)))
else:
msg = "Found code syntax errors (and warnings)."
assert_equal(result.total_errors, 0, msg)
# If we've been using the exclusions reporter, check that we didn't
# exclude files unnecessarily.
if reporter is StandardReportWithExclusions:
unexpectedly_good = sorted(set(reporter.expected_bad_files) -
reporter.matched_exclusions)
if unexpectedly_good:
raise ValueError('Some exclude patterns were unnecessary as the '
'files they pointed to either passed the PEP8 '
'tests or do not point to a file:\n '
'{}'.format('\n '.join(unexpectedly_good)))
def test_pep8_conformance():
assert_pep8_conformance()
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
mit
|
philippjfr/bokeh
|
sphinx/source/docs/user_guide/examples/extensions_example_latex.py
|
5
|
2681
|
""" The LaTex example was derived from: http://matplotlib.org/users/usetex.html
"""
import numpy as np
from bokeh.models import Label
from bokeh.plotting import figure, show
JS_CODE = """
import {Label, LabelView} from "models/annotations/label"
export class LatexLabelView extends LabelView
render: () ->
#--- Start of copied section from ``Label.render`` implementation
ctx = @plot_view.canvas_view.ctx
    # Here because AngleSpec does units transform and label doesn't support specs
switch @model.angle_units
when "rad" then angle = -1 * @model.angle
when "deg" then angle = -1 * @model.angle * Math.PI/180.0
if @model.x_units == "data"
vx = @xscale.compute(@model.x)
else
vx = @model.x
sx = @canvas.vx_to_sx(vx)
if @model.y_units == "data"
vy = @yscale.compute(@model.y)
else
vy = @model.y
sy = @canvas.vy_to_sy(vy)
if @model.panel?
panel_offset = @_get_panel_offset()
sx += panel_offset.x
sy += panel_offset.y
#--- End of copied section from ``Label.render`` implementation
# Must render as superpositioned div (not on canvas) so that KaTex
# css can properly style the text
@_css_text(ctx, "", sx + @model.x_offset, sy - @model.y_offset, angle)
# ``katex`` is loaded into the global window at runtime
# katex.renderToString returns a html ``span`` element
katex.render(@model.text, @el, {displayMode: true})
export class LatexLabel extends Label
type: 'LatexLabel'
default_view: LatexLabelView
"""
class LatexLabel(Label):
"""A subclass of the Bokeh built-in `Label` that supports rendering
LaTex using the KaTex typesetting library.
Only the render method of LabelView is overloaded to perform the
text -> latex (via katex) conversion. Note: ``render_mode="canvas``
isn't supported and certain DOM manipulation happens in the Label
superclass implementation that requires explicitly setting
`render_mode='css'`).
"""
__javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.js"]
__css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.css"]
__implementation__ = JS_CODE
x = np.arange(0.0, 1.0 + 0.01, 0.01)
y = np.cos(2*2*np.pi*x) + 2
p = figure(title="LaTex Demonstration", plot_width=500, plot_height=500)
p.line(x, y)
# Note: must set ``render_mode="css"``
latex = LatexLabel(text="f = \sum_{n=1}^\infty\\frac{-e^{i\pi}}{2^n}!",
x=35, y=445, x_units='screen', y_units='screen',
render_mode='css', text_font_size='16pt',
background_fill_color='#ffffff')
p.add_layout(latex)
show(p)
|
bsd-3-clause
|
xapharius/mrEnsemble
|
Engine/src/simulation/benchmarker/dataset_loader.py
|
2
|
8193
|
'''
Created on May 3, 2015
@author: xapharius
'''
import os
import cv2
import numpy as np
import pandas as pd
import utils.imageutils as imgutils
import utils.numpyutils as nputils
import matplotlib.pyplot as plt
from simulation.sampler.bootstrap_sampler import BootstrapSampler
_data_folder = os.getcwd().split("Engine")[0] + "data/"
_wildfire_folder = "/media/xapharius/Storage/Documents/wildfire/"
class RawDataset(object):
def __init__(self, training_inputs, training_targets,
validation_inputs=None, validation_targets=None,
name="unnamed"):
'''
IF VALIDATION LEFT NONE, will split training 70/30
'''
if validation_inputs is None:
split = int(0.7 * len(training_inputs))
self.validation_inputs = training_inputs[split:]
self.validation_targets = training_targets[split:]
self.training_inputs = training_inputs[:split]
self.training_targets = training_targets[:split]
else:
self.training_inputs = training_inputs
self.training_targets = training_targets
self.validation_inputs = validation_inputs
self.validation_targets = validation_targets
self.training_obs = len(self.training_inputs)
self.validation_obs = len(self.validation_inputs)
self.total_obs = self.training_obs + self.validation_obs
self.input_var = self.training_inputs[0].shape
self.target_var = self.training_targets[0].shape
self.name = name
def get_datasets(data_type):
'''
    Filters by data type to return the right set of datasets
    @return: generator of RawDataset objects
'''
if data_type == "numerical":
return _gen_numerical_datasets()
if data_type == "image":
return _gen_image_datasets()
assert "Invalid type or task"
def _gen_numerical_datasets():
loaders = [_get_bank, _get_diabetic_retinopathy, _get_letter_recognition, _get_pendigits]
for loader in loaders:
yield loader()
def _gen_image_datasets():
loaders = [_get_mnist]
for loader in loaders:
yield loader()
def _gen_wildfire_datasets():
loaders = [_get_wildfire_diff, _get_wildfire_div, _get_wildfire_img2]
for loader in loaders:
yield loader()
def _get_bank():
'''
Bank-Marketing dataset "bank-full.csv"
https://archive.ics.uci.edu/ml/datasets/Bank+Marketing
Binary Outcome
'''
data = pd.read_csv(_data_folder + "bank-marketing/bank-full.csv", sep=";")
data = data.replace(["no", "yes"], [0, 1])
job = pd.get_dummies(data.job, prefix="job").drop("job_unknown", axis=1)
data.drop("job", axis=1, inplace=True)
data = pd.concat([data, job], axis=1)
data.education = data.education.replace(['unknown', 'primary', 'secondary', 'tertiary'], range(4))
data.marital = data.marital.replace(['unknown', 'single', 'married', 'divorced'], range(4))
data.month = data.month.replace(['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec'], range(1,13))
contact = pd.get_dummies(data.contact, prefix="contact").drop("contact_unknown", axis=1)
data.drop("contact", axis=1, inplace=True)
data = pd.concat([data, contact], axis=1)
outcome = pd.get_dummies(data.poutcome, prefix="poutcome").drop("poutcome_unknown", axis=1)
data.drop("poutcome", axis=1, inplace=True)
data = pd.concat([data, outcome], axis=1)
# put y at end
cols = data.columns.tolist()
cols.remove("y")
cols += ["y"]
data = data[cols]
data=data.values
return RawDataset(name="bank-marketing", training_inputs=data[:,:-1],
training_targets=data[:,-1:])
def _get_breast_cancer():
'''
Wisconsin Breast Cancer dataset
https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Original%29
Binary Outcome
'''
data = pd.read_csv(_data_folder + "breast-cancer/breast-cancer-wisconsin.data", sep=",")
data = data.replace(["?"], [np.NaN])
data.dropna(inplace=True)
data.label = data.label.replace([2,4], [0,1])
data.drop("ID", axis=1, inplace=True)
data = data.values.astype(float)
return RawDataset(name="breast-cancer", training_inputs=data[:,:-1],
training_targets=data[:,-1:])
def _get_banknote_auth():
'''
Banknote Authentification
https://archive.ics.uci.edu/ml/datasets/banknote+authentication#
Binary Outcome
'''
data = pd.read_csv(_data_folder + "banknote-auth/banknote_authentication.data", sep=",")
data = data.values.astype(float)
return RawDataset(name="banknote-auth", training_inputs=data[:,:-1],
training_targets=data[:,-1:])
def _get_diabetic_retinopathy():
'''
Diabetic Retinopathy Debrecen Data Set Data Set
https://archive.ics.uci.edu/ml/datasets/Diabetic+Retinopathy+Debrecen+Data+Set
'''
data = pd.read_csv(_data_folder + "diabetic-retinopathy/diabetic-retinopathy.data", sep=",", header=None)
data = data.values
return RawDataset(name="diabetic-retinopathy", training_inputs=data[:,:-1],
training_targets=data[:,-1:])
def _get_letter_recognition():
'''
Letter Recognition Data Set
https://archive.ics.uci.edu/ml/datasets/Letter+Recognition
'''
data = pd.read_csv(_data_folder + "letter-recognition/letter-recognition.data", sep=",", header=None)
letters = [l for l in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
data = data.replace(letters, range(len(letters)))
# put y at end
cols = data.columns.tolist()
cols.remove(0)
cols += [0]
data = data[cols]
data = data.values.astype(float)
return RawDataset(name="letter-recognition", training_inputs=data[:,:-1],
training_targets=data[:,-1:])
def _get_pendigits():
'''
Pendigits
https://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits
'''
data = pd.read_csv(_data_folder + "pendigits/pendigits.data", sep=",", header=None)
data = data.values.astype(float)
return RawDataset(name="pendigits", training_inputs=data[:,:-1],
training_targets=data[:,-1:])
def _get_mnist(nr_obs=-1):
'''
'''
tin, ttar = imgutils.load_mnist_digits(_data_folder + "mnist-digits/train-images.idx3-ubyte",
_data_folder + "mnist-digits/train-labels.idx1-ubyte", nr_obs)
vin, vtar = imgutils.load_mnist_digits(_data_folder + "mnist-digits/t10k-images.idx3-ubyte",
_data_folder + "mnist-digits/t10k-labels.idx1-ubyte", nr_obs)
return RawDataset(name="mnist", training_inputs=tin, training_targets=ttar,
validation_inputs=vin, validation_targets=vtar)
def _get_binary_mnist():
tin, ttar = imgutils.load_mnist_digits(_data_folder + "mnist-digits/train-images.idx3-ubyte",
_data_folder + "mnist-digits/train-labels.idx1-ubyte", 5000)
ttar = ttar.argmax(axis=1)
indices = (ttar == 0) | (ttar == 1)
tin = tin[indices]
ttar = ttar[indices]
ttar = ttar.reshape(len(ttar), 1)
return RawDataset(name="binary_mnist", training_inputs=tin, training_targets=ttar)
def _get_wildfire(set_name):
'''
'''
files = pd.read_csv(_wildfire_folder + "multi_files.csv", header=None, names=["file", "label"])
labels = files.label.values[:, np.newaxis]
data = []
for file_name in files.file:
img = cv2.imread(_wildfire_folder + set_name + "/" + file_name + ".png", -1)
img = cv2.resize(img, (256, 256))
data.append(img)
data = np.array(data)
return RawDataset(name="wildfire_"+set_name, training_inputs=data, training_targets=labels)
def _get_wildfire_diff():
return _get_wildfire("diff")
def _get_wildfire_div():
return _get_wildfire("div")
def _get_wildfire_img2():
return _get_wildfire("img2")
"""
rawdataset = _get_wildfire("diff")
bs = BootstrapSampler(0.03, with_replacement=False)
bs.bind_data(rawdataset.training_inputs, rawdataset.training_targets)
inp, lab = bs.sample()
for i in range(10):
plt.figure()
plt.imshow(inp[i], cmap="gray")
print lab[:10]
plt.show()
"""
|
mit
|
Andrew-McNab-UK/DIRAC
|
Core/Utilities/Graphs/CurveGraph.py
|
4
|
4787
|
########################################################################
# $HeadURL$
########################################################################
""" CurveGraph represents simple line graphs with markers.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphUtilities import darkenColor, to_timestamp, PrettyDateLocator, \
PrettyDateFormatter, PrettyScalarFormatter
from matplotlib.lines import Line2D
from matplotlib.dates import date2num
import datetime
class CurveGraph( PlotBase ):
"""
The CurveGraph class is a straightforward line graph with markers
"""
def __init__(self,data,ax,prefs,*args,**kw):
PlotBase.__init__(self,data,ax,prefs,*args,**kw)
def draw( self ):
PlotBase.draw(self)
self.x_formatter_cb(self.ax)
if self.gdata.isEmpty():
return None
start_plot = 0
end_plot = 0
if "starttime" in self.prefs and "endtime" in self.prefs:
start_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['starttime'])))
end_plot = date2num( datetime.datetime.fromtimestamp(to_timestamp(self.prefs['endtime'])))
labels = self.gdata.getLabels()
labels.reverse()
# If it is a simple plot, no labels are used
# Evaluate the most appropriate color in this case
if self.gdata.isSimplePlot():
labels = [('SimplePlot',0.)]
color = self.prefs.get('plot_color','Default')
if color.find('#') != -1:
self.palette.setColor('SimplePlot',color)
else:
labels = [(color,0.)]
tmp_max_y = []
tmp_min_y = []
tmp_x = []
for label,num in labels:
xdata = []
ydata = []
xerror = []
yerror = []
color = self.palette.getColor(label)
plot_data = self.gdata.getPlotNumData(label)
for key, value, error in plot_data:
if value is None:
continue
tmp_x.append( key )
tmp_max_y.append( value + error )
tmp_min_y.append( value - error )
xdata.append( key )
ydata.append( value )
xerror.append( 0. )
yerror.append( error )
linestyle = self.prefs.get( 'linestyle', '-' )
marker = self.prefs.get( 'marker', 'o' )
markersize = self.prefs.get( 'markersize', 8. )
markeredgewidth = self.prefs.get( 'markeredgewidth', 1. )
if not self.prefs.get( 'error_bars', False ):
line = Line2D( xdata, ydata, color=color, linewidth=1., marker=marker, linestyle=linestyle,
markersize=markersize, markeredgewidth=markeredgewidth,
markeredgecolor = darkenColor( color ) )
self.ax.add_line( line )
else:
self.ax.errorbar( xdata, ydata, color=color, linewidth=2., marker=marker, linestyle=linestyle,
markersize=markersize, markeredgewidth=markeredgewidth,
markeredgecolor = darkenColor( color ), xerr = xerror, yerr = yerror,
ecolor=color )
ymax = max( tmp_max_y )
ymax *= 1.1
    ymin = min( min( tmp_min_y ), 0. )
ymin *= 1.1
if self.prefs.has_key('log_yaxis'):
ymin = 0.001
xmax=max(tmp_x)*1.1
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ymin = self.prefs.get( 'ymin', ymin )
ymax = self.prefs.get( 'ymax', ymax )
xmin = self.prefs.get( 'xmin', xmin )
xmax = self.prefs.get( 'xmax', xmax )
self.ax.set_xlim( xmin=xmin, xmax=xmax )
self.ax.set_ylim( ymin=ymin, ymax=ymax )
if self.gdata.key_type == 'time':
if start_plot and end_plot:
self.ax.set_xlim( xmin=start_plot, xmax=end_plot)
else:
self.ax.set_xlim( xmin=min(tmp_x), xmax=max(tmp_x))
def x_formatter_cb( self, ax ):
if self.gdata.key_type == "string":
smap = self.gdata.getStringMap()
reverse_smap = {}
for key, val in smap.items():
reverse_smap[val] = key
ticks = smap.values()
ticks.sort()
ax.set_xticks( [i+.5 for i in ticks] )
ax.set_xticklabels( [reverse_smap[i] for i in ticks] )
labels = ax.get_xticklabels()
ax.grid( False )
if self.log_xaxis:
xmin = 0.001
else:
xmin = 0
ax.set_xlim( xmin=xmin,xmax=len(ticks) )
elif self.gdata.key_type == "time":
dl = PrettyDateLocator()
df = PrettyDateFormatter( dl )
ax.xaxis.set_major_locator( dl )
ax.xaxis.set_major_formatter( df )
ax.xaxis.set_clip_on(False)
sf = PrettyScalarFormatter( )
ax.yaxis.set_major_formatter( sf )
else:
return None
|
gpl-3.0
|
BridgitD/school-dropout-predictions
|
pipeline/cook-pipeline-2.py
|
1
|
12551
|
'''
Christine Cook
Machine Learning
'''
from IPython import embed
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.pyplot as pl  # several plotting helpers below use the name ``pl``
import numpy as np
import re
import random
from sklearn.cross_validation import KFold
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score, precision_score
from sklearn.metrics import recall_score, precision_recall_curve
from sklearn.metrics import roc_auc_score, f1_score
from sklearn.metrics import roc_curve, auc  # used by plotROC below
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import time
def getSumStats(data):
desc = data.iloc[:,1:].describe().T
desc.drop([desc.columns[4], desc.columns[6]], axis=1, inplace=True)
mode = data.iloc[:,1:].mode()
desc = pd.concat([desc.T, mode])
desc.rename({0:'mode', '50%':'median'}, inplace=True)
desc.to_csv("data_sumstats.csv")
def cleanData(data, cohort):
if cohort == 1:
dropList = ['g6_tardyr','g6_school_name', 'g7_school_name', 'g8_school_name', 'g9_school_name', 'g10_school_name', 'g11_school_name', 'g12_school_name','g6_year', 'g6_gradeexp', 'g6_grade', 'g6_wcode', 'g7_year', 'g7_gradeexp', 'g7_grade', 'g7_wcode', 'g8_year', 'g8_gradeexp', 'g8_grade', 'g8_wcode', 'g9_year', 'g9_gradeexp', 'g9_grade', 'g9_wcode', 'g10_year', 'g10_gradeexp', 'g10_grade', 'g10_wcode', 'g11_year', 'g11_gradeexp', 'g11_grade', 'g11_wcode', 'g12_year', 'g12_gradeexp', 'g12_grade', 'g12_wcode']
data.drop(dropList, axis=1, inplace=True)
elif cohort == 2:
dropList = ['g6_school_name', 'g7_school_name', 'g8_school_name', 'g9_school_name', 'g10_school_name', 'g11_school_name', 'g12_school_name','g6_year', 'g6_grade', 'g6_wcode', 'g7_year', 'g7_grade', 'g7_wcode', 'g8_year', 'g8_grade', 'g8_wcode', 'g9_year', 'g9_grade', 'g9_wcode', 'g10_year', 'g10_grade', 'g10_wcode', 'g11_year', 'g11_grade', 'g11_wcode', 'g12_year', 'g12_grade', 'g12_wcode']
data.drop(dropList, axis=1, inplace=True)
##clean birth year/mo
data.loc[:, 'g11_byrmm']= data.loc[:,'g11_byrmm'].astype(str)
data.loc[:, 'birth_year'] = data['g11_byrmm'].str[0:4]
data.loc[:, 'birth_mo'] = data['g11_byrmm'].str[4:6]
birthday_cols = ['g11_byrmm', 'g12_byrmm', 'g10_byrmm', 'g9_byrmm', 'g8_byrmm', 'g7_byrmm', 'g6_byrmm']
for col in birthday_cols:
data.loc[:, col]= data.loc[:,col].astype(str)
data['birth_year'].fillna(data[col].str[0:4], inplace=True)
data['birth_mo'].fillna(data[col].str[4:6], inplace=True)
data.drop('id', axis=1, inplace=True)
data.drop(birthday_cols, axis=1, inplace=True)
#clean gender
data['gender'] = data['g11_gender']
gender_cols = ['g12_gender', 'g11_gender', 'g10_gender', 'g9_gender', 'g8_gender', 'g7_gender', 'g6_gender']
for col in gender_cols:
        data['gender'] = data['gender'].fillna(data[col])
data.drop(gender_cols, axis=1, inplace=True)
#clean retained
retained_cols = ['g11_retained', 'g12_retained', 'g9_newmcps', 'g10_newmcps', 'g11_newmcps', 'g12_newmcps', 'g9_newus', 'g10_newus', 'g11_newus', 'g12_newus']
for col in retained_cols:
data[col] = data[col].notnull()
#create flag if a given student is missing a year's worth of data
grade_id = ['g6_pid', 'g7_pid', 'g8_pid', 'g9_pid', 'g10_pid', 'g11_pid', 'g12_pid']
year = 6
for g in grade_id:
col_name = 'g' + str(year) + '_missing'
data[col_name] = data[g].isnull()
data.drop(g, axis=1, inplace=True)
year+=1
return data
def makeDummies(data):
school_ids = [col for col in data.columns if 'school_id' in col]
data[school_ids] = data.loc[:,school_ids].astype(str, copy=False)
data = pd.get_dummies(data, dummy_na=True)
return data
def chooseCols(data, pred_grade):
#drop 'future' vars
for x in range(pred_grade, 13):
dropVars = [col for col in data.columns if str(x) in col]
dropoutVar = 'g' + str(x) + '_dropout'
if dropoutVar in dropVars:
dropVars.remove(dropoutVar)
data.drop(dropVars, axis=1, inplace=True)
#drop irrelevent d/o vars
colList = [col for col in data.columns if 'dropout' in col]
doVar = 'g' + str(pred_grade) + '_dropout'
colList.remove(doVar)
data.drop(colList, axis=1, inplace=True)
return data
def imputeData(data):
#change msam to missing is msam_NA==1
nanList = ['g6_g6msam_nan', 'g7_g7msam_nan', 'g8_g8msam_nan', 'g9_g8msam_nan']
msamList = [[ 'g6_g6msam_Advanced', 'g6_g6msam_Basic', 'g6_g6msam_Proficient'], ['g7_g7msam_Advanced', 'g7_g7msam_Basic', 'g7_g7msam_Proficient'], ['g8_g8msam_Advanced', 'g8_g8msam_Basic', 'g8_g8msam_Proficient'],['g9_g8msam_Advanced', 'g9_g8msam_Basic', 'g9_g8msam_Proficient']]
for x in range(0,len(nanList)):
nacol = nanList[x]
colList = msamList[x]
for col in colList:
data.loc[data[nacol] == 1, col] = np.nan
#pred missing data using any available data
wordList = ['absrate', 'mapr', 'msam_Advanced', 'msam_Basic', 'msam_Proficient', 'mobility', 'nsusp', 'mpa', 'tardyr', 'psatm', 'psatv', 'retained']
for word in wordList:
colList = [col for col in data.columns if word in col]
rowMean = data[colList].mean(axis=1)
for col in colList:
data[col].fillna(rowMean, inplace=True)
return data
def limitRows(data, pred_grade):
#get rid of previous dropouts
for x in range(6, pred_grade-1):
data = data[data.g6_dropout !=1]
data = data[data.g7_dropout !=1]
data = data[data.g8_dropout !=1]
data = data[data.g9_dropout !=1]
if pred_grade >= 10:
data = data[data.g10_dropout !=1]
if pred_grade >= 11:
data = data[data.g11_dropout !=1]
return data
def makeFinite(data, pred_grade):
#keep finite
colList = [col for col in data.columns if 'dropout' in col]
doVar = 'g' + str(pred_grade) + '_dropout'
colList.remove(doVar)
data.drop(colList, axis=1, inplace=True)
data = data.dropna(axis=0)
return data
def makeChartDiscrete(data, col, title):
data_copy = data
data_copy = data_copy.dropna()
data_max = data_copy.iloc[:,col].max()
step = (data_max/50)
if step < 1:
bins=list(range(0, int(data_max), 1))
else:
bins=list(range(0, int(data_max), step))
pl.figure()
pl.title(title)
pl.xlabel(title)
pl.ylabel('Frequency')
bins = pl.hist(data_copy.iloc[:,col], bins)
pl.savefig(title)
def makeChartContinuous(data, col, title):
y_vals = data.iloc[:,col]
data_id = data.iloc[:,0]
pl.figure()
pl.title(title)
pl.xlabel(title)
pl.ylabel('Frequency')
pl.scatter(y_vals,data_id)
pl.savefig(title)
def imputeMean(data):
data.fillna(value=data.mean(), inplace=True)
return data
def plotROC(name, probs, test_data):
fpr, tpr, thresholds = roc_curve(test_data['g12_dropout'], probs)
roc_auc = auc(fpr, tpr)
pl.clf()
pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
pl.plot([0, 1], [0, 1], 'k--')
pl.xlim([0.0, 1.05])
pl.ylim([0.0, 1.05])
pl.xlabel('False Positive Rate')
pl.ylabel('True Positive Rate')
pl.title(name)
pl.legend(loc="lower right")
pl.savefig(name)
def clf_cv_loop(classifier, x_data, y_data):
    poss_class_y_pred = []
    poss_class_y_pred_proba = []
    poss_times = []
    for k in classifier['kwords_list']:
        t0 = time.time()
        # pass the keyword dict through to the classifier constructor
        y_pred_k, y_pred_proba_k = run_cv(x_data, y_data, classifier['class'], **k)
        poss_class_y_pred.append(y_pred_k)
        poss_class_y_pred_proba.append(y_pred_proba_k)
        t1 = time.time()
        poss_times.append(t1 - t0)
    return poss_class_y_pred, poss_times, poss_class_y_pred_proba
def run_cv(x, y, clf_class, *args, **kwargs):
embed()
# Construct a kfolds object
kf = KFold(len(y),n_folds=5,shuffle=True)
y_pred = y.copy()
y_pred_proba = y.copy()
# Iterate through folds
for train_index, test_index in kf:
x_train = x.ix[train_index]
x_test = x.ix[test_index]
y_train = y.ix[train_index]
x_train = Imputer(strategy = 'median').fit_transform(x_train)
x_test = Imputer(strategy = 'median').fit_transform(x_test)
# Initialize a classifier with key word arguments
clf = clf_class(**kwargs)
clf.fit(x_train,y_train)
y_pred[test_index] = clf.predict(x_test)
        y_pred_proba[test_index] = clf.predict_proba(x_test)[:, 1]  # probability of the positive class
return y_pred, y_pred_proba
def eval_clfs(y_pred, y_data, evals, classifier, classifier_name, poss_times, y_pred_proba):
#embed()
f = open('./output/'+classifier_name+'_evals_table.csv', 'w')
f.write('parameters\ttime\t')
for k, l in evals.iteritems():
f.write(k+'\t')
f.write('\n')
for k in range(len(y_pred)):
f.write(str(classifier['kwords_list'][k])+'\t')
        f.write(str(poss_times[k])+'\t')
for l, m in evals.iteritems():
if l == 'precision_recall_curve':
                eval_temp = m(y_data, y_pred_proba[k])
f.write(str(eval_temp)+'\t')
else:
eval_temp = m(y_data, y_pred[k])
f.write(str(eval_temp)+'\t')
f.write('\n')
f.close()
def main():
#read data
data = pd.read_csv('/mnt/data2/education_data/mcps/DATA_DO_NOT_UPLOAD/cohort1_all.csv', index_col=False)
#clean data
data = cleanData(data, 1)
#make dummies
data = makeDummies(data)
#limit rows to valid
data = limitRows(data, 12)
#shrink dataset size
data = chooseCols(data, 12)
#impute data
data = imputeData(data)
#make data finite
#data = makeFinite(data, 12)
data.dropna(axis=0, inplace=True)
#define features
features = data.columns.tolist()
features.remove('g12_dropout')
#define classifiers
classifiers = {
#'LogisticRegression': {'class': LogisticRegression},
#'KNeighborsClassifier': {'class': KNeighborsClassifier},
'DecisionTreeClassifier': {'class': DecisionTreeClassifier}}
#'LinearSVC': {'class': LinearSVC},
#'RandomForestClassifier': {'class': RandomForestClassifier},
#'AdaBoostClassifier': {'class': AdaBoostClassifier},
#'BaggingClassifier': {'class': BaggingClassifier}}
#define eval metrics
evals = {'accuracy_score': accuracy_score,
'precision_score': precision_score,
'recall_score': recall_score,
'f1_score': f1_score,
'roc_auc_score': roc_auc_score,
'precision_recall_curve': precision_recall_curve}
#Creating lists to loop over for parameters
#for i in range(10):
#temp = classifiers['KNeighborsClassifier'].get('kwords_list', [])
#temp.append({'n_neighbors': i})
#classifiers['KNeighborsClassifier']['kwords_list'] = temp
for i in range(1,6,1):
temp = classifiers['DecisionTreeClassifier'].get('kwords_list', [])
temp.append({'max_depth': i})
classifiers['DecisionTreeClassifier']['kwords_list'] = temp
'''
for i in range(2,22,2):
temp = classifiers['RandomForestClassifier'].get('kwords_list', [])
temp.append({'n_estimators': i})
classifiers['RandomForestClassifier']['kwords_list'] = temp
for i in range(50, 110, 10):
temp = classifiers['AdaBoostClassifier'].get('kwords_list', [])
temp.append({'n_estimators': i})
classifiers['AdaBoostClassifier']['kwords_list'] = temp
for i in range(6, 16, 2):
temp = classifiers['BaggingClassifier'].get('kwords_list', [])
temp.append({'n_estimators': i})
classifiers['BaggingClassifier']['kwords_list'] = temp
#classifiers['LogisticRegression']['kwords_list'] = [{'C': 1.0}]
#classifiers['LSVC']['kwords_list'] = [{'C': 1.0}]
'''
#define x, y
x_data = data[features]
y_data = data['g12_dropout']
#run clf
for i, j in classifiers.iteritems():
y_pred, poss_times, y_pred_proba = clf_cv_loop(j, x_data, y_data)
eval_clfs(y_pred, y_data, evals, j, i, poss_times, y_pred_proba)
print "End"
main()
|
mit
|
cbmoore/statsmodels
|
statsmodels/datasets/fertility/data.py
|
26
|
2511
|
#! /usr/bin/env python
"""World Bank Fertility Data."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is distributed according to the World Bank terms of use. See SOURCE."""
TITLE = """World Bank Fertility Data"""
SOURCE = """
This data has been acquired from
The World Bank: Fertility rate, total (births per woman): World Development Indicators
At the following URL: http://data.worldbank.org/indicator/SP.DYN.TFRT.IN
The sources for these statistics are listed as
(1) United Nations Population Division. World Population Prospects
(2) United Nations Statistical Division. Population and Vital Statistics Report (various years)
(3) Census reports and other statistical publications from national statistical offices
(4) Eurostat: Demographic Statistics
(5) Secretariat of the Pacific Community: Statistics and Demography Programme
(6) U.S. Census Bureau: International Database
The World Bank Terms of Use can be found at the following URL
http://go.worldbank.org/OJC02YMLA0
"""
DESCRSHORT = """Total fertility rate represents the number of children that would be born to a woman if she were to live to the end of her childbearing years and bear children in accordance with current age-specific fertility rates."""
DESCRLONG = DESCRSHORT
#suggested notes
NOTE = """
::
This is panel data in wide-format
Number of observations: 219
Number of variables: 58
Variable name definitions:
Country Name
Country Code
Indicator Name - The World Bank Series indicator
Indicator Code - The World Bank Series code
1960 - 2013 - The fertility rate for the given year
"""
import numpy as np
import pandas as pd
from statsmodels.datasets import utils as du
from os.path import dirname, abspath, join
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
names = data.columns.tolist()
    dtype = list(zip(names, ['a45', 'a3', 'a40', 'a14'] + ['<f8'] * 54))
    data = list(map(tuple, data.values.tolist()))
dataset = du.Dataset(data=np.array(data, dtype=dtype), names=names)
return dataset
def load_pandas():
data = _get_data()
return du.Dataset(data=data)
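# Example usage (illustrative sketch):
#
#     import statsmodels.api as sm
#     df = sm.datasets.fertility.load_pandas().data
#     df.head()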
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = pd.read_csv(join(filepath, 'fertility.csv'))
return data
|
bsd-3-clause
|
zaxliu/deepnap
|
experiments/kdd-exps/experiment_DynaQtable_Feb6_2008.py
|
1
|
4624
|
# System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:4])
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
class Dyna_QAgent(DynaMixin, QAgent):
def __init__(self, **kwargs):
super(Dyna_QAgent, self).__init__(**kwargs)
# Parameters
# |- Data
location = 'dmW'
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - No Phi
phi_length = 0
dim_state = (1, 1, 3)
range_state = ((((0, 10), (0, 10), (0, 10)),),)
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 10, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
stride, n_iter, adjust_offset,
eval_period, eval_len,
n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 5
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_{}.dat'.format(location),
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
agent = Dyna_QAgent(
env_model=env_model, num_sim=num_sim,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
|
bsd-3-clause
|
vascotenner/holoviews
|
holoviews/core/element.py
|
1
|
25281
|
import operator
from itertools import groupby
import numpy as np
import param
from .dimension import Dimension, Dimensioned, ViewableElement
from .layout import Composable, Layout, NdLayout
from .ndmapping import OrderedDict, NdMapping
from .overlay import Overlayable, NdOverlay, CompositeOverlay
from .spaces import HoloMap, GridSpace
from .tree import AttrTree
from .util import (dimension_sort, get_param_values, dimension_sanitizer,
unique_array)
class Element(ViewableElement, Composable, Overlayable):
"""
Element is the baseclass for all ViewableElement types, with an x- and
y-dimension. Subclasses should define the data storage in the
constructor, as well as methods and properties, which define how
the data maps onto the x- and y- and value dimensions.
"""
group = param.String(default='Element', constant=True)
def hist(self, dimension=None, num_bins=20, bin_range=None,
adjoin=True, individually=True, **kwargs):
"""
The hist method generates a histogram to be adjoined to the
Element in an AdjointLayout. By default the histogram is
computed along the first value dimension of the Element,
however any dimension may be selected. The number of bins and
the bin_ranges and any kwargs to be passed to the histogram
operation may also be supplied.
"""
from ..operation import histogram
if not isinstance(dimension, list): dimension = [dimension]
hists = []
for d in dimension[::-1]:
hist = histogram(self, num_bins=num_bins, bin_range=bin_range,
adjoin=False, individually=individually,
dimension=d, **kwargs)
hists.append(hist)
if adjoin:
layout = self
for didx in range(len(dimension)):
layout = layout << hists[didx]
elif len(dimension) > 1:
layout = Layout(hists)
else:
layout = hists[0]
return layout
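    # Hedged usage sketch (assumes the usual ``import holoviews as hv`` setup):
    #     curve = hv.Curve([(i, i**2) for i in range(10)])
    #     layout = curve.hist(num_bins=10)   # Curve with an adjoined Histogram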
#======================#
# Subclassable methods #
#======================#
def __getitem__(self, key):
        if key == ():
return self
else:
raise NotImplementedError("%s currently does not support getitem" %
type(self).__name__)
@classmethod
def collapse_data(cls, data, function=None, kdims=None, **kwargs):
"""
Class method to collapse a list of data matching the
data format of the Element type. By implementing this
method HoloMap can collapse multiple Elements of the
same type. The kwargs are passed to the collapse
function. The collapse function must support the numpy
style axis selection. Valid function include:
np.mean, np.sum, np.product, np.std, scipy.stats.kurtosis etc.
Some data backends also require the key dimensions
to aggregate over.
"""
raise NotImplementedError("Collapsing not implemented for %s." % cls.__name__)
def closest(self, coords):
"""
Class method that returns the exact keys for a given list of
coordinates. The supplied bounds defines the extent within
which the samples are drawn and the optional shape argument is
the shape of the numpy array (typically the shape of the .data
attribute) when applicable.
"""
return coords
def sample(self, samples=[], **sample_values):
"""
Base class signature to demonstrate API for sampling Elements.
        To sample an Element supply either a list of samples or keyword
arguments, where the key should match an existing key dimension
on the Element.
"""
raise NotImplementedError
def reduce(self, dimensions=[], function=None, **reduce_map):
"""
Base class signature to demonstrate API for reducing Elements,
using some reduce function, e.g. np.mean, which is applied
along a particular Dimension. The dimensions and reduce functions
should be passed as keyword arguments or as a list of dimensions
and a single function.
"""
raise NotImplementedError
def _reduce_map(self, dimensions, function, reduce_map):
if dimensions and reduce_map:
raise Exception("Pass reduced dimensions either as an argument "
"or as part of the kwargs not both.")
if len(set(reduce_map.values())) > 1:
raise Exception("Cannot define reduce operations with more than "
"one function at a time.")
sanitized_dict = {dimension_sanitizer(kd): kd
for kd in self.dimensions('key', True)}
if reduce_map:
reduce_map = reduce_map.items()
if dimensions:
reduce_map = [(d, function) for d in dimensions]
elif not reduce_map:
reduce_map = [(d, function) for d in self.kdims]
reduced = [(d.name if isinstance(d, Dimension) else d, fn)
for d, fn in reduce_map]
sanitized = [(sanitized_dict.get(d, d), fn) for d, fn in reduced]
grouped = [(fn, [dim for dim, _ in grp]) for fn, grp in groupby(sanitized, lambda x: x[1])]
return grouped[0]
def table(self, datatype=None):
"""
Converts the data Element to a Table, optionally may
specify a supported data type. The default data types
are 'numpy' (for homogeneous data), 'dataframe', and
'dictionary'.
"""
if datatype and not isinstance(datatype, list):
datatype = [datatype]
from ..element import Table
return Table(self, **(dict(datatype=datatype) if datatype else {}))
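    # Hedged example: ``element.table(datatype='dataframe')`` should yield a Table
    # backed by a pandas DataFrame (datatype names as per the docstring above).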
def dframe(self, dimensions=None):
import pandas as pd
column_names = dimensions if dimensions else self.dimensions(label=True)
dim_vals = OrderedDict([(dim, self[dim]) for dim in column_names])
return pd.DataFrame(dim_vals)
def mapping(self, kdims=None, vdims=None, **kwargs):
length = len(self)
if not kdims: kdims = self.kdims
if kdims:
keys = zip(*[self.dimension_values(dim.name)
for dim in self.kdims])
else:
keys = [()]*length
if not vdims: vdims = self.vdims
if vdims:
values = zip(*[self.dimension_values(dim.name)
for dim in vdims])
else:
values = [()]*length
data = zip(keys, values)
overrides = dict(kdims=kdims, vdims=vdims, **kwargs)
return NdElement(data, **dict(get_param_values(self), **overrides))
def array(self, dimensions=[]):
if dimensions:
dims = [self.get_dimension(d) for d in dimensions]
else:
dims = [d for d in self.kdims + self.vdims if d != 'Index']
columns, types = [], []
for dim in dims:
column = self.dimension_values(dim)
columns.append(column)
types.append(column.dtype.kind)
if len(set(types)) > 1:
columns = [c.astype('object') for c in columns]
return np.column_stack(columns)
class Tabular(Element):
"""
Baseclass to give an NdMapping objects an API to generate a
table representation.
"""
__abstract = True
@property
def rows(self):
return len(self) + 1
@property
def cols(self):
return len(self.dimensions())
def pprint_cell(self, row, col):
"""
Get the formatted cell value for the given row and column indices.
"""
ndims = self.ndims
if col >= self.cols:
raise Exception("Maximum column index is %d" % self.cols-1)
elif row >= self.rows:
raise Exception("Maximum row index is %d" % self.rows-1)
elif row == 0:
if col >= ndims:
if self.vdims:
return str(self.vdims[col - ndims])
else:
return ''
return str(self.kdims[col])
else:
dim = self.get_dimension(col)
values = self[dim.name]
return dim.pprint_value(values[row-1])
def cell_type(self, row, col):
"""
Returns the cell type given a row and column index. The common
basic cell types are 'data' and 'heading'.
"""
return 'heading' if row == 0 else 'data'
class Element2D(Element):
extents = param.Tuple(default=(None, None, None, None),
doc="""Allows overriding the extents
of the Element in 2D space defined as four-tuple
defining the (left, bottom, right and top) edges.""")
class NdElement(NdMapping, Tabular):
"""
An NdElement is an Element that stores the contained data as
an NdMapping. In addition to the usual multi-dimensional keys
of NdMappings, NdElements also support multi-dimensional
values. The values held in a multi-valued NdElement are tuples,
where each component of the tuple maps to a column as described
by the value dimensions parameter.
In other words, the data of a NdElement are partitioned into two
groups: the columns based on the key and the value columns that
contain the components of the value tuple.
One feature of NdElements is that they support an additional level of
index over NdMappings: the last index may be a column name or a
slice over the column names (using alphanumeric ordering).
"""
group = param.String(default='NdElement', constant=True, doc="""
The group is used to describe the NdElement.""")
vdims = param.List(default=[Dimension('Data')], doc="""
The dimension description(s) of the values held in data tuples
that map to the value columns of the table.
Note: String values may be supplied in the constructor which
will then be promoted to Dimension objects.""")
_deep_indexable = False
_sorted = False
def __init__(self, data=None, **params):
if isinstance(data, list) and all(np.isscalar(el) for el in data):
data = (((k,), (v,)) for k, v in enumerate(data))
if isinstance(data, Element):
params = dict(get_param_values(data), **params)
mapping = data if isinstance(data, NdElement) else data.mapping()
data = mapping.data
if 'kdims' not in params:
params['kdims'] = mapping.kdims
elif 'Index' not in params['kdims']:
params['kdims'] = ['Index'] + params['kdims']
if 'vdims' not in params:
params['vdims'] = mapping.vdims
kdims = params.get('kdims', self.kdims)
if (data is not None and not isinstance(data, NdMapping)
and 'Index' not in kdims):
params['kdims'] = ['Index'] + list(kdims)
data_items = data.items() if isinstance(data, dict) else data
data = [((i,)+((k,) if np.isscalar(k) else k), v) for i, (k, v) in enumerate(data_items)]
super(NdElement, self).__init__(data, **params)
@property
def shape(self):
return (len(self), len(self.dimensions()))
def reindex(self, kdims=None, vdims=None, force=False):
"""
Create a new object with a re-ordered set of dimensions.
Allows converting key dimensions to value dimensions
and vice versa.
"""
if vdims is None:
if kdims is None:
return super(NdElement, self).reindex(force=force)
else:
vdims = [d for d in self.vdims if d not in kdims]
elif kdims is None:
kdims = [d for d in self.dimensions() if d not in vdims]
if 'Index' not in kdims: kdims = ['Index'] + kdims
key_dims = [self.get_dimension(k) for k in kdims]
val_dims = [self.get_dimension(v) for v in vdims]
kidxs = [(i, k in self.kdims, self.get_dimension_index(k))
for i, k in enumerate(kdims)]
vidxs = [(i, v in self.kdims, self.get_dimension_index(v))
for i, v in enumerate(vdims)]
getter = operator.itemgetter(0)
items = []
for k, v in self.data.items():
if key_dims:
_, key = zip(*sorted(((i, k[idx] if iskey else v[idx-self.ndims])
for i, iskey, idx in kidxs), key=getter))
else:
key = ()
if val_dims:
_, val = zip(*sorted(((i, k[idx] if iskey else v[idx-self.ndims])
for i, iskey, idx in vidxs), key=getter))
else:
val = ()
items.append((key, val))
reindexed = self.clone(items, kdims=key_dims, vdims=val_dims)
if not force and len(reindexed) != len(items):
raise KeyError("Cannot reindex as not all resulting keys are unique.")
return reindexed
def _add_item(self, key, value, sort=True, update=True):
if np.isscalar(value):
value = (value,)
elif not isinstance(value, NdElement):
value = tuple(value)
if len(value) != len(self.vdims) and not isinstance(value, NdElement):
raise ValueError("%s values must match value dimensions"
% type(self).__name__)
super(NdElement, self)._add_item(key, value, sort, update)
def _filter_columns(self, index, col_names):
"Returns the column names specified by index (which may be a slice)"
if isinstance(index, slice):
cols = [col for col in sorted(col_names)]
if index.start:
cols = [col for col in cols if col > index.start]
if index.stop:
cols = [col for col in cols if col < index.stop]
cols = cols[::index.step] if index.step else cols
elif isinstance(index, (set, list)):
nomatch = [val for val in index if val not in col_names]
if nomatch:
raise KeyError("No columns with dimension labels %r" % nomatch)
cols = [col for col in col_names if col in index]
elif index not in col_names:
raise KeyError("No column with dimension label %r" % index)
else:
            cols = [index]
        if cols == []:
raise KeyError("No columns selected in the given slice")
return cols
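    # Note on _filter_columns above: slice bounds are exclusive and compared
    # alphanumerically, i.e. slice('a', 'c') keeps sorted column names strictly
    # between 'a' and 'c', optionally strided by ``slice.step``.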
def _filter_data(self, subtable, vdims):
"""
Filters value dimensions in the supplied NdElement data.
"""
if isinstance(subtable, tuple): subtable = {(): subtable}
col_names = self.dimensions('value', label=True)
cols = self._filter_columns(vdims, col_names)
indices = [col_names.index(col) for col in cols]
vdims = [self.vdims[i] for i in indices]
items = [(k, tuple(v[i] for i in indices))
for (k,v) in subtable.items()]
return subtable.clone(items, vdims=vdims)
def __getitem__(self, args):
"""
In addition to usual NdMapping indexing, NdElements can be indexed
by column name (or a slice over column names)
"""
if isinstance(args, np.ndarray) and args.dtype.kind == 'b':
return NdMapping.__getitem__(self, args)
elif args in self.dimensions():
return self.dimension_values(args)
if not isinstance(args, tuple): args = (args,)
ndmap_index = args[:self.ndims]
val_index = args[self.ndims:]
if val_index:
if len(val_index) == 1 and val_index[0] in self.vdims:
val_index = val_index[0]
else:
reindexed = self.reindex(self.kdims+list(self.vdims))
subtable = reindexed[args]
if not val_index or not isinstance(val_index, tuple):
subtable = NdMapping.__getitem__(self, ndmap_index)
if isinstance(subtable, NdElement) and all(np.isscalar(idx) for idx in ndmap_index[1:]):
if len(subtable) == 1:
subtable = list(subtable.data.values())[0]
if not isinstance(subtable, NdElement):
if len(self.vdims) > 1:
subtable = self.__class__([(args[1:], subtable)], label=self.label,
kdims=self.kdims[1:], vdims=self.vdims)
else:
if np.isscalar(subtable):
return subtable
return subtable[0]
if val_index and not isinstance(val_index, tuple):
return self._filter_data(subtable, args[-1])
else:
return subtable
def sort(self, by=[]):
if not isinstance(by, list): by = [by]
if not by: by = range(self.ndims)
indexes = [self.get_dimension_index(d) for d in by]
return self.clone(dimension_sort(self.data, self.kdims, self.vdims,
False, indexes, self._cached_index_values))
def sample(self, samples=[]):
"""
Allows sampling of the Table with a list of samples.
"""
sample_data = []
offset = 0
for i, sample in enumerate(samples):
sample = (sample,) if np.isscalar(sample) else sample
value = self[(slice(None),)+sample]
if isinstance(value, NdElement):
for idx, (k, v) in enumerate(value.data.items()):
sample_data.append(((i+offset+idx,)+k, v))
offset += idx
else:
sample_data.append(((i+offset,)+sample, (value,)))
return self.clone(sample_data)
def aggregate(self, dimensions, function, **kwargs):
"""
        Aggregates the value dimensions over the supplied key dimensions
        using the given aggregation function.
"""
rows = []
grouped = self.groupby(dimensions) if len(dimensions) else HoloMap({(): self}, kdims=[])
for k, group in grouped.data.items():
reduced = []
for vdim in self.vdims:
data = group[vdim.name]
if isinstance(function, np.ufunc):
reduced.append(function.reduce(data, **kwargs))
else:
reduced.append(function(data, **kwargs))
rows.append((k, tuple(reduced)))
return self.clone(rows, kdims=grouped.kdims)
def dimension_values(self, dim, expanded=True, flat=True):
dim = self.get_dimension(dim, strict=True)
value_dims = self.dimensions('value', label=True)
if dim.name in value_dims:
index = value_dims.index(dim.name)
vals = np.array([v[index] for v in self.data.values()])
return vals if expanded else unique_array(vals)
else:
return NdMapping.dimension_values(self, dim.name,
expanded, flat)
def values(self):
" Returns the values of all the elements."
values = self.data.values()
if len(self.vdims) == 1:
return [v[0] for v in values]
return list(values)
class Element3D(Element2D):
extents = param.Tuple(default=(None, None, None,
None, None, None),
doc="""Allows overriding the extents of the Element
in 3D space defined as (xmin, ymin, zmin,
xmax, ymax, zmax).""")
class Collator(NdElement):
"""
Collator is an NdMapping type which can merge any number
of HoloViews components with whatever level of nesting
by inserting the Collators key dimensions on the HoloMaps.
If the items in the Collator do not contain HoloMaps
they will be created. Collator also supports filtering
of Tree structures and dropping of constant dimensions.
"""
drop = param.List(default=[], doc="""
List of dimensions to drop when collating data, specified
as strings.""")
drop_constant = param.Boolean(default=False, doc="""
Whether to demote any non-varying key dimensions to
constant dimensions.""")
filters = param.List(default=[], doc="""
List of paths to drop when collating data, specified
as strings or tuples.""")
group = param.String(default='Collator')
progress_bar = param.Parameter(default=None, doc="""
The progress bar instance used to report progress. Set to
None to disable progress bars.""")
merge_type = param.ClassSelector(class_=NdMapping, default=HoloMap,
is_instance=False,instantiate=False)
value_transform = param.Callable(default=None, doc="""
If supplied the function will be applied on each Collator
value during collation. This may be used to apply an operation
to the data or load references from disk before they are collated
into a displayable HoloViews object.""")
vdims = param.List(default=[], doc="""
Collator operates on HoloViews objects, if vdims are specified
a value_transform function must also be supplied.""")
_deep_indexable = False
_auxiliary_component = False
_nest_order = {HoloMap: ViewableElement,
GridSpace: (HoloMap, CompositeOverlay, ViewableElement),
NdLayout: (GridSpace, HoloMap, ViewableElement),
NdOverlay: Element}
def __call__(self):
"""
Filter each Layout in the Collator with the supplied
path_filters. If merge is set to True all Layouts are
merged, otherwise an NdMapping containing all the
Layouts is returned. Optionally a list of dimensions
to be ignored can be supplied.
"""
constant_dims = self.static_dimensions
ndmapping = NdMapping(kdims=self.kdims)
num_elements = len(self)
for idx, (key, data) in enumerate(self.data.items()):
if isinstance(data, AttrTree):
data = data.filter(self.filters)
if len(self.vdims):
vargs = dict(zip(self.dimensions('value', label=True), data))
data = self.value_transform(vargs)
if not isinstance(data, Dimensioned):
raise ValueError("Collator values must be Dimensioned objects "
"before collation.")
dim_keys = zip(self.kdims, key)
varying_keys = [(d, k) for d, k in dim_keys if not self.drop_constant or
(d not in constant_dims and d not in self.drop)]
constant_keys = [(d, k) for d, k in dim_keys if d in constant_dims
and d not in self.drop and self.drop_constant]
if varying_keys or constant_keys:
data = self._add_dimensions(data, varying_keys,
dict(constant_keys))
ndmapping[key] = data
if self.progress_bar is not None:
self.progress_bar(float(idx+1)/num_elements*100)
components = ndmapping.values()
accumulator = ndmapping.last.clone(components[0].data)
for component in components:
accumulator.update(component)
return accumulator
def _add_item(self, key, value, sort=True, update=True):
NdMapping._add_item(self, key, value, sort, update)
@property
def static_dimensions(self):
"""
Return all constant dimensions.
"""
dimensions = []
for dim in self.kdims:
if len(set(self[dim.name])) == 1:
dimensions.append(dim)
return dimensions
def _add_dimensions(self, item, dims, constant_keys):
"""
Recursively descend through an Layout and NdMapping objects
in order to add the supplied dimension values to all contained
HoloMaps.
"""
if isinstance(item, Layout):
item.fixed = False
dim_vals = [(dim, val) for dim, val in dims[::-1]
if dim not in self.drop]
if isinstance(item, self.merge_type):
new_item = item.clone(cdims=constant_keys)
for dim, val in dim_vals:
dim = dim if isinstance(dim, Dimension) else Dimension(dim)
if dim not in new_item.kdims:
new_item = new_item.add_dimension(dim, 0, val)
elif isinstance(item, self._nest_order[self.merge_type]):
if len(dim_vals):
dimensions, key = zip(*dim_vals)
new_item = self.merge_type({key: item}, kdims=dimensions,
cdims=constant_keys)
else:
new_item = item
else:
new_item = item.clone(shared_data=False, cdims=constant_keys)
for k, v in item.items():
new_item[k] = self._add_dimensions(v, dims[::-1], constant_keys)
if isinstance(new_item, Layout):
new_item.fixed = True
return new_item
__all__ = list(set([_k for _k, _v in locals().items()
if isinstance(_v, type) and issubclass(_v, Dimensioned)]))
|
bsd-3-clause
|
hrjn/scikit-learn
|
sklearn/utils/tests/test_metaestimators.py
|
86
|
2304
|
from sklearn.utils.testing import assert_true, assert_false
from sklearn.utils.metaestimators import if_delegate_has_method
class Prefix(object):
def func(self):
pass
class MockMetaEstimator(object):
"""This is a mock meta estimator"""
a_prefix = Prefix()
@if_delegate_has_method(delegate="a_prefix")
def func(self):
"""This is a mock delegated function"""
pass
def test_delegated_docstring():
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.__dict__['func'].__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.func.__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator().func.__doc__))
class MetaEst(object):
"""A mock meta estimator"""
def __init__(self, sub_est, better_sub_est=None):
self.sub_est = sub_est
self.better_sub_est = better_sub_est
@if_delegate_has_method(delegate='sub_est')
def predict(self):
pass
class MetaEstTestTuple(MetaEst):
"""A mock meta estimator to test passing a tuple of delegates"""
@if_delegate_has_method(delegate=('sub_est', 'better_sub_est'))
def predict(self):
pass
class MetaEstTestList(MetaEst):
"""A mock meta estimator to test passing a list of delegates"""
@if_delegate_has_method(delegate=['sub_est', 'better_sub_est'])
def predict(self):
pass
class HasPredict(object):
"""A mock sub-estimator with predict method"""
def predict(self):
pass
class HasNoPredict(object):
"""A mock sub-estimator with no predict method"""
pass
def test_if_delegate_has_method():
assert_true(hasattr(MetaEst(HasPredict()), 'predict'))
assert_false(hasattr(MetaEst(HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasNoPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestTuple(HasPredict(), HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestList(HasNoPredict(), HasPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestList(HasPredict(), HasPredict()), 'predict'))
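# Hedged illustration (not part of the original tests): the decorator's point is
# that ``hasattr`` on a meta-estimator mirrors its sub-estimator's capabilities,
# which is how scikit-learn duck-types optional methods. ``WrappedEstimator`` is
# a hypothetical name used only for this sketch.
class WrappedEstimator(object):
    def __init__(self, estimator):
        self.estimator = estimator
    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        # Only exposed when the wrapped estimator itself defines ``predict``.
        return self.estimator.predict(X)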
|
bsd-3-clause
|
sangwook236/SWDT
|
sw_dev/python/ext/test/documentation/pdf2image_test.py
|
2
|
1426
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# REF [site] >> https://github.com/Belval/pdf2image
import pdf2image
import matplotlib.pyplot as plt
def basic_example():
pdf_filepath = '/path/to/example.pdf'
try:
# PIL images.
images = pdf2image.convert_from_path(pdf_filepath, dpi=200, output_folder=None, first_page=None, last_page=None, fmt='ppm')
#images = pdf2image.convert_from_bytes(open(pdf_filepath, 'rb').read(), dpi=200, output_folder=None, first_page=None, last_page=None, fmt='ppm')
except pdf2image.exceptions.PDFInfoNotInstalledError as ex:
print('PDFInfoNotInstalledError in {}: {}.'.format(pdf_filepath, ex))
return
except pdf2image.exceptions.PDFPageCountError as ex:
print('PDFPageCountError in {}: {}.'.format(pdf_filepath, ex))
return
except pdf2image.exceptions.PDFSyntaxError as ex:
print('PDFSyntaxError in {}: {}.'.format(pdf_filepath, ex))
return
except FileNotFoundError as ex:
print('File not found, {}: {}.'.format(pdf_filepath, ex))
return
print('#images = {}.'.format(len(images)))
for idx, img in enumerate(images):
print('Image #{}: Size = {}, mode = {}.'.format(idx, img.size, img.mode))
#img.save('./pdf2image_{}.png'.format(idx))
#img.show()
plt.imshow(img)
plt.axis('off')
plt.tight_layout()
plt.show()
def main():
basic_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
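# Hedged variation (comments only, not exercised by main()): for large PDFs it can
# help to let pdf2image write the page images straight to disk instead of keeping
# every PIL image in memory. ``output_folder`` and ``fmt`` are documented
# convert_from_path parameters; the target directory below is hypothetical.
#
# def save_pages_example():
#     images = pdf2image.convert_from_path('/path/to/example.pdf', dpi=200, output_folder='./pages', fmt='png')
#     print('{} page images written to ./pages.'.format(len(images)))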
|
gpl-3.0
|
nvoron23/statsmodels
|
statsmodels/graphics/tests/test_regressionplots.py
|
6
|
9233
|
'''Tests for regressionplots, entire module is skipped
'''
import numpy as np
import nose
import statsmodels.api as sm
from statsmodels.graphics.regressionplots import (plot_fit, plot_ccpr,
plot_partregress, plot_regress_exog, abline_plot,
plot_partregress_grid, plot_ccpr_grid, add_lowess,
plot_added_variable, plot_partial_residuals,
plot_ceres_residuals)
from pandas import Series, DataFrame
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except ImportError:
have_matplotlib = False
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_regressionplots.pdf")
else:
pdf = None
def setup():
if not have_matplotlib:
raise nose.SkipTest('No tests here')
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
plt.close(fig)
def teardown_module():
plt.close('all')
if pdf_output:
pdf.close()
class TestPlot(object):
def __init__(self):
self.setup() #temp: for testing without nose
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
res = sm.OLS(y, exog0).fit()
self.res = res
def test_plot_fit(self):
res = self.res
fig = plot_fit(res, 0, y_true=None)
x0 = res.model.exog[:, 0]
yf = res.fittedvalues
y = res.model.endog
px1, px2 = fig.axes[0].get_lines()[0].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(y, px2)
px1, px2 = fig.axes[0].get_lines()[1].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(yf, px2)
close_or_save(pdf, fig)
def test_plot_oth(self):
#just test that they run
res = self.res
endog = res.model.endog
exog = res.model.exog
plot_fit(res, 0, y_true=None)
plot_partregress_grid(res, exog_idx=[0,1])
plot_regress_exog(res, exog_idx=0)
plot_ccpr(res, exog_idx=0)
plot_ccpr_grid(res, exog_idx=[0])
fig = plot_ccpr_grid(res, exog_idx=[0,1])
for ax in fig.axes:
add_lowess(ax)
close_or_save(pdf, fig)
class TestPlotPandas(TestPlot):
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
exog0 = DataFrame(exog0, columns=["const", "var1", "var2"])
y = Series(y, name="outcome")
res = sm.OLS(y, exog0).fit()
self.res = res
class TestABLine(object):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
mod = sm.OLS(y,X).fit()
cls.X = X
cls.y = y
cls.mod = mod
def test_abline_model(self):
fig = abline_plot(model_results=self.mod)
ax = fig.axes[0]
ax.scatter(self.X[:,1], self.y)
close_or_save(pdf, fig)
def test_abline_model_ax(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(model_results=self.mod, ax=ax)
close_or_save(pdf, fig)
def test_abline_ab(self):
mod = self.mod
intercept, slope = mod.params
fig = abline_plot(intercept=intercept, slope=slope)
close_or_save(pdf, fig)
def test_abline_ab_ax(self):
mod = self.mod
intercept, slope = mod.params
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(intercept=intercept, slope=slope, ax=ax)
close_or_save(pdf, fig)
class TestABLinePandas(TestABLine):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
cls.X = X
cls.y = y
X = DataFrame(X, columns=["const", "someX"])
y = Series(y, name="outcome")
mod = sm.OLS(y,X).fit()
cls.mod = mod
class TestAddedVariablePlot(object):
def test_added_variable_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
lin_pred = 4 + exog[:, 0] + 0.2*exog[:, 1]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 0, 1, 2:
for use_glm_weights in False, True:
for resid_type in "resid_deviance", "resid_response":
weight_str = ["Unweighted", "Weighted"][use_glm_weights]
# Run directly and called as a results method.
for j in 0,1:
if j == 0:
fig = plot_added_variable(results, focus_col,
use_glm_weights=use_glm_weights,
resid_type=resid_type)
ti = "Added variable plot"
else:
fig = results.plot_added_variable(focus_col,
use_glm_weights=use_glm_weights,
resid_type=resid_type)
ti = "Added variable plot (called as method)"
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.7])
effect_str = ["Linear effect, slope=1",
"Quadratic effect", "No effect"][focus_col]
ti += "\nPoisson regression\n"
ti += effect_str + "\n"
ti += weight_str + "\n"
ti += "Using '%s' residuals" % resid_type
ax.set_title(ti)
close_or_save(pdf, fig)
class TestPartialResidualPlot(object):
def test_partial_residual_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
lin_pred = 4 + exog[:, 1] + 0.2*exog[:, 2]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 1, 2:
for j in 0,1:
if j == 0:
fig = plot_partial_residuals(results, focus_col)
else:
fig = results.plot_partial_residuals(focus_col)
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.77])
effect_str = ["Intercept", "Linear effect, slope=1",
"Quadratic effect"][focus_col]
ti = "Partial residual plot"
if j == 1:
ti += " (called as method)"
ax.set_title(ti + "\nPoisson regression\n" +
effect_str)
close_or_save(pdf, fig)
class TestCERESPlot(object):
def test_ceres_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
lin_pred = 4 + exog[:, 1] + 0.2*exog[:, 2]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 1, 2:
for j in 0, 1:
if j == 0:
fig = plot_ceres_residuals(results, focus_col)
else:
fig = results.plot_ceres_residuals(focus_col)
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.77])
effect_str = ["Intercept", "Linear effect, slope=1",
"Quadratic effect"][focus_col]
ti = "CERES plot"
if j == 1:
ti += " (called as method)"
ax.set_title(ti + "\nPoisson regression\n" +
effect_str)
close_or_save(pdf, fig)
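# Hedged, standalone usage sketch (not part of the original test-suite): a minimal
# version of the workflow the tests above exercise, guarded so it only runs when
# this file is executed directly and matplotlib is available.
if __name__ == "__main__":
    if have_matplotlib:
        nobs = 50
        x_demo = np.random.uniform(0, 10, size=nobs)
        y_demo = 1.0 + 0.5 * x_demo + np.random.normal(scale=0.5, size=nobs)
        demo_res = sm.OLS(y_demo, sm.add_constant(x_demo)).fit()
        demo_fig = plot_fit(demo_res, 1)   # fitted vs. observed against x_demo
        plt.show()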
|
bsd-3-clause
|
ericmjl/bokeh
|
examples/webgl/clustering.py
|
1
|
2140
|
''' Example inspired by one from the scikit-learn project:
http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html
'''
import numpy as np
from sklearn import cluster, datasets
from sklearn.preprocessing import StandardScaler
from bokeh.layouts import column, row
from bokeh.plotting import figure, output_file, show
print("\n\n*** This example may take several seconds to run before displaying. ***\n\n")
N = 50000
PLOT_SIZE = 400
# generate datasets.
np.random.seed(0)
noisy_circles = datasets.make_circles(n_samples=N, factor=.5, noise=.04)
noisy_moons = datasets.make_moons(n_samples=N, noise=.05)
centers = [(-2, 3), (2, 3), (-2, -3), (2, -3)]
blobs1 = datasets.make_blobs(centers=centers, n_samples=N, cluster_std=0.4, random_state=8)
blobs2 = datasets.make_blobs(centers=centers, n_samples=N, cluster_std=0.7, random_state=8)
colors = np.array([x for x in ('#00f', '#0f0', '#f00', '#0ff', '#f0f', '#ff0')])
colors = np.hstack([colors] * 20)
# create clustering algorithms
dbscan = cluster.DBSCAN(eps=.2)
birch = cluster.Birch(n_clusters=2)
means = cluster.MiniBatchKMeans(n_clusters=2)
spectral = cluster.SpectralClustering(n_clusters=2, eigen_solver='arpack', affinity="nearest_neighbors")
affinity = cluster.AffinityPropagation(damping=.9, preference=-200)
# change here, to select clustering algorithm (note: spectral is slow)
algorithm = dbscan # <- SELECT ALG
plots = []
for dataset in (noisy_circles, noisy_moons, blobs1, blobs2):
X, y = dataset
X = StandardScaler().fit_transform(X)
# predict cluster memberships
algorithm.fit(X)
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
p = figure(output_backend="webgl", title=algorithm.__class__.__name__,
plot_width=PLOT_SIZE, plot_height=PLOT_SIZE)
p.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), alpha=0.1,)
plots.append(p)
# generate layout for the plots
layout = column(row(plots[:2]), row(plots[2:]))
output_file("clustering.html", title="clustering with sklearn")
show(layout)
|
bsd-3-clause
|
Walter1218/rpn_detector_tf
|
train.py
|
1
|
4133
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from datetime import datetime
import os.path
import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
from config import *
from netarch import *
import batch_generate
import pandas as pd
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
tf.app.flags.DEFINE_string('pretrained_model_path', './ResNet/ResNet-50-weights.pkl',"""Path to the pretrained model.""")
tf.app.flags.DEFINE_string('train_dir', './tf_detection',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000,
"""Maximum number of batches to run.""")
tf.app.flags.DEFINE_integer('checkpoint_step', 1000,
"""Number of steps to save summary.""")
def train():
with tf.Graph().as_default():
mc = model_parameters()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = ResNet50(mc, FLAGS.gpu)
saver = tf.train.Saver(tf.global_variables())
summary_op = tf.summary.merge_all()
init = tf.global_variables_initializer()
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)
        # Restore an existing checkpoint (if any) only after the session exists.
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
data = pd.read_csv('voc_xywh.csv')
#data = pd.read_csv('FDDB2XYWH.csv')
data = data.drop('Unnamed: 0', 1)
#TODO, add trainval split code here;
img_channel_mean = [103.939, 116.779, 123.68]
for step in xrange(FLAGS.max_steps):
start_time = time.time()
i_line = np.random.randint(len(data))
name_str, img, bb_boxes = batch_generate.get_img_by_name(data, i_line, size = (960, 640), dataset = 'PASCAL_VOC')#,dataset = 'FDDB')
#print(bb_boxes)
#Normalize
img = img.astype(np.float32)
img[:, :, 0] -= img_channel_mean[0]
img[:, :, 1] -= img_channel_mean[1]
img[:, :, 2] -= img_channel_mean[2]
img_per_batch = np.expand_dims(img, axis = 0)
anchor_box = np.expand_dims(mc.ANCHOR_BOX, axis = 0)
#if(mc.cls):
# labels, bbox_targets, bbox_inside_weights, bbox_outside_weights, cls_map = batch_generate.target_label_generate(bb_boxes, anchor_box, mc, DEBUG = False)
#else:
labels, bbox_targets, bbox_inside_weights, bbox_outside_weights , groundtruth = batch_generate.target_label_generate(bb_boxes, anchor_box, mc, DEBUG = False)
feed_dict = {
model.image_input : img_per_batch,
model.keep_prob : mc.KEEP_PROB,
model.target_label : np.expand_dims(labels, axis = 0),
model.target_delta : np.expand_dims(bbox_targets, axis = 0),
model.bbox_in_weight : np.expand_dims(bbox_inside_weights, axis = 0),
model.bbox_out_weight : np.expand_dims(bbox_outside_weights, axis = 0),
model.gt_boxes : groundtruth,
#model.cls_map: np.expand_dims(cls_map, axis = 0),# for end2end classification
}
losses = sess.run([model._losses, model.train_op], feed_dict = feed_dict)
print('the training step is {0}, and losses is {1}'.format(step, losses))
if step % FLAGS.checkpoint_step == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
train()
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
ryandougherty/mwa-capstone
|
MWA_Tools/build/matplotlib/examples/pylab_examples/fonts_demo.py
|
12
|
2765
|
#!/usr/bin/env python
"""
Show how to set custom font properties.
For interactive users, you can also use kwargs to the text command,
which requires less typing. See examples/fonts_demo_kw.py
"""
from matplotlib.font_manager import FontProperties
from pylab import *
subplot(111, axisbg='w')
font0 = FontProperties()
alignment = {'horizontalalignment':'center', 'verticalalignment':'baseline'}
### Show family options
family = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
font1 = font0.copy()
font1.set_size('large')
t = text(-0.8, 0.9, 'family', fontproperties=font1,
**alignment)
yp = [0.7, 0.5, 0.3, 0.1, -0.1, -0.3, -0.5]
for k in range(5):
font = font0.copy()
font.set_family(family[k])
if k == 2:
font.set_name('Script MT')
t = text(-0.8, yp[k], family[k], fontproperties=font,
**alignment)
### Show style options
style = ['normal', 'italic', 'oblique']
t = text(-0.4, 0.9, 'style', fontproperties=font1,
**alignment)
for k in range(3):
font = font0.copy()
font.set_family('sans-serif')
font.set_style(style[k])
t = text(-0.4, yp[k], style[k], fontproperties=font,
**alignment)
### Show variant options
variant= ['normal', 'small-caps']
t = text(0.0, 0.9, 'variant', fontproperties=font1,
**alignment)
for k in range(2):
font = font0.copy()
font.set_family('serif')
font.set_variant(variant[k])
t = text( 0.0, yp[k], variant[k], fontproperties=font,
**alignment)
### Show weight options
weight = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
t = text( 0.4, 0.9, 'weight', fontproperties=font1,
**alignment)
for k in range(7):
font = font0.copy()
font.set_weight(weight[k])
t = text( 0.4, yp[k], weight[k], fontproperties=font,
**alignment)
### Show size options
size = ['xx-small', 'x-small', 'small', 'medium', 'large',
'x-large', 'xx-large']
t = text( 0.8, 0.9, 'size', fontproperties=font1,
**alignment)
for k in range(7):
font = font0.copy()
font.set_size(size[k])
t = text( 0.8, yp[k], size[k], fontproperties=font,
**alignment)
### Show bold italic
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-small')
t = text(0, 0.1, 'bold italic', fontproperties=font,
**alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('medium')
t = text(0, 0.2, 'bold italic', fontproperties=font,
**alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-large')
t = text(0, 0.3, 'bold italic', fontproperties=font,
**alignment)
axis([-1,1,0,1])
show()
|
gpl-2.0
|
e-q/scipy
|
scipy/interpolate/polyint.py
|
4
|
24681
|
import numpy as np
from scipy.special import factorial
from scipy._lib._util import _asarray_validated, float_factorial
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator",
"barycentric_interpolate", "approximate_taylor_polynomial"]
def _isscalar(x):
"""Check whether x is if a scalar type, or 0-dim"""
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
class _Interpolator1D(object):
"""
Common features in univariate interpolation
Deal with input data type and interpolation axis rolling. The
actual interpolator can assume the y-data is of shape (n, r) where
`n` is the number of x-points, and `r` the number of variables,
and use self.dtype as the y-data type.
Attributes
----------
_y_axis
Axis along which the interpolation goes in the original array
_y_extra_shape
Additional trailing shape of the input arrays, excluding
the interpolation axis.
dtype
Dtype of the y-data arrays. Can be set via _set_dtype, which
forces it to be float or complex.
Methods
-------
__call__
_prepare_x
_finish_y
_reshape_yi
_set_yi
_set_dtype
_evaluate
"""
__slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
def __init__(self, xi=None, yi=None, axis=None):
self._y_axis = axis
self._y_extra_shape = None
self.dtype = None
if yi is not None:
self._set_yi(yi, xi=xi, axis=axis)
def __call__(self, x):
"""
Evaluate the interpolant
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Input values `x` must be convertible to `float` values like `int`
or `float`.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate(x)
return self._finish_y(y, x_shape)
def _evaluate(self, x):
"""
Actually evaluate the value of the interpolator.
"""
raise NotImplementedError()
def _prepare_x(self, x):
"""Reshape input x array to 1-D"""
x = _asarray_validated(x, check_finite=False, as_inexact=True)
x_shape = x.shape
return x.ravel(), x_shape
def _finish_y(self, y, x_shape):
"""Reshape interpolated y back to an N-D array similar to initial y"""
y = y.reshape(x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = (list(range(nx, nx + self._y_axis))
+ list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
y = y.transpose(s)
return y
def _reshape_yi(self, yi, check=False):
yi = np.rollaxis(np.asarray(yi), self._y_axis)
if check and yi.shape[1:] != self._y_extra_shape:
ok_shape = "%r + (N,) + %r" % (self._y_extra_shape[-self._y_axis:],
self._y_extra_shape[:-self._y_axis])
raise ValueError("Data must be of shape %s" % ok_shape)
return yi.reshape((yi.shape[0], -1))
def _set_yi(self, yi, xi=None, axis=None):
if axis is None:
axis = self._y_axis
if axis is None:
raise ValueError("no interpolation axis specified")
yi = np.asarray(yi)
shape = yi.shape
if shape == ():
shape = (1,)
if xi is not None and shape[axis] != len(xi):
raise ValueError("x and y arrays must be equal in length along "
"interpolation axis.")
self._y_axis = (axis % yi.ndim)
self._y_extra_shape = yi.shape[:self._y_axis]+yi.shape[self._y_axis+1:]
self.dtype = None
self._set_dtype(yi.dtype)
def _set_dtype(self, dtype, union=False):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.dtype, np.complexfloating):
self.dtype = np.complex_
else:
if not union or self.dtype != np.complex_:
self.dtype = np.float_
class _Interpolator1DWithDerivatives(_Interpolator1D):
def derivatives(self, x, der=None):
"""
Evaluate many derivatives of the polynomial at the point x
Produce an array of all derivative values at the point x.
Parameters
----------
x : array_like
Point or points at which to evaluate the derivatives
der : int or None, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points). This number includes the function value as 0th
derivative.
Returns
-------
d : ndarray
Array with derivatives; d[j] contains the jth derivative.
Shape of d[j] is determined by replacing the interpolation
axis in the original array with the shape of x.
Examples
--------
>>> from scipy.interpolate import KroghInterpolator
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
array([1.0,2.0,3.0])
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
array([[1.0,1.0],
[2.0,2.0],
[3.0,3.0]])
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der)
y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = ([0] + list(range(nx+1, nx + self._y_axis+1))
+ list(range(1, nx+1)) +
list(range(nx+1+self._y_axis, nx+ny+1)))
y = y.transpose(s)
return y
def derivative(self, x, der=1):
"""
Evaluate one derivative of the polynomial at the point x
Parameters
----------
x : array_like
Point or points at which to evaluate the derivatives
der : integer, optional
Which derivative to extract. This number includes the
function value as 0th derivative.
Returns
-------
d : ndarray
Derivative interpolated at the x-points. Shape of d is
determined by replacing the interpolation axis in the
original array with the shape of x.
Notes
-----
This is computed by evaluating all derivatives up to the desired
one (using self.derivatives()) and then discarding the rest.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der+1)
return self._finish_y(y[der], x_shape)
class KroghInterpolator(_Interpolator1DWithDerivatives):
"""
Interpolating polynomial for a set of points.
The polynomial passes through all the pairs (xi,yi). One may
additionally specify a number of derivatives at each point xi;
this is done by repeating the value xi and specifying the
derivatives as successive yi values.
Allows evaluation of the polynomial and all its derivatives.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial, although they can be obtained
by evaluating all the derivatives.
Parameters
----------
xi : array_like, length N
Known x-coordinates. Must be sorted in increasing order.
yi : array_like
Known y-coordinates. When an xi occurs two or more times in
a row, the corresponding yi's represent derivative values.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
Be aware that the algorithms implemented here are not necessarily
the most numerically stable known. Moreover, even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon. In general, even with well-chosen
x values, degrees higher than about thirty cause problems with
numerical instability in this code.
Based on [1]_.
References
----------
.. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
and Numerical Differentiation", 1970.
Examples
--------
To produce a polynomial that is zero at 0 and 1 and has
derivative 2 at 0, call
>>> from scipy.interpolate import KroghInterpolator
>>> KroghInterpolator([0,0,1],[0,2,0])
This constructs the quadratic 2*X**2-2*X. The derivative condition
is indicated by the repeated zero in the xi array; the corresponding
yi values are 0, the function value, and 2, the derivative value.
For another example, given xi, yi, and a derivative ypi for each
point, appropriate arrays can be constructed as:
>>> xi = np.linspace(0, 1, 5)
>>> yi, ypi = np.random.rand(2, 5)
>>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
>>> KroghInterpolator(xi_k, yi_k)
To produce a vector-valued polynomial, supply a higher-dimensional
array for yi:
>>> KroghInterpolator([0,1],[[2,3],[4,5]])
This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
"""
def __init__(self, xi, yi, axis=0):
_Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)
self.xi = np.asarray(xi)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
c = np.zeros((self.n+1, self.r), dtype=self.dtype)
c[0] = self.yi[0]
Vk = np.zeros((self.n, self.r), dtype=self.dtype)
for k in range(1, self.n):
s = 0
while s <= k and xi[k-s] == xi[k]:
s += 1
s -= 1
Vk[0] = self.yi[k]/float_factorial(s)
for i in range(k-s):
if xi[i] == xi[k]:
raise ValueError("Elements if `xi` can't be equal.")
if s == 0:
Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
else:
Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
c[k] = Vk[k-s]
self.c = c
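        # ``self.c`` now holds the Newton-form coefficients of the interpolant,
        # i.e. p(x) = c[0] + c[1]*(x-xi[0]) + c[2]*(x-xi[0])*(x-xi[1]) + ...,
        # which is exactly how _evaluate accumulates the result below.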
def _evaluate(self, x):
pi = 1
p = np.zeros((len(x), self.r), dtype=self.dtype)
p += self.c[0,np.newaxis,:]
for k in range(1, self.n):
w = x - self.xi[k-1]
pi = w*pi
p += pi[:,np.newaxis] * self.c[k]
return p
def _evaluate_derivatives(self, x, der=None):
n = self.n
r = self.r
if der is None:
der = self.n
pi = np.zeros((n, len(x)))
w = np.zeros((n, len(x)))
pi[0] = 1
p = np.zeros((len(x), self.r), dtype=self.dtype)
p += self.c[0, np.newaxis, :]
for k in range(1, n):
w[k-1] = x - self.xi[k-1]
pi[k] = w[k-1] * pi[k-1]
p += pi[k, :, np.newaxis] * self.c[k]
cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
cn[0] = p
for k in range(1, n):
for i in range(1, n-k+1):
pi[i] = w[k+i-1]*pi[i-1] + pi[i]
cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
cn[k] *= float_factorial(k)
cn[n, :, :] = 0
return cn[:der]
def krogh_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for polynomial interpolation.
See `KroghInterpolator` for more details.
Parameters
----------
xi : array_like
Known x-coordinates.
yi : array_like
Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
vectors of length R, or scalars if R=1.
x : array_like
Point or points at which to evaluate the derivatives.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
d : ndarray
If the interpolator's values are R-D then the
returned array will be the number of derivatives by N by R.
If `x` is a scalar, the middle dimension will be dropped; if
the `yi` are scalars then the last dimension will be dropped.
See Also
--------
KroghInterpolator : Krogh interpolator
Notes
-----
Construction of the interpolating polynomial is a relatively expensive
process. If you want to evaluate it repeatedly consider using the class
KroghInterpolator (which is what this function uses).
Examples
--------
We can interpolate 2D observed data using krogh interpolation:
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import krogh_interpolate
>>> x_observed = np.linspace(0.0, 10.0, 11)
>>> y_observed = np.sin(x_observed)
>>> x = np.linspace(min(x_observed), max(x_observed), num=100)
>>> y = krogh_interpolate(x_observed, y_observed, x)
>>> plt.plot(x_observed, y_observed, "o", label="observation")
>>> plt.plot(x, y, label="krogh interpolation")
>>> plt.legend()
>>> plt.show()
"""
P = KroghInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
"""
Estimate the Taylor polynomial of f at x by polynomial fitting.
Parameters
----------
f : callable
The function whose Taylor polynomial is sought. Should accept
a vector of `x` values.
x : scalar
The point at which the polynomial is to be evaluated.
degree : int
The degree of the Taylor polynomial
scale : scalar
The width of the interval to use to evaluate the Taylor polynomial.
Function values spread over a range this wide are used to fit the
polynomial. Must be chosen carefully.
order : int or None, optional
The order of the polynomial to be used in the fitting; `f` will be
evaluated ``order+1`` times. If None, use `degree`.
Returns
-------
p : poly1d instance
The Taylor polynomial (translated to the origin, so that
for example p(0)=f(x)).
Notes
-----
The appropriate choice of "scale" is a trade-off; too large and the
function differs from its Taylor polynomial too much to get a good
answer, too small and round-off errors overwhelm the higher-order terms.
The algorithm used becomes numerically unstable around order 30 even
under ideal circumstances.
Choosing order somewhat larger than degree may improve the higher-order
terms.
Examples
--------
    We can calculate Taylor approximation polynomials of the sin function with
various degrees:
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import approximate_taylor_polynomial
>>> x = np.linspace(-10.0, 10.0, num=100)
>>> plt.plot(x, np.sin(x), label="sin curve")
>>> for degree in np.arange(1, 15, step=2):
... sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1,
... order=degree + 2)
... plt.plot(x, sin_taylor(x), label=f"degree={degree}")
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
... borderaxespad=0.0, shadow=True)
>>> plt.tight_layout()
>>> plt.axis([-10, 10, -10, 10])
>>> plt.show()
"""
if order is None:
order = degree
n = order+1
# Choose n points that cluster near the endpoints of the interval in
# a way that avoids the Runge phenomenon. Ensure, by including the
# endpoint or not as appropriate, that one point always falls at x
# exactly.
xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x
P = KroghInterpolator(xs, f(xs))
d = P.derivatives(x,der=degree+1)
return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
class BarycentricInterpolator(_Interpolator1D):
"""The interpolating polynomial for a set of points
Constructs a polynomial that passes through a given set of points.
Allows evaluation of the polynomial, efficient changing of the y
values to be interpolated, and updating by adding more x values.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial.
The values yi need to be provided before the function is
evaluated, but none of the preprocessing depends on them, so rapid
updates are possible.
Parameters
----------
xi : array_like
1-D array of x coordinates of the points the polynomial
should pass through
yi : array_like, optional
The y coordinates of the points the polynomial should pass through.
If None, the y values will be supplied later via the `set_y` method.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
This class uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
"""
def __init__(self, xi, yi=None, axis=0):
_Interpolator1D.__init__(self, xi, yi, axis)
self.xi = np.asfarray(xi)
self.set_yi(yi)
self.n = len(self.xi)
self.wi = np.zeros(self.n)
self.wi[0] = 1
for j in range(1, self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
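        # ``self.wi[j]`` is now the barycentric weight
        # w_j = 1 / prod_{k != j} (xi[j] - xi[k]), as in Berrut & Trefethen (2004).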
def set_yi(self, yi, axis=None):
"""
Update the y values to be interpolated
The barycentric interpolation algorithm requires the calculation
of weights, but these depend only on the xi. The yi can be changed
at any time.
Parameters
----------
yi : array_like
The y coordinates of the points the polynomial should pass through.
If None, the y values will be supplied later.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
"""
if yi is None:
self.yi = None
return
self._set_yi(yi, xi=self.xi, axis=axis)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
def add_xi(self, xi, yi=None):
"""
Add more x values to the set to be interpolated
The barycentric interpolation algorithm allows easy updating by
adding more points for the polynomial to pass through.
Parameters
----------
xi : array_like
The x coordinates of the points that the polynomial should pass
through.
yi : array_like, optional
The y coordinates of the points the polynomial should pass through.
Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
vector-valued.
If `yi` is not given, the y values will be supplied later. `yi` should
be given if and only if the interpolator has y values specified.
"""
if yi is not None:
if self.yi is None:
raise ValueError("No previous yi value to update!")
yi = self._reshape_yi(yi, check=True)
self.yi = np.vstack((self.yi,yi))
else:
if self.yi is not None:
raise ValueError("No update to yi provided!")
old_n = self.n
self.xi = np.concatenate((self.xi,xi))
self.n = len(self.xi)
self.wi **= -1
old_wi = self.wi
self.wi = np.zeros(self.n)
self.wi[:old_n] = old_wi
for j in range(old_n, self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
def __call__(self, x):
"""Evaluate the interpolating polynomial at the points x
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Currently the code computes an outer product between x and the
weights, that is, it constructs an intermediate array of size
N by len(x), where N is the degree of the polynomial.
"""
return _Interpolator1D.__call__(self, x)
def _evaluate(self, x):
if x.size == 0:
p = np.zeros((0, self.r), dtype=self.dtype)
else:
c = x[...,np.newaxis]-self.xi
z = c == 0
c[z] = 1
c = self.wi/c
p = np.dot(c,self.yi)/np.sum(c,axis=-1)[...,np.newaxis]
# Now fix where x==some xi
r = np.nonzero(z)
if len(r) == 1: # evaluation at a scalar
if len(r[0]) > 0: # equals one of the points
p = self.yi[r[0][0]]
else:
p[r[:-1]] = self.yi[r[-1]]
return p
def barycentric_interpolate(xi, yi, x, axis=0):
"""
Convenience function for polynomial interpolation.
Constructs a polynomial that passes through a given set of points,
then evaluates the polynomial. For reasons of numerical stability,
this function does not compute the coefficients of the polynomial.
This function uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the `x` coordinates are chosen very
carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Parameters
----------
xi : array_like
1-D array of x coordinates of the points the polynomial should
pass through
yi : array_like
The y coordinates of the points the polynomial should pass through.
x : scalar or array_like
Points to evaluate the interpolator at.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
y : scalar or array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
See Also
--------
    BarycentricInterpolator : Barycentric interpolator
Notes
-----
Construction of the interpolation weights is a relatively slow process.
If you want to call this many times with the same xi (but possibly
varying yi or x) you should use the class `BarycentricInterpolator`.
This is what this function uses internally.
Examples
--------
We can interpolate 2D observed data using barycentric interpolation:
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import barycentric_interpolate
>>> x_observed = np.linspace(0.0, 10.0, 11)
>>> y_observed = np.sin(x_observed)
>>> x = np.linspace(min(x_observed), max(x_observed), num=100)
>>> y = barycentric_interpolate(x_observed, y_observed, x)
>>> plt.plot(x_observed, y_observed, "o", label="observation")
>>> plt.plot(x, y, label="barycentric interpolation")
>>> plt.legend()
>>> plt.show()
"""
return BarycentricInterpolator(xi, yi, axis=axis)(x)
|
bsd-3-clause
|
TUW-GEO/SMDC-performance
|
docs/conf.py
|
1
|
8859
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
from sphinx import apidoc
import mock
MOCK_MODULES = ['numpy', 'matplotlib', 'pyplot', 'matplotlib.pyplot',
'pygeogrids', 'grids', 'pygeogrids.grids', 'pandas', 'scipy',
'stats','scipy.stats', 'netCDF4']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
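# Mocking these heavy scientific dependencies lets Sphinx/autodoc import the
# package on build machines (e.g. readthedocs) where they are not installed.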
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/_rst")
module_dir = os.path.join(__location__, "../smdc_perftests")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
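# Running sphinx-apidoc here regenerates the API .rst stubs under docs/_rst on
# every build, so the reference documentation follows the package layout.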
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath', 'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SMDC_perftests'
copyright = u'2014, Vienna University of Technology'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed
# from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from smdc_perftests import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'smdc_perftests-doc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'SMDC_perftests Documentation',
u'Christoph Paulik', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ----------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
|
bsd-3-clause
|
kensugino/jGEM
|
jgem/calccov.py
|
1
|
25770
|
"""
.. module:: calccov
:synopsis: calculate coverages, gene length using nnls or
.. moduleauthor:: Ken Sugino <[email protected]>
"""
# system imports
import multiprocessing
import os
import time
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
from itertools import repeat
from functools import reduce
# 3rd party imports
import pandas as PD
import numpy as N
from scipy.optimize import nnls
# library imports
from jgem import utils as UT
from jgem import bigwig as BW
from jgem import gtfgffbed as GGB
### Weighted NLS #########################################################
from numpy import diag, sqrt, dot
def wnnls(A,b,w):
"""Least Square (minimize (b-A*x)^2) solution: x = inv(t(A)*A)*(t(A)*b).
Weighted LS: x = inv(t(A')*A')*(t(A')*b') where:
A' = diag(w)*A
b' = diag(w)*b
"""
dw = diag(w)
Ap = dot(dw, A)
bp = dot(dw, b)
return nnls(Ap,bp)
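# Illustrative sketch (added for clarity, not part of the original module):
# wnnls on a tiny, exactly solvable system. With any positive weights the
# non-negative solution recovers the true coefficients [2., 3.] exactly,
# because the residual can be driven to zero. Call _wnnls_example() manually.
def _wnnls_example():
    A = N.array([[1., 0.], [1., 1.], [0., 1.]])
    b = A.dot(N.array([2., 3.]))
    x_est, err = wnnls(A, b, N.ones(3))
    return x_est, err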
def pnnls(A,b):
"""For Poisson weight (variance) w_i = 1/sqrt(b_i), so
A' = diag(1/sqrt(b))*A
b' = diag(1/sqrt(b))*b = sqrt(b)
Returns:
x: solution
e: error (sqrt-ed)
"""
bp = sqrt(b)
dw = diag(1./(bp+1.)) # +1. to avoid inf
Ap = dot(dw, A)
return nnls(Ap,bp)
### All in one func ##################################################
def calc_cov_ovl_mp(srcname, bwname, dstname, np=1, covciname=None,
ciname=None, colname='cov', override=False):
"""Calculate coverage (from BigWig) over intervals (from srcname).
A column (default 'cov') which contains coverages is added to source dataframe
and the source is overwritten.
Args:
srcname: path to exons tsv
bwname: path to bigwig
dstname: path for result
np: number of processors
covciname: path to covci (coverage for chopped interval dataframe)
ciname: path to ci (chopped interval dataframe)
colname: name for column which contain calculated coverages
Returns:
source dataframe with column (cov) added
SideEffects:
source tsv is overwritten with new column added
"""
if UT.isstring(srcname):
exons = UT.read_pandas(srcname)
else:
exons = srcname
# cache
if covciname is None:
assert(UT.isstring(srcname))
covciname = srcname[:-7]+'.covci.txt.gz'
if ciname is None:
assert(UT.isstring(srcname))
ciname = srcname[:-7]+'.ci.txt.gz'
if override or (not os.path.exists(covciname)):
LOG.debug('calculating covci...')
_sttime = time.time()
if override or not (os.path.exists(ciname)):
ci = UT.chopintervals(exons, ciname)
else:
ci = UT.read_pandas(ciname, names=['chr','st','ed','name','id'])
ci['name'] = ci['name'].astype(str)
covci = calc_cov_mp(ci,bwname,covciname,np)
LOG.debug(' time: {0:.3f}s'.format(time.time()-_sttime))
else:
LOG.debug('loading cached covci...')
covci = UT.read_pandas(covciname)
covci['name'] = covci['name'].astype(str)
# covci: chopped interval's cov => reverse
    # ci => exon id ====> reverse: exon => ci indices
# exon cov = sum(cicov*cilen)/totlen
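    # e.g. two chopped intervals of an exon with lengths 100 and 50 and
    # coverages 4 and 2 give exon cov = (4*100 + 2*50)/150 = 10/3
    # (worked example added for clarity)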
LOG.debug('calculating exon cov...')
if 'id' not in covci.columns:
covci['id'] = covci['sc1']
_sttime = time.time()
e2c = {}
for i,name in covci[['id','name']].values:
for eid in name.split(','):
e2c.setdefault(int(eid),[]).append(i)
covci['len'] = covci['ed']-covci['st']
covci['val'] = covci['cov']*covci['len']
def _gen():
for eid in exons['_id']:
for cid in e2c[eid]:
yield (cid,eid)
tmp = PD.DataFrame(list(set([x for x in _gen()])),columns=['cid','eid'])
c2len = dict(covci[['id','len']].values)
c2val = dict(covci[['id','val']].values)
tmp['val'] = [c2val[x] for x in tmp['cid']]
tmp['len'] = [c2len[x] for x in tmp['cid']]
tmpg = tmp.groupby('eid')[['val','len']].sum().reset_index()
tmpg['cov'] = tmpg['val']/tmpg['len']
e2cov = dict(tmpg[['eid','cov']].values)
exons[colname] = [e2cov[x] for x in exons['_id']]
UT.save_tsv_nidx_whead(exons, dstname)
return exons
### ecov calc with nnls ################################################
def mp_worker(args):
func, arg = args
return func(*arg)
def calc_ecov_chrom(covci,blocksize):
# covci: required cols
# name (eid str concat ',')
# name1 ([eids,...])
# id: cid
# cov: coverage for cid
    # 2017-05-19 _find_chunk wasn't finding the correct boundary, so some exons were put into separate equations and produced wrong values
covci = covci.sort_values('id')
if 'name1' not in covci.columns:
covci['name1'] = covci['name'].astype(str).apply(lambda x: [int(y) for y in x.split(',')])
if 'eidmax' not in covci.columns:
covci['eidmax'] = covci['name1'].apply(lambda x:max(x))
if 'eidmin' not in covci.columns:
covci['eidmin'] = covci['name1'].apply(lambda x:min(x))
# find boundary fix
covci['next_eidmin'] = list(covci['eidmin'].values[1:])+[N.max(covci['eidmax'])+1]
covci['boundary'] = covci['eidmax']<covci['next_eidmin'] # eid's are separate here
boundaries = covci['boundary'].values
e2c = {} # dict
# find chunk boundary with approximate blocksize
def _find_chunk(st, bs):
# first take stcid => stcid+blocksize
        # reduce to a self-contained chunk => find an isolated cid<=>eid (len(name1)==1) instance <= 2017-05-19 this assumption is not true (BUG!)
# if final size is too small increase blocksize and try again
if st+bs+1>=len(covci):
return len(covci)
for x in range(st+bs,int(st+bs/2),-1):
#if len(covci['name1'].iloc[x])==1:# found isolated exon
if boundaries[x]==True:
return x+1
# not found => increase blocksize
        if bs>500: # blocksize too big => NNLS takes too much time
LOG.warning('blocksize exceeded 500')
return st+bs
return _find_chunk(st, 2*bs)
# calc ecov for a chunk
def _calc_ecov(st,ed):
# construct matrix
c = covci.iloc[st:ed]
emax = reduce(max,c['eidmax'].values, 0) # why not just use e['eidmax'].max()?
emin = reduce(min,c['eidmin'].values, c['eidmax'].max())
nc = len(c)
ne = emax+1-emin
mat = N.zeros((nc,ne))
for i,n1 in enumerate(c['name1'].values):# fill in rows
N.put(mat[i], N.array(n1)-emin, 1)
# solve by NNLS (nonnegative least square)
#ecov,err = nnls(mat, c['cov'].values)
# 2017-05-19 nnls => pnnls
ecov,err = pnnls(mat, c['cov'].values)
e2c.update(dict(zip(range(emin,emax+1),ecov)))# e -> cov
def catcherr(stcid, blocksize):
bs = blocksize
done = False
while((bs>0)&(not done)):
edcid = _find_chunk(stcid,bs)
try:
_calc_ecov(stcid,edcid)
done = True
except RuntimeError:
bs = int(bs/2)
                if bs==0: # unsuccessful => set to zero
e2c[stcid] = 0.
LOG.warning('nnls did not converge for {0}, setting to 0'.format(stcid))
return stcid+1
return edcid
# iterate
stcid=0
while(stcid<len(covci)):
#edcid = _find_chunk(stcid,blocksize)
#_calc_ecov(stcid,edcid)
#stcid = edcid
stcid = catcherr(stcid, blocksize)
return e2c
def calc_ecov_mp(covci,fname,np,blocksize=100):
"""
    WARNING: this assumes _id is assigned according to sorted (chr,st,ed)
"""
LOG.debug('calc_ecov...')
chroms = sorted(covci['chr'].unique())
if 'name1' not in covci.columns:
covci['name1'] = covci['name'].astype(str).apply(lambda x: [int(y) for y in x.split(',')])
if 'eidmax' not in covci.columns:
covci['eidmax'] = covci['name1'].apply(lambda x:max(x))
if 'eidmin' not in covci.columns:
covci['eidmin'] = covci['name1'].apply(lambda x:min(x))
args = [(covci[covci['chr']==c].copy(), blocksize) for c in chroms]
e2cs = {}
if np==1:
# for c,bwname,chrom,d in data:
for arg in args:
e2cs.update(calc_ecov_chrom(*arg))
else:
try:
p = multiprocessing.Pool(np)
rslts = p.map(mp_worker, zip(repeat(calc_ecov_chrom), args))
finally:
LOG.debug('closing pool')
p.close()
for x in rslts:
e2cs.update(x)
LOG.debug('writing rslts...')
if fname is None:
return e2cs
ccf = UT.flattendf(covci, 'name1')
ccfg = ccf.groupby('name1')
e2chr = dict(UT.izipcols(ccfg['chr'].first().reset_index(), ['name1','chr']))
e2st = dict(UT.izipcols(ccfg['st'].min().reset_index(), ['name1','st']))
e2ed = dict(UT.izipcols(ccfg['ed'].max().reset_index(), ['name1','ed']))
df = PD.DataFrame(e2cs,index=['ecov']).T
df.index.name='eid'
df = df.reset_index()
df['chr'] = [e2chr[x] for x in df['eid']]
df['st'] = [e2st[x] for x in df['eid']]
df['ed'] = [e2ed[x] for x in df['eid']]
UT.save_tsv_nidx_whead(df[['eid','chr','st','ed','ecov']], fname)
return df
### gcov, gmax calc low level ##########################################
def worker_cov(c,bwname,chrom, path):
it = BW.block_iter(bwname, chrom)
recs = calc_cov_chrom(c,it)
return recs
def worker_max(c,bwname,chrom, path):
it = BW.block_iter(bwname, chrom)
recs = calc_max_chrom(c,it)
return recs
def calc_cov_mp(bed, bwname, fname, np, which='cov'):
if which=='cov':
worker=worker_cov
elif which=='max':
worker=worker_max
if UT.isstring(bed):
bed = GGB.read_bed(bed)
#cols = list(bed.columns)+['cov']
cols = list(bed.columns)+[which]
chroms = bed['chr'].unique()
#LOG.debug(chroms)
cdir = os.path.dirname(__file__)
data = [(bed[bed['chr']==c].copy(), bwname, c, cdir) for c in chroms]
recs = []
if np==1:
# for c,bwname,chrom,d in data:
for arg in data:
LOG.debug('cov calculation: processing {0}...'.format(arg[-2]))
recs += worker(*arg)
else:
LOG.debug('{1} calculation: np={0}'.format(np,which))
try:
p = multiprocessing.Pool(np)
a = zip(repeat(worker), data)
rslts = p.map(mp_worker, a)
for v in rslts:
recs += v
LOG.debug('done {1} calculation: np={0}'.format(np,which))
finally:
LOG.debug('closing pool')
p.close()
#p.join()
#recs = reduce(iadd, rslts)
LOG.debug('writing rslts...')
df = PD.DataFrame(recs,columns=cols)
UT.save_tsv_nidx_whead(df, fname)
return df
def calc_max_chrom(c, b_iter):
nomore = False
try:
start,end,value = next(b_iter)
except StopIteration:
nomore = True
prev = None
rslts = [None]*len(c)
for i, row in enumerate(c.values): #c.iterrows():
row = list(row)
st,ed = row[1],row[2]
# advance until intersect
if not nomore:
while(end<st):
if prev is None:
try:
start,end,value=next(b_iter)
except StopIteration:
nomore = True
break
else:
start,end,value = curr
prev = None
if nomore:
row += [0.]
rslts[i] = row
else:
# once intersect collect values
v = 0.
prev = (start,end,value) # remember one before
while(start<ed):
st0 = max(start,st-1)
ed0 = min(end,ed)
#v += value*(ed0 - st0)
v = max(v, value)
prev = (start,end,value) # remember one before
try:
start,end,value=next(b_iter)
except StopIteration:
nomore=True
break
#row += [v/float(ed-st+1)]
row += [v]
rslts[i] = row
curr = start,end,value
start,end,value = prev
return rslts
def calc_cov_chrom(c, b_iter):
nomore = False
try:
start,end,value = next(b_iter)
except StopIteration:
nomore = True
prev = None
rslts = [None]*len(c)
for i, row in enumerate(c.values): #c.iterrows():
row = list(row)
st,ed = row[1],row[2]
# advance until intersect
if not nomore:
while(end<=st): # equal boundary important, otherwise if end==st it will skip one interval
if prev is None:
try:
start,end,value=next(b_iter)
except StopIteration:
nomore = True
break
else:
start,end,value = curr
prev = None
if nomore:
row += [0.]
rslts[i] = row
else:
# once intersect collect values
v = 0.
prev = (start,end,value) # remember one before
while(start<ed):
st0 = max(start,st)
ed0 = min(end,ed)
v += value*(ed0 - st0)
prev = (start,end,value) # remember one before
try:
start,end,value=next(b_iter)
except StopIteration:
nomore=True
break
row += [v/float(ed-st)]
rslts[i] = row
curr = start,end,value
start,end,value = prev
return rslts
### high level ##################################################
def calc_sjcnt(sjpath1, sjpath2, dstprefix, override=False, np=4):
pass
# [TODO] don't output chr,st,ed, only eid, ecov
def calc_ecov(expath, cipath, bwpath, dstprefix, blocksize=100, override=False, np=4):
"""Calculate exon coverages.
Args:
expath: merged ex
cipath: chopped interval for ex
bwpath: bigwig file (sample)
dstprefix: prefix for outputs
Outputs:
1. dstprefix+'.covci.txt.gz': coverage for ci
2. dstprefix+'.ecov.txt.gz' : DataFrame(cols: eid, chr, st, ed, ecov)
"""
covcipath = dstprefix+'covci.txt.gz'
ecovpath = dstprefix+'ecov.txt.gz'
ex = UT.read_pandas(expath)
if UT.notstale([expath, cipath], covcipath, override):
cc = UT.read_pandas(covcipath)
else:
if UT.notstale(expath, cipath, False): # you do not want to override ci
ci = UT.read_pandas(cipath, names=['chr','st','ed','name','id'])
else:
#ex = UT.read_pandas(expath)
ci = UT.chopintervals(ex, cipath, idcol='_id')
cc = calc_cov_mp(ci, bwpath, covcipath, np=np)
# ex = UT.read_pandas(expath)
# if 'locus2' not in ex:
# ex['locus2'] = UT.calc_locus_strand(ex)
# if '_id' not in ex:
# UT.set_ids(ex)
# e2l = UT.df2dict(ex, '_id', 'locus2')
# ex2 = ex.groupby('locus2').first().reset_index()
# # maps: eid (_id) <=> locus2
# if UT.notstale([expath, cipath], covcipath, override):
# cc = UT.read_pandas(covcipath)
# else:
# if UT.notstale(expath, cipath, False): # you do not want to override ci
# ci = UT.read_pandas(cipath, names=['chr','st','ed','name','id'])
# else:
# ci = UT.chopintervals(ex2, cipath, idcol='_id')
# cc = calc_cov_mp(ci, bwpath, covcipath, np=np)
# if override or (not os.path.exists(covcipath)):
# # calc covci
# if not os.path.exists(cipath):
# ex = UT.read_pandas(expath)
# ci = UT.chopintervals(ex, cipath, idcol='_id')
# else:
# ci = UT.read_pandas(cipath, names=['chr','st','ed','name','id'])
# cc = calc_cov_mp(ci, bwpath, covcipath, np=np)
# else:
# cc = UT.read_pandas(covcipath)
if 'id' not in cc.columns:
cc['id'] = cc['sc1']
if 'pid' not in cc.columns:
cc['pid'] = cc['name'].astype(str).apply(lambda x: [int(y) for y in x.split(',')])
cc['name1'] = cc['pid']
#ccf = UT.flattendf(cc[['chr','st','ed','pid']], 'pid')
#ccfg = ccf.groupby('eid')
#df = ccfg[['chr']].first()
#df['st'] = ccfg['st'].min()
#df['ed'] = ccfg['ed'].max()
#df.reset_index(inplace=True)
df = ex[['_id','_pid']].rename(columns={'_id':'eid','_pid':'pid'})
e2cs = calc_ecov_mp(cc, None, np, blocksize) # pid => cov
# l2cs = {e2l[x]: e2cs[x] for x in e2cs} # locus2 => cov
# ex['ecov'] = [l2cs[x] for x in ex['locus2']]
df['ecov'] = [e2cs[x] for x in df['pid']]
# UT.save_tsv_nidx_whead(ex[['_id','ecov']], ecovpath)
# return ex
UT.save_tsv_nidx_whead(df[['eid','pid','ecov']], ecovpath)
return df
# [TODO] only output _gidx, gcov
def calc_gcov(expath, cipath, bwpath, dstprefix, override=False, np=4):
"""Calculate gene coverages.
Args:
expath: merged ex
cipath: chopped interval for ex
bwpath: bigwig file (sample)
dstprefix: prefix for outputs
Outputs:
1. dstprefix+'.covci.txt.gz'
2. dstprefix+'.gcov.txt.gz' : DataFrame(col:_gidx,len,val,gcov,len2,gcov2,cids)
len2: calculate length from ci with cov > 0
(normal length = use entire ci's belonging to the gene)
gcov2 = val/len2
        cids: cids with cov > 0 for the gene, ','.joined
"""
ex = UT.read_pandas(expath)
covcipath = dstprefix+'covci.txt.gz'
gcovpath = dstprefix+'gcov.txt.gz'
if UT.notstale([expath, cipath], covcipath, override):
cc = UT.read_pandas(covcipath)
else:
if UT.notstale(expath, cipath, False):
ci = UT.read_pandas(cipath, names=['chr','st','ed','name','id'])
else:
ci = UT.chopintervals(ex, cipath, idcol='_id')
cc = calc_cov_mp(ci, bwpath, covcipath, np=np)
# if override or (not os.path.exists(covcipath)):
# # calc covci
# if not os.path.exists(cipath):
# ci = UT.chopintervals(ex, cipath, idcol='_id')
# else:
# ci = UT.read_pandas(cipath, names=['chr','st','ed','name','id'])
# cc = calc_cov_mp(ci, bwpath, covcipath, np=np)
# else:
# cc = UT.read_pandas(covcipath)
if 'id' not in cc.columns:
cc['id'] = cc['sc1']
if 'eid' not in cc.columns:
cc['eid'] = cc['name'].astype(str).apply(lambda x: [int(y) for y in x.split(',')])
cc['len'] = cc['ed']-cc['st']
cc['val'] = cc['cov']*cc['len']
ccf = UT.flattendf(cc[['id','eid','len','val','st','ed']], 'eid')
e2g = dict(UT.izipcols(ex, ['_id','_gidx']))
ccf['_gidx'] = [e2g[x] for x in ccf['eid']]
# for normal gcov: take unique combination of (gid, id) (id=cid)
# for gocv2 : first select ccf with val>0
ccf2 = ccf[ccf['val']>0].groupby(['_gidx','id']).first().reset_index()
ccf2g = ccf2.groupby('_gidx')
df2 = ccf2g[['len','val']].sum()
df2['gcov2'] = df2['val']/df2['len']
df2['cids'] = ccf2g['id'].apply(lambda x: ','.join([str(y) for y in x]))
df2['gst2'] = ccf2g['st'].min()
df2['ged2'] = ccf2g['ed'].max()
df2['glen2'] = df2['ged2']-df2['gst2']
df2 = df2.reset_index()
ccf1 = ccf.groupby(['_gidx','id']).first().reset_index()
ccf1g = ccf1.groupby('_gidx')
df = ccf1g[['len','val']].sum()
df['gcov'] = df['val']/df['len']
df['st'] = ccf1g['st'].min()
df['ed'] = ccf1g['ed'].max()
df['glen'] = df['ed'] - df['st']
df = df.reset_index()
g2chr = dict(UT.izipcols(ex, ['_gidx','chr']))
df['chr'] = [g2chr[x] for x in df['_gidx']]
def _set_df2prop(src,tgt, default):
dic = dict(UT.izipcols(df2, ['_gidx',src]))
df[tgt] = [dic.get(x,default) for x in df['_gidx']]
_set_df2prop('gcov2','gcov2', 0)
_set_df2prop('len','len2', 0)
_set_df2prop('cids','cids', '')
_set_df2prop('gst2','st2', -1)
_set_df2prop('ged2','ed2', -1)
_set_df2prop('glen2','glen2', 0)
cols = ['_gidx','chr','st','ed','len','val','gcov','glen','len2','gcov2','cids','st2','ed2','glen2']
cols = ['_gidx', 'gcov']
df = df[cols]
UT.save_tsv_nidx_whead(df, gcovpath)
return df
# just use trimed ex to calculate gcov using calc_gcov
# def calc_gcov1000(expath, cipath, bwpath, dstprefix, override=False, np=4):
# """Calculate gene coverage but only use 1000bp from 3' end.
# Args:
# expath: merged ex
# cipath: chopped interval for ex
# bwpath: bigwig file (sample)
# dstprefix: prefix for outputs
# 1. dstprefix+'.covci.txt.gz'
# 2. dstprefix+'.gcov.txt.gz' : DataFrame(col:_gidx,len,val,gcov,cids)
# cids: cid with cov > for the gene ','.joined
# """
# ex = UT.read_pandas(expath)
# covcipath = dstprefix+'.covci.txt.gz'
# gcovpath = dstprefix+'.gcov1000.txt.gz'
# if override or (not os.path.exists(covcipath)):
# # calc covci
# if not os.path.exists(cipath):
# ci = UT.chopintervals(ex, cipath, idcol='_id')
# else:
# ci = UT.read_pandas(cipath, names=['chr','st','ed','name','id'])
# cc = calc_cov_mp(ci, bwpath, covcipath, np=np)
# else:
# cc = UT.read_pandas(covcipath)
# if 'id' not in cc.columns:
# cc['id'] = cc['sc1']
# if 'eid' not in cc.columns:
# cc['eid'] = cc['name'].astype(str).apply(lambda x: map(int, x.split(',')))
# cc['len'] = cc['ed']-cc['st']
# cc['val'] = cc['cov']*cc['len']
# ccf = UT.flattendf(cc[['id','eid','len','val','st','ed']], 'eid')
# e2g = dict(UT.izipcols(ex, ['_id','_gidx']))
# ccf['_gidx'] = [e2g[x] for x in ccf['eid']]
# # for normal gcov: take unique combination of (gid, id) (id=cid)
# ccf1 = ccf.groupby(['_gidx','id']).first().reset_index()
# # MODIFICATION to only use <cumsum 2000bp
# g2strand = dict(UT.izipcols(ex.groupby('_gidx').first().reset_index(), ['_gidx','strand']))
# ccf1['strand'] = [g2strand[x] for x in ccf1['_gidx']]
# ccf1 = ccf1.sort_values(['_gidx','ed','st'],ascending=False)
# ccf1['cumsum+'] = ccf1.groupby('_gidx')['len'].cumsum()
# ccf1 = ccf1.sort_values(['_gidx','st','ed'],ascending=True)
# ccf1['cumsum-'] = ccf1.groupby('_gidx')['len'].cumsum()
# ccf1 = ccf1.sort_values(['_gidx','ed','st'],ascending=False)
# size1p = ccf1.groupby('_gidx')['cumsum+'].first().to_frame('v').reset_index()
# g2s1p = dict(UT.izipcols(size1p, ['_gidx','v']))
# ccf1 = ccf1.sort_values(['_gidx','st','ed'],ascending=True)
# size1n = ccf1.groupby('_gidx')['cumsum-'].first().to_frame('v').reset_index()
# g2s1n = dict(UT.izipcols(size1n, ['_gidx','v']))
# ccf1['size1+'] = [g2s1p[x] for x in ccf1['_gidx']]
# ccf1['size1-'] = [g2s1n[x] for x in ccf1['_gidx']]
# idx =(((ccf1['strand']=='+')&((ccf1['cumsum+']<2000)|(ccf1['size1+']>=2000)))|
# ((ccf1['strand']!='+')&((ccf1['cumsum-']<2000)|(ccf1['size1-']>=2000))))
# ccf2 = ccf1[idx]
# ccf1g = ccf2.groupby('_gidx')
# df = ccf1g[['len','val']].sum()
# df['gcov'] = df['val']/df['len']
# df['st'] = ccf1g['st'].min()
# df['ed'] = ccf1g['ed'].max()
# df['glen'] = df['ed'] - df['st']
# df['cids'] = ccf1g['id'].apply(lambda x: ','.join(map(str, x)))
# df = df.reset_index()
# g2chr = dict(UT.izipcols(ex, ['_gidx','chr']))
# df['chr'] = [g2chr[x] for x in df['_gidx']]
# cols = ['_gidx','chr','st','ed','len','val','gcov','glen','cids']
# df = df[cols]
# UT.save_tsv_nidx_whead(df, gcovpath)
# return df
# old method
# def calc_gene_cov(ex, cc, gidfld='_gidx'):
# """
# ex: exon df
# cc: covci df
# return dfg ['val','len','cov']
# """
# if 'id' not in cc.columns:
# cc['id'] = cc['sc1']
# if 'cid' not in ex.columns:
# e2c = {}
# for i,name in cc[['id','name']].values:
# for eid in map(int, name.split(',')):
# e2c.setdefault(eid,[]).append(i)
# ex['cid'] = [e2c[x] for x in ex['_id']]
# # flatten
# def _gen():
# for g,cids in UT.izipcols(ex, [gidfld,'cid']):
# for c in cids:
# yield (c,g)
# df = PD.DataFrame(list(set([x for x in _gen()])),columns=['cid',gidfld])
# cc['len'] = cc['ed'] - cc['st']
# cc['val'] = cc['cov']*cc['len']
# c2len = dict(cc[['id','len']].values)
# c2val = dict(cc[['id','val']].values)
# df['len'] = [c2len[x] for x in df['cid']]
# df['val'] = [c2val[x] for x in df['cid']]
# dfg = df.groupby(gidfld)[['val','len']].sum()
# dfg['cov'] = dfg['val']/dfg['len']
# return dfg
def calc_glen(ex, cipath):
ci = GGB.read_bed(cipath) # 5 col bed, name:eids, sc1:cid
ci['len'] = ci['ed']-ci['st']
ci['cid'] = ci['sc1']
c2l = dict(UT.izipcols(ci, ['cid','len']))
if 'cid' not in ex.columns:
e2c = {}
for i,name in ci[['cid','name']].values:
for eid in name.split(','):
e2c.setdefault(int(eid),[]).append(i)
ex['cid'] = [e2c[x] for x in ex['_id']]
def _gen():
for g,cids in UT.izipcols(ex, ['_gidx','cid']):
for c in cids:
yield (c,g)
df = PD.DataFrame(list(set([x for x in _gen()])),columns=['cid','_gidx'])
df['len'] = [c2l[x] for x in df['cid']]
glen = df.groupby('_gidx')['len'].sum()
return dict(zip(glen.index, glen.values))
|
mit
|
XiaoxiaoLiu/morphology_analysis
|
bigneuron/pearsonr_votemap.py
|
1
|
2720
|
__author__ = 'xiaoxiaoliu'
import pandas as pd
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
WORK_PATH = "/Users/xiaoxiaoliu/work"
p = WORK_PATH + '/src/morphology_analysis'
sys.path.append(p)
import blast_neuron.blast_neuron_comp as bn
import glob
import libtiff3d
from scipy.stats.stats import pearsonr
def read_tif(tif_file):
t = libtiff3d.TIFF3D.open(tif_file)
im = t.read_image() # z_dim, y_dim, x_dim
return im
gold_image_dir = WORK_PATH+"/data/gold79/origin_data"
gold_images = glob.glob(gold_image_dir+'/*/*/*.tif')
votemaps_dir = WORK_PATH+"/data/20151030_rhea_reconstructions_for_allen300_silver_set/votemaps"
vote_images = glob.glob(votemaps_dir+'/*.tif')
images=[]
gold_image_files=[]
vote_image_files=[]
pearsons=[]
dim_x=[]
dim_y=[]
dim_z=[]
pval=[]
for vote_image_file in vote_images:
Iv= read_tif(vote_image_file)
image_name = vote_image_file.split('/')[-1]
image_name = image_name.split('recons.ano')[0]
image_name = ".".join(image_name.split('.')[1:])
for gold_image_file in gold_images:
if image_name in gold_image_file:
Ig=read_tif(gold_image_file)
siz_x = min(Ig.shape[0], Iv.shape[0])
siz_y = min(Ig.shape[1], Iv.shape[1])
siz_z = min(Ig.shape[2], Iv.shape[2])
Ig_match = Ig[0:siz_x, 0:siz_y,0:siz_z]
Iv_match = Iv[0:siz_x, 0:siz_y,0:siz_z]
if (Ig.shape[0] < Iv.shape[0]) or (Ig.shape[1] < Iv.shape[1]) or (Ig.shape[2] < Iv.shape[2]):
print "wrong dim"
print Ig_match.shape
print Iv_match.shape
# plt.figure()
# plt.imshow(Iv_match[Iv_match.shape[0]/2,:,:])
# plt.show()
# plt.figure()
# plt.imshow(Ig_match[Ig_match.shape[0]/2,:,:])
# plt.show()
pr = pearsonr(Iv_match.flatten(), Ig_match.flatten())
            pvalue = pr[1]  # p-value returned by pearsonr (was a placeholder 0)
pearsons.append(pr[0])
pval.append(pvalue)
dim_x.append(Ig.shape[2])
dim_y.append(Ig.shape[1])
dim_z.append(Ig.shape[0])
images.append(image_name)
gold_image_files.append(gold_image_file)
vote_image_files.append(vote_image_file)
df=pd.DataFrame()
df['image'] = pd.Series(images)
df['gold_image_file'] = pd.Series(gold_image_files)
df['votemap_image_file'] = pd.Series(vote_image_files)
df['pearsonr'] = pd.Series(pearsons)
df['pval']=pd.Series(pval)
df['dim_x'] = pd.Series(dim_x)
df['dim_y'] = pd.Series(dim_y)
df['dim_z'] = pd.Series(dim_z)
df.to_csv(votemaps_dir+"/pearsonr.csv", index=False)
|
gpl-3.0
|
demiangomez/Parallel.GAMIT
|
classes/pyZTD.py
|
1
|
7998
|
# -*- coding: utf-8 -*-
"""
Project: Parallel.GAMIT
Date: 6/18/20 14:28
Author: Demian D. Gomez
"""
import numpy as np
from pyETM import Polynomial
from pyETM import Periodic
from pyDate import Date
from scipy.stats import chi2
LIMIT = 2.5
class ZtdSoln(object):
def __init__(self, cnn, NetworkCode, StationCode, project):
self.rs = cnn.query_float('SELECT "Year", "DOY", "Date", "ZTD" FROM gamit_ztd '
'WHERE "Project" = \'%s\' AND "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\' '
'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"'
% (project, NetworkCode, StationCode), as_dict=True)
self.date = [Date(datetime=r['Date']) for r in self.rs]
self.t = np.array([d.fyear for d in self.date])
ts = np.arange(np.min(self.date[0].mjd), np.max(self.date[-1].mjd) + 1, 1)
self.ts = np.array([Date(mjd=tts).fyear for tts in ts])
self.ztd = np.array([r['ZTD'] for r in self.rs])
self.type = 'ztd'
self.stack_name = None
class Ztd(object):
def __init__(self, cnn, NetworkCode, StationCode, project, plotit=False):
self.NetworkCode = NetworkCode
self.StationCode = StationCode
self.soln = ZtdSoln(cnn, NetworkCode, StationCode, project)
# fit linear and periodic
self.polynomial = Polynomial(cnn, NetworkCode, StationCode, self.soln, self.soln.t)
self.periodic = Periodic(cnn, NetworkCode, StationCode, self.soln, self.soln.t)
shape = (self.polynomial.design.shape[0], self.polynomial.param_count + self.periodic.param_count)
self.A = np.ndarray(shape)
self.A[:, self.polynomial.column_index] = self.polynomial.design
# determine the column_index for all objects
col_index = self.polynomial.param_count
self.periodic.column_index = np.arange(col_index, col_index + self.periodic.param_count)
self.A[:, self.periodic.column_index] = self.periodic.design
x, sigma, index, residuals, fact, _ = self.adjust_lsq(self.A, self.soln.ztd)
self.C = np.array(x)
self.F = np.array(index)
self.R = np.array(residuals)
self.factor = np.array(fact)
# continuous solution
shape = (self.soln.ts.shape[0], self.polynomial.param_count + self.periodic.param_count)
self.As = np.ndarray(shape)
self.As[:, self.polynomial.column_index] = self.polynomial.get_design_ts(self.soln.ts)
self.As[:, self.periodic.column_index] = self.periodic.get_design_ts(self.soln.ts)
if plotit:
self.plot()
def plot(self, pngfile=None, t_win=None, residuals=False, plot_missing=True,
ecef=False, plot_outliers=True, fileio=None):
import matplotlib.pyplot as plt
# determine the window of the plot, if requested
if t_win is not None:
if type(t_win) is tuple:
# data range, with possibly a final value
if len(t_win) == 1:
t_win = (t_win[0], self.soln.t.max())
else:
# approximate a day in fyear
t_win = (self.soln.t.max() - t_win / 365.25, self.soln.t.max())
fig, ax = plt.subplots(figsize=(15, 7))
ax.set_title('Zenith total delay %s.%s' % (self.NetworkCode, self.StationCode))
if residuals:
ax.plot(self.soln.t, self.soln.ztd - np.dot(self.A, self.C), 'ob', markersize=2)
else:
ax.plot(self.soln.t, self.soln.ztd, 'ob', markersize=2)
ax.plot(self.soln.ts, np.dot(self.As, self.C), 'r')
ax.set_ylabel('ZTD [m]')
ax.grid(True)
# window data
self.set_lims(t_win, plt, ax)
plt.savefig('test.png')
plt.close()
def set_lims(self, t_win, plt, ax):
if t_win is None:
# turn on to adjust the limits, then turn off to plot jumps
ax.autoscale(enable=True, axis='x', tight=False)
ax.autoscale(enable=False, axis='x', tight=False)
ax.autoscale(enable=True, axis='y', tight=False)
ax.autoscale(enable=False, axis='y', tight=False)
else:
if t_win[0] == t_win[1]:
t_win[0] = t_win[0] - 1./365.25
t_win[1] = t_win[1] + 1./365.25
plt.xlim(t_win)
self.autoscale_y(ax)
@staticmethod
def autoscale_y(ax, margin=0.1):
"""This function rescales the y-axis based on the data that is visible given the current xlim of the axis.
ax -- a matplotlib axes object
margin -- the fraction of the total height of the y-data to pad the upper and lower ylims"""
def get_bottom_top(line):
xd = line.get_xdata()
yd = line.get_ydata()
lo, hi = ax.get_xlim()
y_displayed = yd[((xd > lo) & (xd < hi))]
h = np.max(y_displayed) - np.min(y_displayed)
bot = np.min(y_displayed) - margin * h
top = np.max(y_displayed) + margin * h
return bot, top
lines = ax.get_lines()
bot, top = np.inf, -np.inf
for line in lines:
new_bot, new_top = get_bottom_top(line)
if new_bot < bot:
bot = new_bot
if new_top > top:
top = new_top
if bot == top:
ax.autoscale(enable=True, axis='y', tight=False)
ax.autoscale(enable=False, axis='y', tight=False)
else:
ax.set_ylim(bot, top)
def adjust_lsq(self, A, L):
cst_pass = False
iteration = 0
factor = 1
So = 1
dof = (A.shape[0] - A.shape[1])
X1 = chi2.ppf(1 - 0.05 / 2, dof)
X2 = chi2.ppf(0.05 / 2, dof)
s = np.array([])
v = np.array([])
C = np.array([])
P = np.ones((A.shape[0]))
while not cst_pass and iteration <= 10:
W = np.sqrt(P)
Aw = np.multiply(W[:, None], A)
Lw = np.multiply(W, L)
C = np.linalg.lstsq(Aw, Lw, rcond=-1)[0]
v = L - np.dot(A, C)
# unit variance
So = np.sqrt(np.dot(v, np.multiply(P, v)) / dof)
x = np.power(So, 2) * dof
# obtain the overall uncertainty predicted by lsq
factor = factor * So
# calculate the normalized sigmas
s = np.abs(np.divide(v, factor))
if x < X2 or x > X1:
# if it falls in here it's because it didn't pass the Chi2 test
cst_pass = False
# reweigh by Mike's method of equal weight until 2 sigma
f = np.ones((v.shape[0], ))
# f[s > LIMIT] = 1. / (np.power(10, LIMIT - s[s > LIMIT]))
# do not allow sigmas > 100 m, which is basically not putting
# the observation in. Otherwise, due to a model problem
# (missing jump, etc) you end up with very unstable inversions
# f[f > 500] = 500
sw = np.power(10, LIMIT - s[s > LIMIT])
sw[sw < np.finfo(np.float).eps] = np.finfo(np.float).eps
f[s > LIMIT] = sw
P = np.square(np.divide(f, factor))
else:
cst_pass = True
iteration += 1
# make sure there are no values below eps. Otherwise matrix becomes singular
P[P < np.finfo(np.float).eps] = 1e-6
# some statistics
SS = np.linalg.inv(np.dot(A.transpose(), np.multiply(P[:, None], A)))
sigma = So*np.sqrt(np.diag(SS))
# mark observations with sigma <= LIMIT
index = s <= LIMIT
return C, sigma, index, v, factor, P
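# Note added for clarity (not in the original): in adjust_lsq above, residuals
# whose normalized sigma s exceeds LIMIT get f = 10**(LIMIT - s) (f = 1
# otherwise), and the new weights are P = (f / factor)**2, so e.g. with
# LIMIT = 2.5 a residual at s = 3.5 (f = 0.1) carries 1/100 of the weight of a
# well-behaved observation. The loop repeats until the a-posteriori variance
# factor passes the chi-squared test or the iteration cap is reached.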
if __name__ == '__main__':
import dbConnection
cnn = dbConnection.Cnn('gnss_data.cfg')
ztd = Ztd(cnn, 'cap', 'ecgm', 'igs-sirgas')
ztd.plot('test.png', residuals=True)
|
gpl-3.0
|
pandeylab/pyquant
|
pyquant/tests/test_targeted.py
|
1
|
1201
|
import os
import subprocess
import unittest
import pandas as pd
import numpy as np
import tempfile
from pyquant.tests.mixins import FileMixins
from pyquant.tests import config
class TestTargeted(FileMixins, unittest.TestCase):
def setUp(self):
super(TestTargeted, self).setUp()
self.output = os.path.join(self.out_dir, 'targeted')
def test_pyquant_trypsin(self):
        # This searches for the y1 ions of arginine. It also checks that the label-scheme parameter works.
f = tempfile.NamedTemporaryFile('w')
f.write('\t'.join(['0', 'R', '10.008269', 'R10'])+'\n')
f.write('\t'.join(['1', 'R', '0', 'Light'])+'\n')
f.seek(0)
com = [self.executable, '--scan-file', self.ecoli_mzml, '-p', str(config.CORES), '-o', self.output, '--msn-ion', '175', '--html', '--label-scheme', f.name]
subprocess.call(com)
pyquant = pd.read_table(self.output)
label = 'R10'
pq_sel = '{}/Light'.format(label)
pyquant[pq_sel] = np.log2(pyquant[pq_sel]+0.000001)
r10_med = pyquant[pq_sel].median()
self.assertNotAlmostEqual(r10_med, -2.0, places=2)
if __name__ == '__main__':
unittest.main()
|
mit
|
pombredanne/metamorphosys-desktop
|
metamorphosys/META/models/DynamicsTeam/RollingWheel/post_processing/common/post_processing_class.py
|
18
|
28308
|
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
import os
import json
import sys
import re
import numpy as np
from py_modelica.mat_file_functions.mat_file_to_dict import MatFile2Dict
import matplotlib.pyplot as plt
# Rescue if the limit-checking should get stuck in an infinite while-loop.
# Which should be impossible to start with, but if I am wrong...
MAX_ITERATIONS = 100000
class PostProcess:
filter = [] # list of all variables/parameter to load from mat-file
# (does not need to include 'time' - loaded by default)
time = None
result = None
def __init__(self, mat_file='', filter=None):
"""
Loads in mat-file, extracts given variables in filter (time always included)
and converts lists of values into numpy arrays.
These are stored in result as:
{{name1: array([values1])}, ..., {nameN: array([valuesN])}}
"""
mat_converter = MatFile2Dict(mat_file, filter, False)
result_lists = mat_converter.get_results()
# convert lists into numpy arrays
self.result = {}
for item in result_lists.iteritems():
self.result.update({item[0]: np.array(item[1])})
self.time = self.result['time']
def data_array(self, name):
"""
Get time-series in numpy array format.
name - name of variable
e.g. data_array('time')
returns with the time.
"""
return self.result[name]
def print_data(self, name):
"""
Prints the time-series.
name - name of variable
        e.g. print_data('time')
        prints and returns the time-series of time.
"""
data = self.data_array(name)
print 'name of data: '
print name
print 'here is the data: (with index)'
print '[',
for i in xrange(data.size - 1):
print str(i) + ':', str(data[i]) + ',',
print str(i + 1) + ':', str(data[i + 1]) + ']'
return data
def save_as_svg(self, name, metric_value, metric_name='metric_name', formula='', unit=''):
metric_array = np.ones(len(self.time)) * metric_value
plt.plot(self.time, self.data_array(name))
plt.plot(self.time, metric_array)
plt.plot()
plt.title('{0}\n{1}'.format(metric_name, formula))
plt.xlabel('time\n[s]')
if unit:
plt.ylabel('{0}\n[{1}]'.format(name, unit))
else:
plt.ylabel(name)
if not os.path.isdir('plots'):
os.mkdir('plots')
plot_path = os.path.join('plots', '{0}.svg'.format(metric_name))
plt.savefig(plot_path)
plt.close()
with open('testbench_manifest.json', 'r') as f_in:
sum_rep_json = json.load(f_in)
sum_rep_json['Artifacts'].append(plot_path.replace(os.path.sep, '/'))
with open('testbench_manifest.json', 'wb') as f_out:
json.dump(sum_rep_json, f_out, indent=4)
return plot_path
def time_array(self):
"""
Get time-series of time in numpy array format.
"""
return self.time
def print_time(self):
"""
Prints and returns with time-series of time.
"""
time = self.time
print 'here are time intervals:', time
return time
def short_array(self, name, start=0, end=-1):
"""
        Get a truncated array, from start to end, for variable name
        name - name of variable
        start - start index of interval
        end - end index of interval
        N.B. index goes from 0 to len(array)-1
"""
return self.result[name][start:end]
def plot(self, name):
"""
Returns a tuple, suitable for plotting, of the variable's time-series together with time.
name - name of variable
"""
return self.data_array(name), self.time
def get_data_by_time(self, name, time_val):
"""
Get data based on time value.
name - name of variable to consider
time_val - time point where to extract the value
Returns the data and the index of the data
"""
i = 0
time = self.time
while time[i] < time_val and i in xrange(time.size - 1):
i += 1
data_arr = self.data_array(name)
if time[i - 1] != time_val:
cur = data_arr[i - 1]
next = data_arr[i]
data = time[i - 1] / ((time[i - 1] + time[i]) / 2) * (next - cur) + cur
else:
data = data_arr[i - 1]
return data, i
def get_data_by_index(self, name, index):
return self.data_array(name)[index]
def get_index_from_time(self, time_val):
"""
Get index based on time value.
time_val - time point where to extract the value
Returns index nearest to time_val
"""
i = 0
time = self.time
while time[i] < time_val and i in xrange(time.size-1):
i += 1
return i
def get_time(self, name, value, atol=1e-4, rtol=1e-4, start_index=0, end_index=-1):
"""
Gets the first time point where the variable satisfies either atol or rtol,
if no such point exists - returns with -1.
name - name of variable
atol - absolute tolerance
rtol - relative tolerance
"""
index = -1
# N.B. this is only one of many ways to do this
denominator = 1
if value > rtol:
denominator = value
data = self.data_array(name)[start_index:end_index]
cnt = 0
for x in data:
abs_diff = abs(x - value)
rel_diff = abs_diff / denominator
if abs_diff < atol or rel_diff < rtol:
index = cnt
break
else:
cnt += 1
if index >= 0:
return self.time[start_index + index]
return -1
def last_value(self, name):
"""
Get last value of variable
name - name of variable
"""
return self.data_array(name)[-1]
def global_max(self, name):
"""
Get maximum value of variable
name - name of variable
"""
return self.data_array(name).max()
def global_max_time(self, name):
"""
Get time where max occurs
name - name of variable
returns the time at where the max is
"""
index = self.data_array(name).argmax()
time_at_max = self.time[index]
return time_at_max
def global_min(self, name):
"""
Get minimum value of variable
name - name of variable
"""
return self.data_array(name).min()
def global_min_time(self, name):
"""
Get time where min occurs
name - name of variable
returns the time at where the min is
"""
index = self.data_array(name).argmin()
time_at_min = self.time[index]
return time_at_min
def global_abs_max(self, name):
"""
Get the maximum absolute value of variable
name - name of variable
"""
return np.absolute(self.data_array(name)).max()
def std_dev(self, name):
"""
Returns the standard deviation of variable
name - name of variable
"""
stddev = self.data_array(name).std()
return stddev
def variance(self, name):
"""
Returns the variance of variable
name - name of variable
"""
variance = self.data_array(name).var()
return variance
def sum_value(self, name):
"""
Returns the sum of the time-series for the variable
name - name of variable
"""
result = self.data_array(name).sum()
return result
def mean(self, name):
"""
Returns the mean of the time-series for the variable
name - name of variable
"""
result = np.mean(self.data_array(name), dtype=np.float64)
return result
def integrate(self, name):
"""
Returns the area under the curve of the time-series for the variable
name - name of variable
"""
time = self.time
data = self.data_array(name)
sum = 0
next = data[0]
next_t = time[0]
for i in xrange(data.size):
cur = next
next = data[i]
cur_t = next_t
next_t = time[i]
height = (next + cur) / 2
interval = next_t - cur_t
sum += height * interval
return sum
def minima(self, name):
"""
Returns the minima of time-series of variable
name - name of variable
"""
data = self.data_array(name)
min = []
prev = 0
cur = 0
next = data[0]
for i in xrange(data.size):
if cur < prev and cur <= next:
min.append(cur)
prev = cur
cur = next
            next = data[i]  # note: '++i' is not an increment in Python; it equals +(+i) == i
minimum = np.array(min)
return minimum
def maxima(self, name):
"""
Returns the maxima of time-series of variable
name - name of variable
"""
data = self.data_array(name)
max = []
prev = 0
cur = 0
next = data[0]
for i in xrange(data.size):
if cur >= prev and cur > next:
max.append(cur)
prev = cur
cur = next
            next = data[i]  # note: '++i' is not an increment in Python; it equals +(+i) == i
maximum = np.array(max)
return maximum
def pos_neg(self, name, tol=0.00000015):
"""
Returns time of the roots from positive to negative of time-series of variable
name - name of variable
tol - tolerance
"""
data = self.data_array(name)
time_arr = self.time
time = []
next = -1
for i in xrange(data.size):
cur = next
next = data[i]
if cur > 0 + tol and next <= 0 + tol:
if cur != 0:
cur_t = time_arr[i - 1]
next_t = time_arr[i]
time.append((cur / (cur + next) / 2) * (next_t - cur_t) + cur_t)
else:
time.append(time_arr[i - 1])
timing = np.array(time)
return timing
def neg_pos(self, name, tol=0.00000015):
"""
Returns time of the roots from negative to positive of time-series of variable
name - name of variable
tol - tolerance
"""
time = []
data = self.data_array(name)
time_arr = self.time
next = 1
for i in xrange(data.size):
cur = next
next = data[i]
if cur <= 0 + tol and next > 0 + tol:
if cur != 0:
cur_t = time_arr[i - 1]
next_t = time_arr[i]
time.append(cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t)
else:
time.append(time_arr[i - 1])
timing = np.array(time)
return timing
def to_zero(self, name, value_index):
"""
# time from a number to zero
# (use index from print_data() function)
        # parameters: name - variable name, value_index - index of the starting value
# returns the time of the zero
"""
data = self.data_array(name)
time_arr = self.time
i = value_index + 1
cur = data[value_index]
next = data[i]
tolerance = 0.00000015
if data[value_index] >= 0:
while next >= 0 + tolerance and i in xrange(data.size - 1):
i += 1
cur = next
next = data[i]
if next >= 0 + tolerance:
return -1
else:
while next <= 0 + tolerance and i in xrange(data.size - 1):
i += 1
cur = next
next = data[i]
if next <= 0 + tolerance:
return -1
if cur != 0:
cur_t = time_arr[i - 1]
next_t = time_arr[i]
time = cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t
else:
time = time_arr[i - 1]
return time
def from_zero(self, name, value_index):
"""
        # time from zero to a value (searches backwards from value_index)
        # (use index from print_data() function)
        # parameters: name - variable name, value_index - index of the value
# returns the time of the zero
"""
data = self.data_array(name)
time_arr = self.time
i = value_index - 1
cur = data[value_index]
next = data[i]
tolerance = 0.00000015
if data[value_index - 1] >= 0:
while next >= 0 + tolerance and i in xrange(data.size):
i -= 1
cur = next
next = data[i]
if next >= 0 + tolerance:
return -1
else:
while next <= 0 + tolerance and i in xrange(data.size):
i -= 1
cur = next
next = data[i]
if next <= 0 + tolerance:
return -1
if cur != 0:
cur_t = time_arr[i + 1]
next_t = time_arr[i]
time = cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t
else:
time = time_arr[i + 1]
return time
def zeros(self, name):
"""
Find zeros of time-series for variable
name - name of variable
returns the time of the zero
"""
data_array = self.data_array(name)
time = self.time
data = [[], []]
        data[0].append(self.pos_neg(name))  # pass the variable name; pos_neg/neg_pos look the data up themselves
        data[1].append(self.neg_pos(name))
data_arr = np.array(data)
return data_arr
def compare(self, name1, name2):
"""
Compare the time-series of two variables
name1 - name of variable 1
name2 - name of variable 2
returns true if the results are identical
"""
data1 = self.data_array(name1)
data2 = self.data_array(name2)
for i in xrange(data1.size):
if data1[i] != data2[i]:
return False
return True
def time_total(self, val1, val2):
# finding the difference between 2 times
time = abs(val2 - val1)
return time
def delta_t(self, start_index, end_index):
"""
Returns the length of the time-interval between to indices
"""
t1 = self.time[start_index]
t2 = self.time[end_index]
dt = t2 - t1
return dt
def get_local_max(self, name, start_index, end_index):
"""
Returns the value of the maximum between two indices
N.B. including both points
:param name:
:param start_index:
:param end_index:
"""
if end_index == -1:
maximum = self.data_array(name)[start_index:].max()
else:
maximum = self.data_array(name)[start_index:end_index + 1].max()
return maximum
def get_local_min(self, name, start_index, end_index):
"""
Returns the value of the minimum between two indices
N.B. including both points
"""
if end_index == -1:
minimum = self.data_array(name)[start_index:].min()
else:
minimum = self.data_array(name)[start_index:end_index + 1].min()
return minimum
def find_first_max_violation(self, name, value, start_index=0):
"""
Starting from start_index it looks for the first index where the
time-series has a value greater than value.
If it never occurs, it returns -1
"""
time_series = self.data_array(name)[start_index:]
n = len(time_series)
for i in range(n):
if time_series[i] > value:
return i + start_index
return -1
def find_first_min_violation(self, name, value, start_index=0):
"""
Starting from start_index it looks for the first index where the
time-series has a value less than value.
If it never occurs, it returns -1
"""
time_series = self.data_array(name)[start_index:]
n = len(time_series)
for i in range(n):
if time_series[i] < value:
return i + start_index
return -1
def check_max_limit(self, name, value):
actual_value = ''
limit_exceeded = False
start_index = 0
global_max = -np.Inf
cnt = 0
print 'check_max_limit'
while start_index > -1:
index = self.find_first_max_violation(name, value, start_index)
if index > -1:
end_index = self.find_first_min_violation(name, value, index)
d_t = self.delta_t(index, end_index)
print 'Found violation at t={0} lasting : {1}'.format(self.time[index], d_t)
if d_t > 0.5:
limit_exceeded = True
local_max = self.get_local_max(name, index, end_index)
print 'Local maximum : {0}'.format(local_max)
if local_max > global_max:
global_max = local_max
start_index = end_index
else:
break
cnt += 1
if cnt == MAX_ITERATIONS:
print 'Limit checking for variable {0} aborted after {1} iterations' \
.format(name, MAX_ITERATIONS)
sys.exit(1)
if limit_exceeded:
actual_value = global_max
return limit_exceeded, actual_value
def check_min_limit(self, name, value):
actual_value = ''
limit_exceeded = False
start_index = 0
global_min = np.Inf
cnt = 0
print 'check_min_limit'
while start_index > -1:
index = self.find_first_min_violation(name, value, start_index)
if index > -1:
end_index = self.find_first_max_violation(name, value, index)
d_t = self.delta_t(index, end_index)
print 'Found violation at t={0} lasting : {1} s'.format(self.time[index], d_t)
if d_t > 0.5:
limit_exceeded = True
local_min = self.get_local_min(name, index, end_index)
print 'Local minimum : {0}'.format(local_min)
if local_min < global_min:
global_min = local_min
start_index = end_index
else:
break
cnt += 1
if cnt == MAX_ITERATIONS:
print 'Limit checking for variable {0} aborted after {1} iterations' \
.format(name, MAX_ITERATIONS)
sys.exit(1)
if limit_exceeded:
actual_value = global_min
return limit_exceeded, actual_value
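# A minimal usage sketch (hypothetical variable name and limit value), assuming a
# PostProcess instance `pp` has already been built from a simulation result:
#
#   exceeded, worst = pp.check_max_limit('engine.coolant.T', 120.0)
#   if exceeded:
#       print 'Max limit violated, worst value: {0}'.format(worst)
#
# Note that check_max_limit/check_min_limit only flag a violation when the limit
# stays crossed for more than 0.5 s (the d_t > 0.5 test above).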
def update_metrics_in_report_json(metrics, report_file='testbench_manifest.json'):
"""
Update the metric values in the report file.
:param metrics: dict of the form {'name_of_metric': {'value': (int) or (float), 'unit': ""}, ...}
:param report_file: path to the report json file (default 'testbench_manifest.json')
"""
if not os.path.exists(report_file):
raise IOError('Report file does not exist : {0}'.format(report_file))
# read current summary report, which contains the metrics
with open(report_file, 'r') as file_in:
result_json = json.load(file_in)
assert isinstance(result_json, dict)
if 'Metrics' in result_json:
for metric in result_json['Metrics']:
if 'Name' in metric and 'Value' in metric:
if metric['Name'] in metrics.keys():
new_value = metrics[metric['Name']]['value']
new_unit = metrics[metric['Name']]['unit']
if new_unit is not None:
metric['Unit'] = new_unit
if new_value is not None:
metric['Value'] = str(new_value)
else:
pass
else:
print 'Metric item : {0} does not have the right format'.format(metric)
pass
# update json file with the new values
with open(report_file, 'wb') as file_out:
json.dump(result_json, file_out, indent=4)
else:
print 'Report file {0} does not have any Metrics defined..'.format(report_file)
pass
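# Illustrative call (hypothetical metric name), matching the
# {'name': {'value': ..., 'unit': ...}} structure documented above:
#
#   update_metrics_in_report_json(
#       {'MaxCoolantTemperature': {'value': 97.3, 'unit': 'degC'}},
#       report_file='testbench_manifest.json')
#
# Only entries whose 'Name' already exists in the manifest's Metrics list are
# updated; metric names not present in the manifest are silently ignored.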
def read_limits():
"""
Reads in limits and modifies the ModelicaUri to the correct one.
Returns:
- the updated limit_dict
- the filter as a list
"""
with open('limits.json', 'r') as f_in:
limit_dict = json.load(f_in)
# use set to avoid checking for duplicates
filter = set()
for limit_item in limit_dict['LimitChecks']:
# drop first part of VariableFullPath update the limit_item
# once the limit.json is generated correctly these two lines can be dropped
# modelica_uri = '.'.join(.split('.')[1:])
# modelica_model_rel_uri = limit_item['VariableName']
# split_full_path = limit_item['LimitFullPath'].split('/')
# modelica_model = split_full_path[-2]
# cyphy_relative_uri = '{0}.{1}'.format(modelica_model, modelica_model_rel_uri)
# modelica_uri = modelica_uri.replace(modelica_model_rel_uri, cyphy_relative_uri)
# limit_item['VariableFullPath'] = modelica_uri
# limit_item['ComponentInstanceName'] = split_full_path[-3]
# filter out this variable in the .mat-file
filter.add(limit_item['VariableFullPath'])
# Code specific for FANG-I, with no defined VariableName from GME
# limit_var_name = limit_item['VariableName']
# limit_var_name = re.sub('\.u(.*)$', '', limit_item['VariableFullPath'])
# limit_var_name_split = limit_var_name.split('.')
# limit_var_name = limit_var_name_split[len(limit_var_name_split)-3] + '=>' + \
# limit_var_name_split[len(limit_var_name_split)-1]
# limit_item['LimitName'] = limit_var_name
filter = list(filter)
print "Variables for limit-checking : {0}".format(filter)
return limit_dict, filter
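# For reference, a minimal limits.json shape that read_limits() and
# check_limits_and_add_to_report_json() can consume (values are hypothetical,
# only the keys actually used in this module are shown):
#
#   {
#     "LimitChecks": [
#       {"VariableFullPath": "Model.component.signal",
#        "Value": 1.5,
#        "Type": "max"}
#     ]
#   }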
def check_limits_and_add_to_report_json(pp, limit_dict):
"""
Check the limits and write out dictionary to testbench_manifest.json
"""
assert isinstance(pp, PostProcess)
for limit_item in limit_dict['LimitChecks']:
modelica_uri = limit_item['VariableFullPath']
limit_value = limit_item['Value']
limit_type = limit_item['Type']
print "--== {0} ==--".format(modelica_uri)
print "Type of Limit : {0}".format(limit_type)
print "Limit : {0} ".format(limit_value)
if limit_type == 'min':
limit_exceeded, actual_value = pp.check_min_limit(modelica_uri, limit_value)
limit_item['LimitExceeded'] = limit_exceeded
limit_item['ActualValue'] = str(actual_value)
elif limit_type == 'max':
limit_exceeded, actual_value = pp.check_max_limit(modelica_uri, limit_value)
limit_item['LimitExceeded'] = limit_exceeded
limit_item['ActualValue'] = str(actual_value)
else:
limit_exceeded_max, actual_max_value = pp.check_max_limit(modelica_uri, limit_value)
limit_exceeded_min, actual_min_value = pp.check_min_limit(modelica_uri, -limit_value)
# determine the actual value depending on which limits were exceeded
if limit_exceeded_max and limit_exceeded_min:
if actual_max_value > abs(actual_min_value):
actual_value = str(actual_max_value)
else:
actual_value = str(abs(actual_min_value))
elif limit_exceeded_max:
actual_value = str(actual_max_value)
elif limit_exceeded_min:
actual_value = str(abs(actual_min_value))
else:
actual_value = ''
limit_item['LimitExceeded'] = limit_exceeded_max or limit_exceeded_min
limit_item['ActualValue'] = actual_value
limit_item['Value'] = str(limit_value)
print "Violation : {0}".format(limit_item["LimitExceeded"])
with open('testbench_manifest.json', 'r') as f_in:
sum_rep_json = json.load(f_in)
sum_rep_json['LimitChecks'] = limit_dict['LimitChecks']
with open('testbench_manifest.json', 'wb') as f_out:
json.dump(sum_rep_json, f_out, indent=4)
print "Limits updated"
|
mit
|
FedericoMuciaccia/SistemiComplessi
|
src/percolation plotting.py
|
1
|
9118
|
# coding: utf-8
# In[3]:
import numpy, networkx, pandas
# import graph_tool
# from graph_tool.all import *
from matplotlib import pyplot
get_ipython().magic(u'matplotlib inline')
# In[ ]:
# In[ ]:
# In[ ]:
# In[2]:
gestori = ["Tim", "Tre"]
colori = ['#004184','#018ECC']
# In[3]:
# plotting
# read the data written to disk,
# so that the computation does not have to be redone every time
# and this block of code stays independent from the rest
for provider, colore in zip(gestori, colori):
failureResults = pandas.read_csv('../data/percolation/randomFailure_{0}.csv'.format(provider))
attackResults = pandas.read_csv('../data/percolation/intentionalAttack_{0}.csv'.format(provider))
# TODO use the correct colors for the plots with all the providers
# TODO remove the top axis and the right axis
# facecolor='green',
# # **{'color': colore}
# TODO
#/usr/lib/python2.7/dist-packages/matplotlib/collections.py:571: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
# if self._edgecolors == str('face'):
# initialize the figure
#figura = pyplot.figure(figsize=(20,12))
pyplot.scatter(x = failureResults.percentuale,
y = failureResults.diameter,
s = 90,
#figsize = (16,12),
#grid = False,
# alpha = 0.8,
label = provider,
color = colore,
antialiased = True,
marker = 'o')
# pyplot.alpha = 0.1  # no effect: alpha is a per-artist keyword, set it in the scatter call
pyplot.xlim(0, 101)
pyplot.ylim(0, 15)  # TODO
pyplot.title('Random failure')
pyplot.xlabel("Percentuale")
pyplot.ylabel("Diametro")
pyplot.legend(loc='best', frameon=False)
#pyplot.savefig('../img/percolation/randomFailure_diameter.eps', format='eps', dpi=600)
failureResults.plot(kind = 'scatter',
x = 'percentuale',
y = 'relativeGiantClusterSize',
s = 90,
title = 'Random failure',
xlim = (0, 101),
ylim = (0, 1.02),
figsize = (16,12),
grid = False,
alpha = 0.8,
label = 'Tre',
antialiased = True,
marker = 'o')
#pyplot.xlabel("Percentuale")
#pyplot.ylabel("Dimensione relativa del giant cluster")
#pyplot.savefig('../img/percolation/randomFailure_relativeGiantClusterSize.eps', format='eps', dpi=600)
attackResults.plot(kind = 'scatter',
x = 'percentuale',
y = 'diameter',
s = 90,
title = 'Intentional attack',
xlim = (0, 101),
ylim = (0, max(attackResults.diameter)+1),
figsize = (16,12),
grid = False,
alpha = 0.8,
label = 'Tre',
antialiased = True)
#pyplot.xlabel("Percentuale")
#pyplot.ylabel("Diametro")
#pyplot.savefig('../img/percolation/intentionalAttack_diameter.eps', format='eps', dpi=600)
attackResults.plot(kind = 'scatter',
x = 'percentuale',
y = 'relativeGiantClusterSize',
s = 90,
title = 'Intentional attack',
xlim = (0, 101),
ylim = (0, 1.02),
figsize = (16,12),
grid = False,
alpha = 0.8,
label = 'Tre',
antialiased = True)
#pyplot.xlabel("Percentuale")
#pyplot.ylabel("Dimensione relativa del giant cluster")
#pyplot.savefig('../img/percolation/intentionalAttack_relativeGiantClusterSize.eps', format='eps', dpi=600)
# pyplot.figure(1)
# pyplot.subplot(211)
# ...
# pyplot.subplot(212)
# ...
# x : label or position, default None
# y : label or position, default None
# subplots=True
# sharex=True
# layout= tuple (rows, columns)
# legend= False/True/’reverse’
#style : list or dict
# matplotlib line style per column
#logx : boolean, default False
# Use log scaling on x axis
#logy : boolean, default False
# Use log scaling on y axis
#loglog : boolean, default False
# Use log scaling on both x and y axes
#fontsize : int, default None
# Font size for xticks and yticks
#colormap : str or matplotlib colormap object, default None
# Colormap to select colors from. If string, load colormap with that name from matplotlib.
#colorbar : boolean, optional
# If True, plot colorbar (only relevant for ‘scatter’ and ‘hexbin’ plots)
#table : boolean, Series or DataFrame, default False
# If True, draw a table using the data in the DataFrame and the data will be transposed to meet matplotlib’s default layout. If a Series or DataFrame is passed, use passed data to draw a table.
#sort_columns : boolean, default False
# Sort column names to determine plot ordering
#secondary_y : boolean or sequence, default False
# Whether to plot on the secondary y-axis If a list/tuple, which columns to plot on secondary y-axis
#kwds : keywords
# Options to pass to matplotlib plotting method
# In[ ]:
percentuali = []
diametriFailure = []
for provider, colore in zip(gestori, colori):
failureResults = pandas.read_csv('../data/percolation/randomFailure_{0}.csv'.format(provider))
attackResults = pandas.read_csv('../data/percolation/intentionalAttack_{0}.csv'.format(provider))
percentuali.append(failureResults.percentuale)
diametriFailure.append(failureResults.diameter)
pyplot.scatter(x = failureResults.percentuale,
y = failureResults.diameter,
#s = 90,
#figsize = (16,12),
#grid = False,
alpha = 0.8,
#label = provider,
color = colore,
antialiased = True,
marker = 'o')
pyplot.xlim(0, 101)
pyplot.ylim(0, 15)  # TODO
pyplot.title('Random failure')
pyplot.xlabel("Percentuale")
pyplot.ylabel("Diametro")
pyplot.legend(loc='best', frameon=False)
#pyplot.savefig('../img/percolation/randomFailure_diameter.eps', format='eps', dpi=600)
# # Seaborn-style plots
# In[12]:
import pandas
from matplotlib import pyplot
gestori = ["Tim", "Vodafone", "Wind", "Tre"]
colori = ['#004184','#ff3300','#ff8000','#018ECC']
get_ipython().magic(u'matplotlib inline')
# In[13]:
failureFrames = []
attackFrames = []
for provider in gestori:
failureResults = pandas.read_csv('../data/percolation/randomFailure_{0}.csv'.format(provider))
attackResults = pandas.read_csv('../data/percolation/intentionalAttack_{0}.csv'.format(provider))
failureResults['Compagnia'] = provider
attackResults['Compagnia'] = provider
failureFrames.append(failureResults)
attackFrames.append(attackResults)
failureFinal = pandas.concat(failureFrames)
attackFinal = pandas.concat(attackFrames)
# In[14]:
import seaborn
# attack plots
attackFinal.head()
seaborn.set_context("notebook", font_scale=1.1)
seaborn.set_style("ticks")
# plot of the diameter trend
seaborn.lmplot('percentuale', 'diameter', data=attackFinal, fit_reg=False,
size = 7, aspect = 1.7778,
hue='Compagnia', palette = colori,
scatter_kws={"marker": "D", "s": 100})
pyplot.title('Attacco: diametro')
pyplot.xlabel("%")
pyplot.ylabel("Valore")
pyplot.xlim(0, 100)
pyplot.ylim(0,60)
pyplot.savefig('../img/federico/attackD_Final', format='eps', dpi=1000)
# plot of the giant cluster trend
seaborn.lmplot('percentuale', 'relativeGiantClusterSize', data=attackFinal, fit_reg=False,
size = 7, aspect = 1.7778,
hue='Compagnia', palette = colori,
scatter_kws={"marker": "D", "s": 100})
pyplot.title('Attacco: dimensioni relative del GC')
pyplot.xlabel("%")
pyplot.ylabel("Valore")
pyplot.xlim(0, 100)
pyplot.ylim(0,1.1)
pyplot.savefig('../img/federico/attackGC_Final', format='eps', dpi=1000)
# In[15]:
# failure plots
failureFinal.head()
seaborn.set_context("notebook", font_scale=1.1)
seaborn.set_style("ticks")
# plot of the diameter trend
seaborn.lmplot('percentuale', 'diameter', data=failureFinal, fit_reg=False,
size = 7, aspect = 1.7778,
hue='Compagnia', palette = colori,
scatter_kws={"marker": "D", "s": 100})
pyplot.title('Random failure: diametro')
pyplot.xlabel("%")
pyplot.ylabel("Valore")
pyplot.xlim(0, 100)
#pyplot.ylim(0,max(diametro)+2)
pyplot.savefig('../img//federico/failureD_Final', format='eps', dpi=1000)
# plot of the giant cluster trend
seaborn.lmplot('percentuale', 'relativeGiantClusterSize', data=failureFinal, fit_reg=False,
size = 7, aspect = 1.7778,
hue='Compagnia', palette = colori,
scatter_kws={"marker": "D", "s": 100})
pyplot.title('Random failure: dimensioni relative del GC')
pyplot.xlabel("%")
pyplot.ylabel("Valore")
pyplot.xlim(0, 100)
pyplot.ylim(0,1.1)
pyplot.savefig('../img/federico/failureGC_Final', format='eps', dpi=1000)
# In[ ]:
|
mit
|
dianachenyu/linear-svm-squared-hinge-loss
|
src/demo_real_world_data.py
|
1
|
4283
|
"""
Demo of the method on a real-world dataset
"""
# Author: Diana Chenyu Zhang <[email protected]>
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.linalg
import sklearn.preprocessing
import linear_svm_squared_hinge_loss
# Use the real-world Spam dataset from the book The Elements of Statistical Learning
spam = pd.read_table('https://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/spam.data', sep=' ', header=None)
test_indicator = pd.read_table('https://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/spam.traintest', sep=' ',
header=None)
x = np.asarray(spam)[:, 0:-1]
y = np.asarray(spam)[:, -1]*2 - 1
# Use the train-test split indicator provided along with the dataset
test_indicator = np.array(test_indicator).T[0]
x_train = x[test_indicator == 0, :]
x_test = x[test_indicator == 1, :]
y_train = y[test_indicator == 0]
y_test = y[test_indicator == 1]
# Standardize the data
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# Use lambda = 1 first
lambduh = 1
n = np.size(x, 1)
eta_init = 1/(scipy.linalg.eigh(1/len(y_train)*x_train.T.dot(x_train), eigvals=(n-1, n-1), eigvals_only=True)[0]+lambduh)
betas = linear_svm_squared_hinge_loss.mylinearsvm(lambduh, eta_init, 100, x_train, y_train)
# Calculate misclassification error on the training set and testing set
error_train_lambda1 = linear_svm_squared_hinge_loss.compute_misclassification_error(betas[-1,:],x_train,y_train)
error_test_lambda1 = linear_svm_squared_hinge_loss.compute_misclassification_error(betas[-1,:],x_test,y_test)
print('Misclassification error for the lambda value 1 on the training set is: ', error_train_lambda1)
print('Misclassification error for the lambda value 1 on the testing set is: ', error_test_lambda1)
# Plot misclassification error and objective value
linear_svm_squared_hinge_loss.plot_misclassification_error(betas, x_train, y_train,
title='Training set misclassification error when lambda = 1',
file_name = 'misclass_plot_train_lambda1.png')
linear_svm_squared_hinge_loss.plot_misclassification_error(betas, x_test, y_test,
title='Test set misclassification error when lambda = 1',
file_name = 'misclass_plot_test_lambda1.png')
linear_svm_squared_hinge_loss.plot_objective(betas, lambduh, x_train, y_train, file_name = 'objective_plot_train_lambda1.png' )
# Find optimal value of lambda through cross-validation
optimal_lambduh1 = linear_svm_squared_hinge_loss.find_optimal_lambduh(x_test,y_test,eta_init,-1000,1000,10)
print('Optimal value of lambda is: ', optimal_lambduh1)
optimal_lambduh2 = linear_svm_squared_hinge_loss.find_optimal_lambduh(x_test,y_test,eta_init,-10,10,0.01)
print('Optimal value of lambda is: ', optimal_lambduh2)
# Calculate misclassification error on the training set and testing set
betas_opt = linear_svm_squared_hinge_loss.mylinearsvm(optimal_lambduh2, eta_init, 100, x_train, y_train)
error_train_lambda_opt = linear_svm_squared_hinge_loss.compute_misclassification_error(betas_opt[-1,:],x_train,y_train)
error_test_lambda_opt = linear_svm_squared_hinge_loss.compute_misclassification_error(betas_opt[-1,:],x_test,y_test)
print('Misclassification error for the optimal lambda value on the training set is: ', error_train_lambda_opt)
print('Misclassification error for the optimal lambda value on the testing set is: ', error_test_lambda_opt)
# Plot misclassification error and objective value
linear_svm_squared_hinge_loss.plot_misclassification_error(betas_opt, x_train, y_train,
title='Training set misclassification error for the optimal lambda value.',
file_name = 'misclass_plot_train_lambda_opt.png')
linear_svm_squared_hinge_loss.plot_misclassification_error(betas_opt, x_test, y_test,
title='Test set misclassification error for the optimal lambda value.',
file_name = 'misclass_plot_test_lambda_opt.png')
linear_svm_squared_hinge_loss.plot_objective(betas_opt, optimal_lambduh2, x_train, y_train, file_name = 'objective_plot_train_lambda_opt.png')
|
mit
|
gef756/statsmodels
|
statsmodels/tools/print_version.py
|
4
|
8025
|
#!/usr/bin/env python
from __future__ import print_function
from statsmodels.compat.python import reduce
import sys
from os.path import dirname
def safe_version(module, attr='__version__', *others):
if not isinstance(attr, list):
attr = [attr]
try:
return reduce(getattr, [module] + attr)
except AttributeError:
if others:
return safe_version(module, others[0], *others[1:])
return "Cannot detect version"
def _show_versions_only():
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
import os
(sysname, nodename, release, version, machine) = os.uname()
print("OS: %s %s %s %s" % (sysname, release, version, machine))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
except:
pass
try:
from statsmodels import version
has_sm = True
except ImportError:
has_sm = False
print('\nStatsmodels\n===========\n')
if has_sm:
print('Installed: %s' % safe_version(version, 'full_version'))
else:
print('Not installed')
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: %s" % safe_version(Cython))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: %s" % safe_version(numpy, ['version', 'version']))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: %s" % safe_version(scipy, ['version', 'version']))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print("pandas: %s" % safe_version(pandas))
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(" dateutil: %s" % safe_version(dateutil))
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: %s" % safe_version(patsy))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: %s" % safe_version(mpl))
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print("cvxopt: %s" % safe_version(info, 'version'))
except ImportError:
print("cvxopt: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print("IPython: %s" % safe_version(IPython))
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(" jinja2: %s" % safe_version(jinja2))
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: %s" % safe_version(sphinx))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(" pygments: %s" % safe_version(pygments))
except ImportError:
print(" pygments: Not installed")
try:
import nose
print("nose: %s" % safe_version(nose))
except ImportError:
print("nose: Not installed")
try:
import virtualenv
print("virtualenv: %s" % safe_version(virtualenv))
except ImportError:
print("virtualenv: Not installed")
print("\n")
def show_versions(show_dirs=True):
if not show_dirs:
_show_versions_only()
return
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
import os
(sysname, nodename, release, version, machine) = os.uname()
print("OS: %s %s %s %s" % (sysname, release, version, machine))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
except:
pass
try:
import statsmodels
from statsmodels import version
has_sm = True
except ImportError:
has_sm = False
print('\nStatsmodels\n===========\n')
if has_sm:
print('Installed: %s (%s)' % (safe_version(version, 'full_version'),
dirname(statsmodels.__file__)))
else:
print('Not installed')
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: %s (%s)" % (safe_version(Cython),
dirname(Cython.__file__)))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: %s (%s)" % (safe_version(numpy, ['version', 'version']),
dirname(numpy.__file__)))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: %s (%s)" % (safe_version(scipy, ['version', 'version']),
dirname(scipy.__file__)))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print("pandas: %s (%s)" % (safe_version(pandas, ['version', 'version'],
'__version__'),
dirname(pandas.__file__)))
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(" dateutil: %s (%s)" % (safe_version(dateutil),
dirname(dateutil.__file__)))
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: %s (%s)" % (safe_version(patsy),
dirname(patsy.__file__)))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: %s (%s)" % (safe_version(mpl),
dirname(mpl.__file__)))
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print("cvxopt: %s (%s)" % (safe_version(info, 'version'),
dirname(info.__file__)))
except ImportError:
print("cvxopt: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print("IPython: %s (%s)" % (safe_version(IPython),
dirname(IPython.__file__)))
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(" jinja2: %s (%s)" % (safe_version(jinja2),
dirname(jinja2.__file__)))
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: %s (%s)" % (safe_version(sphinx),
dirname(sphinx.__file__)))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(" pygments: %s (%s)" % (safe_version(pygments),
dirname(pygments.__file__)))
except ImportError:
print(" pygments: Not installed")
try:
import nose
print("nose: %s (%s)" % (safe_version(nose), dirname(nose.__file__)))
except ImportError:
print("nose: Not installed")
try:
import virtualenv
print("virtualenv: %s (%s)" % (safe_version(virtualenv),
dirname(virtualenv.__file__)))
except ImportError:
print("virtualenv: Not installed")
print("\n")
if __name__ == "__main__":
show_versions()
|
bsd-3-clause
|
bioinformatics-centre/AsmVar
|
src/AsmvarVarScore/FeatureToScore.py
|
2
|
12602
|
"""
========================================================
Statistic the SV Stat after AGE Process
========================================================
Author: Shujia Huang & Siyang Liu
Date : 2014-03-07
"""
import sys
import re
import os
import string
import numpy as np
import matplotlib.pyplot as plt
def DrawFig(figureFile, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden):
fig = plt.figure(num=None, figsize=(16, 30), facecolor='w', edgecolor='k')
title = ['Distance distribution', 'NRatio', 'Perfect Depth', 'Imperfect depth', '', '', '']
ylabel = ['The position of breakpoint', 'N Ratio of varints', 'Perfect Depth', \
'Both ImPerfect Depth', 'Map score', 'Mismapping Probability', 'Average Identity', 'ProperReadDepth', 'ImProperReadDepth']
for i, data in enumerate ([distance, nr, aa, bb, mscore, misprob, aveIden, properDepth, imProperDepth ]):
plt.subplot(9,2,2 * i + 1)
#plt.title(title[i], fontsize=16)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=0.05, linewidths = 0, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=0.05, linewidths = 0, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
plt.scatter(data[:,1][X], data[:,2][X], marker='o', c = 'Y', alpha=0.05, linewidths = 0, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
plt.legend(loc='upper left')
plt.xlim(-20, 50)
plt.xlabel('Score' , fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(9, 2, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad] , data[:,2][NEW][bad] , marker='o', c = 'm', alpha=0.05, linewidths = 0, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=0.05, linewidths = 0, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
plt.xlim(-20, 50)
plt.legend(loc='upper left')
plt.xlabel('Score' , fontsize=16)
fig.savefig(figureFile + '.png')
#fig.savefig(figureFile + '.pdf')
def DrawPhredScale (figureFile, phredScal):
fig = plt.figure()
ylabel = ['Phred Scale']
for i, data in enumerate ([phredScal ]):
plt.subplot(2, 1, 2 * i + 1)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=0.5, linewidths = 0, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=0.5, linewidths = 0, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
plt.scatter(data[:,1][X], data[:,2][X], marker='o', c = 'Y', alpha=0.5, linewidths = 0, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
plt.legend(loc='upper left')
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(2, 1, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad] , data[:,2][NEW][bad] , marker='o', c = 'm', alpha=0.5, linewidths = 0, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=0.5, linewidths = 0, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
plt.legend(loc='upper left')
plt.xlabel('Score' , fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
fig.savefig(figureFile + '.png')
#fig.savefig(figureFile + '.pdf')
def Accum (data, isBig = False):
tmpD= data
k = sorted(tmpD.keys(), key = lambda d: float(d))
dat = []
for i in range(len(k)):
if isBig:
for j in range(i,len(k)): tmpD[k[i]][1] += tmpD[k[j]][0]
else:
for j in range(i+1): tmpD[k[i]][1] += tmpD[k[j]][0]
dat.append([float(k[i]), float(tmpD[k[i]][0]), float(tmpD[k[i]][1]) ])
return dat
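# Illustrative example (made-up counts, each value starts as [count, 0]):
#   Accum({'1': [2, 0], '2': [3, 0], '3': [5, 0]})
#   -> [[1.0, 2.0, 2.0], [2.0, 3.0, 5.0], [3.0, 5.0, 10.0]]
# i.e. each row is [key, count, cumulative count]; with isBig=True the
# accumulation runs from the largest key downwards instead.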
def SampleFaLen (faLenFile):
if faLenFile[-3:] == '.gz': I = os.popen('gzip -dc %s' % faLenFile)
else : I = open(faLenFile)
data = {}
while 1:
lines = I.readlines (100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
data[col[0]] = string.atoi(col[1])
I.close()
return data
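# The per-sample fasta-length file is expected to hold two whitespace-separated
# columns, sequence id and length, e.g. (illustrative):
#   scaffold_1  2045831
# giving {'scaffold_1': 2045831} after SampleFaLen().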
def LoadFaLen (faLenLstFile):
data = {}
I = open (faLenLstFile)
for line in I.readlines():
if len(line.strip('\n').split()) != 2: raise ValueError('[ERROR] The format of Fa length list maybe not right. It could just be: "sample FalenghtFile", but found',line)
sampleId, fileName = line.strip('\n').split()
if sampleId not in data: data[sampleId] = {}
data[sampleId] = SampleFaLen(fileName)
I.close()
return data
def main (argv):
qFaLen = LoadFaLen(argv[1])
figPrefix = 'test'
if len(argv) > 2: figPrefix = argv[2]
if argv[0][-3:] == '.gz':
I = os.popen('gzip -dc %s' % argv[0])
else:
I = open (argv[0])
s, annotations, mark = set(), [], []
print '#Chr\tPosition\tDistance\tLeftIden\tRightIden\tAveIden\tN-Ratio\tAA'
while 1: # VCF format
lines = I.readlines(100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
if re.search(r'^#CHROM', line): col2sam = { i+9:sam for i,sam in enumerate(col[9:]) }
if re.search(r'^#', line): continue
key = col[0] + ':' + col[1]
if key in s: continue
s.add(key)
#if re.search(r'^PASS', col[6]): continue
#if not re.search(r'_TRAIN_SITE', col[7]): continue
#if not re.search(r'^PASS', col[6]): continue
isbad = False
for i, sample in enumerate (col[9:]):
if re.search(r'NULL', sample): isbad = True
if isbad: continue
fmat = { k:i for i,k in enumerate(col[8].split(':')) }
if 'VS' not in fmat or 'QR' not in fmat: continue
if 'AGE' not in fmat: continue
if len(annotations) == 0: annotations = [[] for _ in col[9:] ]
vcfinfo = { d.split('=')[0]: d.split('=')[1] for d in col[7].split(';') if len(d.split('=')) == 2 }
vq = string.atof(vcfinfo['VQ'])
if ('POSITIVE_TRAIN_SITE' in col[7]) and ('NEGATIVE_TRAIN_SITE' in col[7]):
mark.append([3, vq])
elif 'POSITIVE_TRAIN_SITE' in col[7]:
mark.append([1, vq])
elif 'NEGATIVE_TRAIN_SITE' in col[7]:
mark.append([2, vq])
else:
mark.append([0, vq])
# GT:AA:AE:FN:MIP:MS:QR:RR:VS:VT
for i, sample in enumerate (col[9:]):
sampleId = col2sam[9+i]
field = sample.split(':')
if sample == './.' or len(field) < fmat['QR'] + 1 or field[fmat['QR']].split(',')[-1] == '.' or field[fmat['AS']] == '.':
annotations[i].append([0, 0, 0, 0, 0, 0, 0, 0, 0])
continue
qr = field[fmat['QR']].split(',')[-1]
qregion = np.array(qr.split('-'))
if len(qregion) > 3: qId = qregion[0] + '-' + qregion[1]
else : qId = qregion[0]
qSta = string.atoi(qregion[-2])
qEnd = string.atoi(qregion[-1])
if sampleId not in qFaLen: raise ValueError ('[ERROR] The sample name %s (in vcf) is not in the name of Fa list.' % sampleId)
if qId not in qFaLen[sampleId]: raise ValueError ('[ERROR]', qId, 'is not been found in file', opt.qFalen, '\n')
qSta= int(qSta * 100 / qFaLen[sampleId][qId] + 0.5)
qEnd= int(qEnd * 100 / qFaLen[sampleId][qId] + 0.5)
if qSta > 100 or qEnd > 100: raise ValueError ('[ERROR] Query size Overflow! sample: %s; scaffold: %s' % (sampleId, qId))
leg = qSta
if 100 - qEnd < qSta: leg = qEnd
nn = string.atof(sample.split(':')[fmat['NR']])
n = round(1000 * nn) / 10.0 # N ratio
alt = string.atoi(sample.split(':')[fmat['AA']].split(',')[1]) # Alternate perfect
bot = string.atoi(sample.split(':')[fmat['AA']].split(',')[3]) # Both imperfect
pro, ipr = [0,0]
ms = string.atoi(sample.split(':')[fmat['AS']]) # Mapping score
mip = string.atof(sample.split(':')[fmat['MS']]) # Mismapping probability
if sample.split(':')[fmat['AGE']] != '.':
aveI = string.atoi(sample.split(':')[fmat['AGE']].split(',')[3]) # ave_iden in AGE
else:
aveI = 0
annotations[i].append([leg, n, alt, bot, pro, ipr, ms, mip, aveI])
I.close()
print >> sys.stderr, '# Number of Positions: %d' % len(mark)
if len(mark) != len(annotations[0]): raise ValueError ('[ERROR] The sizes do not match: mark=%d, annotations=%d!' % (len(mark), len(annotations[0])))
annotations = np.array(annotations);
sampleNum = len(annotations)
""" Extractly we don't have do such normalization
for i in range(sampleNum):
if np.sum(annotations[i]) == 0: continue
goodIndx = [j for j, d in enumerate(annotations[i]) if np.sum(d) > 0]
mean = np.array([d for d in annotations[i] if np.sum(d) > 0]).mean(axis=0)
std = np.array([d for d in annotations[i] if np.sum(d) > 0]).std (axis=0)
annotations[i][goodIndx] = np.abs((annotations[i][goodIndx] - mean)/std) # Normalization Per sample
"""
data, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden = [],[],[],[],[],[],[],[],[],[]
phredScal = []
for i in range(len(annotations[0])):
anno = np.array([annotations[s][i] for s in range(sampleNum) if len(annotations[s][i][annotations[s][i]!=0]) > 0 ]) # each person in the same position
score = np.array([annotations[s][i][-3] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
msprob = np.array([annotations[s][i][-2] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
phred = -10 * np.log10(1.0 - score.sum() / np.sum(score/(1.0 - msprob))) # Phred scale
if len(anno) == 0: continue
leg, n, alt, bot, pro,ipr, ms, mip, aveI = np.median(anno, axis=0)
distance.append ([mark[i][0], mark[i][1], leg ])
properDepth.append ([mark[i][0], mark[i][1], pro ])
imProperDepth.append ([mark[i][0], mark[i][1], ipr ])
nr.append ([mark[i][0], mark[i][1], n ])
aa.append ([mark[i][0], mark[i][1], alt ])
bb.append ([mark[i][0], mark[i][1], bot ])
mscore.append ([mark[i][0], mark[i][1], ms ])
misprob.append ([mark[i][0], mark[i][1], mip ])
aveIden.append ([mark[i][0], mark[i][1], aveI])
phredScal.append ([mark[i][0], mark[i][1], phred])
data.append([leg, alt, pro,ipr, n, bot])
print mark[i][0], mark[i][1], '\t', leg, '\t', pro, '\t', ipr,'\t', n, '\t', alt, '\t', bot
data = np.array(data)
print >> sys.stderr, '\nPosition\tALTernatePerfect\tLeftIdentity\tRightIdentity\tAveIden\tNRatio\tBothImperfect'
print >> sys.stderr, 'Means: ', data.mean(axis=0), '\nstd : ', data.std(axis=0), '\nMedian: ', np.median(data, axis=0)
print >> sys.stderr, '25 Percentile:', np.percentile(data, 25,axis=0), '\n50 Percentile:', np.percentile(data, 50,axis=0), '\n75 Percentile:', np.percentile(data, 75,axis=0)
DrawFig(figPrefix, \
np.array (distance ), \
np.array (properDepth ), \
np.array (imProperDepth), \
np.array (nr ), \
np.array (aa ), \
np.array (bb ), \
np.array (mscore ), \
np.array (misprob ), \
np.array (aveIden ) )
DrawPhredScale (figPrefix + '.phred', np.array(phredScal))
if __name__ == '__main__':
VQ_CUTOFF = 2.0
main(sys.argv[1:])
|
mit
|
ttouchstone/deap
|
examples/es/cma_plotting.py
|
12
|
4326
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import cma
from deap import creator
from deap import tools
import matplotlib.pyplot as plt
# Problem size
N = 10
NGEN = 125
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)
def main(verbose=True):
# The cma module uses the numpy random number generator
numpy.random.seed(64)
# The CMA-ES algorithm takes a population of one individual as argument
# The centroid is set to a vector of 5.0 see http://www.lri.fr/~hansen/cmaes_inmatlab.html
# for more details about the rastrigin and other tests for CMA-ES
strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
halloffame = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
# Objects that will compile the data
sigma = numpy.ndarray((NGEN,1))
axis_ratio = numpy.ndarray((NGEN,1))
diagD = numpy.ndarray((NGEN,N))
fbest = numpy.ndarray((NGEN,1))
best = numpy.ndarray((NGEN,N))
std = numpy.ndarray((NGEN,N))
for gen in range(NGEN):
# Generate a new population
population = toolbox.generate()
# Evaluate the individuals
fitnesses = toolbox.map(toolbox.evaluate, population)
for ind, fit in zip(population, fitnesses):
ind.fitness.values = fit
# Update the strategy with the evaluated individuals
toolbox.update(population)
# Update the hall of fame and the statistics with the
# currently evaluated population
halloffame.update(population)
record = stats.compile(population)
logbook.record(evals=len(population), gen=gen, **record)
if verbose:
print(logbook.stream)
# Save more data along the evolution for latter plotting
# diagD is sorted and square-rooted in the update method
sigma[gen] = strategy.sigma
axis_ratio[gen] = max(strategy.diagD)**2/min(strategy.diagD)**2
diagD[gen, :N] = strategy.diagD**2
fbest[gen] = halloffame[0].fitness.values
best[gen, :N] = halloffame[0]
std[gen, :N] = numpy.std(population, axis=0)
# The x-axis will be the number of evaluations
x = list(range(0, strategy.lambda_ * NGEN, strategy.lambda_))
avg, max_, min_ = logbook.select("avg", "max", "min")
plt.figure()
plt.subplot(2, 2, 1)
plt.semilogy(x, avg, "--b")
plt.semilogy(x, max_, "--b")
plt.semilogy(x, min_, "-b")
plt.semilogy(x, fbest, "-c")
plt.semilogy(x, sigma, "-g")
plt.semilogy(x, axis_ratio, "-r")
plt.grid(True)
plt.title("blue: f-values, green: sigma, red: axis ratio")
plt.subplot(2, 2, 2)
plt.plot(x, best)
plt.grid(True)
plt.title("Object Variables")
plt.subplot(2, 2, 3)
plt.semilogy(x, diagD)
plt.grid(True)
plt.title("Scaling (All Main Axes)")
plt.subplot(2, 2, 4)
plt.semilogy(x, std)
plt.grid(True)
plt.title("Standard Deviations in All Coordinates")
plt.show()
if __name__ == "__main__":
main(False)
|
lgpl-3.0
|
jpautom/scikit-learn
|
sklearn/svm/tests/test_bounds.py
|
280
|
2541
|
import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
|
bsd-3-clause
|
Richert/BrainNetworks
|
RC/QIF_macro_readout.py
|
1
|
2360
|
import numpy as np
import pandas as pd
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import StratifiedKFold
from pyrates.utility.visualization import plot_connectivity
from matplotlib.pyplot import show
# define meta parameters
ridge_alphas = [0.0001, 0.001, 0.01]
n_folds = 10
cutoff = 0.0
# load data
path = "/home/rgast/PycharmProjects/BrainNetworks/RC/results"
lorenz_map = pd.read_pickle(f"{path}/lorenz_map.pkl")
lorenz_data = pd.read_pickle(f"{path}/lorenz_ts.pkl")
stula_map = pd.read_pickle(f"{path}/stuart_landau_map.pkl")
stula_data = pd.read_pickle(f"{path}/stuart_landau_ts.pkl")
# train ridge regressions for each eta and alpha
etas, alphas, scores = [], [], []
for i in range(len(lorenz_map.index)):
# extract data from dfs
lorenz_key = lorenz_map.index[i]
eta = lorenz_map.at[lorenz_key, 'eta']
alpha = lorenz_map.at[lorenz_key, 'alpha']
stula_key = stula_map.index[(stula_map.loc[:, 'alpha'] == alpha) * (stula_map.loc[:, 'eta'] == eta)]
lorenz_ts = lorenz_data.loc[cutoff:, ("r", lorenz_key)]
stula_ts = stula_data.loc[cutoff:, ("r", stula_key)]
# generate training data
X = np.concatenate((lorenz_ts.values, stula_ts.values), axis=0)
y = np.zeros((X.shape[0],))
y[:lorenz_data.shape[0]] = 1.0
y[lorenz_data.shape[0]:] = -1.0
# perform cross-validated ridge regression
skf = StratifiedKFold(n_splits=n_folds)
m = RidgeCV(alphas=ridge_alphas, cv=n_folds)
scores_tmp = []
for train_idx, test_idx in skf.split(X, y):
# train ridge regression
m.fit(X[train_idx], y[train_idx])
# get score of trained model
scores_tmp.append(m.score(X[test_idx], y[test_idx]))
mean_score = np.mean(scores_tmp)
# store information
etas.append(eta)
alphas.append(alpha)
scores.append(mean_score)
# visualization
etas_unique = np.sort(np.unique(etas))
alphas_unique = np.sort(np.unique(alphas))
scores_2d = np.zeros((len(alphas_unique), len(etas_unique)))
for eta, alpha, score in zip(etas, alphas, scores):
idx_c = np.argwhere(etas_unique == eta)
idx_r = np.argwhere(alphas_unique == alpha)
if score >= 0.0:
scores_2d[idx_r, idx_c] = score
plot_connectivity(scores_2d, xticklabels=np.round(etas_unique, decimals=2),
yticklabels=np.round(alphas_unique, decimals=2))
show()
|
apache-2.0
|
glorizen/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py
|
72
|
6429
|
"""
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <[email protected]>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
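# Rough usage sketch (the pattern string is illustrative):
#
#   props = parse_fontconfig_pattern("serif-12:bold")
#   # -> roughly {'family': ['serif'], 'size': ['12.0'], 'weight': ['bold']}
#
# Bare single-word properties such as "bold" are resolved through the
# _constants table above, which is why they land under 'weight'/'slant'.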
|
agpl-3.0
|
advancedplotting/aplot
|
python/plotserv/api_figure.py
|
1
|
5721
|
# Copyright (c) 2014-2015, Heliosphere Research LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Python-side representation of VIs in api_figure.
"""
from matplotlib import pyplot as plt
from .terminals import remove_none
from .core import resource
from . import filters
@resource('limits')
def limits(ctx, a):
""" Represents api_figure/Limits.vi. """
plotid = a.plotid()
xmin = a.float('xmin')
xmax = a.float('xmax')
ymin = a.float('ymin')
ymax = a.float('ymax')
ctx.set(plotid)
kx = {'xmin': xmin, 'xmax': xmax}
ky = {'ymin': ymin, 'ymax': ymax}
remove_none(kx)
remove_none(ky)
# Only invoke if the user actually specified a min or max
if len(kx) != 0:
ctx.fail_if_polar()
plt.xlim(**kx)
if len(ky) != 0:
plt.ylim(**ky)
def scales(ctx, a, whichscale):
""" Represents XScale and YScale """
SCALES = {0: 'linear', 1: 'linear', 2: 'log', 3: 'symlog'}
plotid = a.plotid()
scale = a.enum('scale', SCALES)
base = a.float('base')
linthresh = a.float('linthresh')
ctx.set(plotid)
# Raises confusing errors deep inside matplotlib.
# Our solution is to ignore invalid bases.
if base is not None and base <= 1:
base = None
# Same here.
if linthresh is not None and linthresh <= 0:
linthresh = None
if whichscale == 'x':
k = {'basex': base, 'linthreshx': linthresh, 'nonposx': 'clip'}
remove_none(k)
plt.xscale(scale, **k)
elif whichscale == 'y':
k = {'basey': base, 'linthreshy': linthresh, 'nonposy': 'clip'}
remove_none(k)
plt.yscale(scale, **k)
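# For example (hypothetical terminal values), scale=2 ('log') with base=10 on the
# x axis ends up calling plt.xscale('log', basex=10, nonposx='clip'); bases <= 1
# and non-positive linthresh values are dropped above to avoid confusing
# matplotlib errors.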
@resource('xscale')
def xscale(ctx, a):
return scales(ctx, a, 'x')
@resource('yscale')
def yscale(ctx, a):
return scales(ctx, a, 'y')
@resource('size')
def size(ctx, a):
""" Represents api_figure/Size.vi. """
plotid = a.plotid()
xsize = a.float('xsize')
ysize = a.float('ysize')
dpi = a.float('dpi')
f = ctx.set(plotid)
oldxsize, oldysize = f.get_size_inches()
olddpi = f.get_dpi()
if dpi is None or dpi <= 10:
dpi = olddpi
# Convert to pixels and apply limit logic
if xsize is None or xsize <= 1:
xsize = oldxsize*olddpi
if ysize is None or ysize <= 1:
ysize = oldysize*olddpi
f.set_dpi(dpi)
f.set_size_inches(xsize/dpi, ysize/dpi)
@resource('ticks')
def ticks(ctx, a):
""" Handles TicksPriv.vi """
AXIS = {0: 'xaxis', 1: 'yaxis'}
plotid = a.plotid()
axis = a.enum('axis', AXIS)
ticks = a.dbl_1d('ticks')
ticklabels = a.string_1d('ticklabels')
text = a.text()
ctx.set(plotid)
ctx.fail_if_log_symlog()
ax = plt.gca()
if len(ticklabels) != 0:
ticks, ticklabels = filters.filter_1d(ticks, ticklabels)
else:
ticks, = filters.filter_1d(ticks)
if axis == 'xaxis':
if len(ticks) != 0:
ax.set_xticks(ticks)
if len(ticklabels) != 0:
ax.set_xticklabels(ticklabels)
# Text keywords
k = text._k()
for t in ax.get_xticklabels():
t.set(**k)
elif axis == 'yaxis':
if len(ticks) != 0:
ax.set_yticks(ticks)
if len(ticklabels) != 0:
ax.set_yticklabels(ticklabels)
# Text keywords
k = text._k()
for t in ax.get_yticklabels():
t.set(**k)
@resource("grids")
def grids(ctx, a):
""" Represents Grids.vi. """
plotid = a.plotid()
x = a.bool('x')
y = a.bool('y')
line = a.line()
ctx.set(plotid)
k = { 'color': line.color,
'linestyle': line.style,
'linewidth': line.width, }
remove_none(k)
# If keywords are present, they force the grids on (MPL bug?)
kx = k if x else {}
ky = k if y else {}
plt.grid(x, axis='x', **kx)
plt.grid(y, axis='y', **ky)
|
bsd-3-clause
|
pleoni/game-of-life
|
plot/test_all/life_perf_compilers.py
|
1
|
1863
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from numpy import *
import sys
import datetime
datafile1="life_host_icc.out"
datafile2="life_host_gnu.out"
datafile3="life_host_pgi.out"
if len(sys.argv) > 1:
datafile=sys.argv[1]
plotfile="compilers_perf_eurora.png"
data1 = loadtxt(datafile1)
data2 = loadtxt(datafile2)
data3 = loadtxt(datafile3)
today = datetime.date.today()
fig = plt.figure()  # open a new figure
top = fig.add_subplot(211)
bottom = fig.add_subplot(212)
############# TOP
ICC_C1000 = data1[where((data1[:,0]==1) & (data1[:,5]==1000) ),:][0] # mpi 1 - Comp 1000
ICC_C0 = data1[where((data1[:,0]==1) & (data1[:,5]==1) ),:][0] # mpi 1 - comp 0
GNU_C1000 = data2[where((data2[:,0]==1) & (data2[:,5]==1000) ),:][0] # mpi 1 - Comp 1000
GNU_C0 = data2[where((data2[:,0]==1) & (data2[:,5]==1) ),:][0] # mpi 1 - comp 0
PGI_C1000 = data3[where((data3[:,0]==1) & (data3[:,5]==1000) ),:][0] # mpi 1 - Comp 1000
PGI_C0 = data3[where((data3[:,0]==1) & (data3[:,5]==1) ),:][0] # mpi 1 - comp 0
top.set_title(str(today) + ' life_hpc2 on eurora - NCOMP=1000')
top.grid()
top.set_xlabel('lattice Size')
top.set_ylabel('time')
#top.set_yscale('log')
#top.legend()
top.plot(ICC_C1000[:,3],ICC_C1000[:,8],'-xr',GNU_C1000[:,3],GNU_C1000[:,8],'-xg',PGI_C1000[:,3],PGI_C1000[:,8],'-xc');
top.legend(('icc','gnu','pgi'), loc = 'upper left', shadow = False, prop={'size':9})
############# BOTTOM
bottom.set_title(str(today) + ' life_hpc2 on eurora - NCOMP=1')
bottom.grid()
bottom.set_xlabel('lattice size')
bottom.set_ylabel('time')
bottom.plot(ICC_C0[:,3],ICC_C0[:,8],'-xr',GNU_C0[:,3],GNU_C0[:,8],'-xg',PGI_C0[:,3],PGI_C0[:,8],'-xc');
bottom.legend(('icc','gnu','pgi'), loc = 'upper left', shadow = False, prop={'size':9})
plt.subplots_adjust(hspace=0.5)
plt.savefig(plotfile)
#plt.show()
|
gpl-2.0
|
jpinedaf/pyspeckit
|
pyspeckit/spectrum/models/n2hp.py
|
4
|
11414
|
"""
===========
N2H+ fitter
===========
Reference for line params:
Dore (Private Communication), improving on the determinations from
L. Pagani, F. Daniel, and M. L. Dubernet A&A 494, 719-727 (2009)
DOI: 10.1051/0004-6361:200810570
http://www.strw.leidenuniv.nl/~moldata/N2H+.html
http://adsabs.harvard.edu/abs/2005MNRAS.363.1083D
"""
from __future__ import print_function
import numpy as np
import matplotlib.cbook as mpcb
import copy
try:
from astropy.io import fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
from ...mpfit import mpfit
from .. import units
from . import fitter,model,modelgrid
from . import hyperfine
import astropy.units as u
freq_dict_cen ={
'J1-0': 93173.7637e6,
'J2-1': 186344.8420e6,
'J3-2': 279511.8325e6,
}
voff_lines_dict={
####### J 1-0
'J1-0_01': -7.9930,
'J1-0_02': -7.9930,
'J1-0_03': -7.9930,
'J1-0_04': -0.6112,
'J1-0_05': -0.6112,
'J1-0_06': -0.6112,
'J1-0_07': 0.0000,
'J1-0_08': 0.9533,
'J1-0_09': 0.9533,
'J1-0_10': 5.5371,
'J1-0_11': 5.5371,
'J1-0_12': 5.5371,
'J1-0_13': 5.9704,
'J1-0_14': 5.9704,
'J1-0_15': 6.9238,
####### J 2-1
'J2-1_01': -4.6258,
'J2-1_02': -4.5741,
'J2-1_03': -4.4376,
'J2-1_04': -4.2209,
'J2-1_05': -4.0976,
'J2-1_06': -3.8808,
'J2-1_07': -3.1619,
'J2-1_08': -2.9453,
'J2-1_09': -2.3469,
'J2-1_10': -1.9290,
'J2-1_11': -1.5888,
'J2-1_12': -1.5516,
'J2-1_13': -1.4523,
'J2-1_14': -1.1465,
'J2-1_15': -0.8065,
'J2-1_16': -0.6532,
'J2-1_17': -0.4694,
'J2-1_18': -0.1767,
'J2-1_19': 0.0000,
'J2-1_20': 0.0071,
'J2-1_21': 0.1137,
'J2-1_22': 0.1291,
'J2-1_23': 0.1617,
'J2-1_24': 0.2239,
'J2-1_25': 0.5237,
'J2-1_26': 0.6384,
'J2-1_27': 0.7405,
'J2-1_28': 2.1394,
'J2-1_29': 2.5158,
'J2-1_30': 2.5444,
'J2-1_31': 2.6225,
'J2-1_32': 2.8844,
'J2-1_33': 3.0325,
'J2-1_34': 3.0990,
'J2-1_35': 3.2981,
'J2-1_36': 3.5091,
'J2-1_37': 3.8148,
'J2-1_38': 3.8201,
'J2-1_39': 6.9891,
'J2-1_40': 7.5057,
####### J 3-2
'J3-2_01': -3.0666,
'J3-2_02': -2.9296,
'J3-2_03': -2.7221,
'J3-2_04': -2.6563,
'J3-2_05': -2.5270,
'J3-2_06': -2.4010,
'J3-2_07': -2.2535,
'J3-2_08': -2.1825,
'J3-2_09': -2.1277,
'J3-2_10': -1.5862,
'J3-2_11': -1.0158,
'J3-2_12': -0.6131,
'J3-2_13': -0.6093,
'J3-2_14': -0.5902,
'J3-2_15': -0.4872,
'J3-2_16': -0.4725,
'J3-2_17': -0.2757,
'J3-2_18': -0.0697,
'J3-2_19': -0.0616,
'J3-2_20': -0.0022,
'J3-2_21': 0.0000,
'J3-2_22': 0.0143,
'J3-2_23': 0.0542,
'J3-2_24': 0.0561,
'J3-2_25': 0.0575,
'J3-2_26': 0.0687,
'J3-2_27': 0.1887,
'J3-2_28': 0.2411,
'J3-2_29': 0.3781,
'J3-2_30': 0.4620,
'J3-2_31': 0.4798,
'J3-2_32': 0.5110,
'J3-2_33': 0.5540,
'J3-2_34': 0.7808,
'J3-2_35': 0.9066,
'J3-2_36': 1.6382,
'J3-2_37': 1.6980,
'J3-2_38': 2.1025,
'J3-2_39': 2.1236,
'J3-2_40': 2.1815,
'J3-2_41': 2.5281,
'J3-2_42': 2.6458,
'J3-2_43': 2.8052,
'J3-2_44': 3.0320,
'J3-2_45': 3.4963,
}
line_strength_dict = {
####### J 1-0
'J1-0_01': 0.025957,
'J1-0_02': 0.065372,
'J1-0_03': 0.019779,
'J1-0_04': 0.004376,
'J1-0_05': 0.034890,
'J1-0_06': 0.071844,
'J1-0_07': 0.259259,
'J1-0_08': 0.156480,
'J1-0_09': 0.028705,
'J1-0_10': 0.041361,
'J1-0_11': 0.013309,
'J1-0_12': 0.056442,
'J1-0_13': 0.156482,
'J1-0_14': 0.028705,
'J1-0_15': 0.037038,
####### J 2-1
'J2-1_01': 0.008272,
'J2-1_02': 0.005898,
'J2-1_03': 0.031247,
'J2-1_04': 0.013863,
'J2-1_05': 0.013357,
'J2-1_06': 0.010419,
'J2-1_07': 0.000218,
'J2-1_08': 0.000682,
'J2-1_09': 0.000152,
'J2-1_10': 0.001229,
'J2-1_11': 0.000950,
'J2-1_12': 0.000875,
'J2-1_13': 0.002527,
'J2-1_14': 0.000365,
'J2-1_15': 0.000164,
'J2-1_16': 0.021264,
'J2-1_17': 0.031139,
'J2-1_18': 0.000576,
'J2-1_19': 0.200000,
'J2-1_20': 0.001013,
'J2-1_21': 0.111589,
'J2-1_22': 0.088126,
'J2-1_23': 0.142604,
'J2-1_24': 0.011520,
'J2-1_25': 0.027608,
'J2-1_26': 0.012800,
'J2-1_27': 0.066354,
'J2-1_28': 0.013075,
'J2-1_29': 0.003198,
'J2-1_30': 0.061880,
'J2-1_31': 0.004914,
'J2-1_32': 0.035879,
'J2-1_33': 0.011026,
'J2-1_34': 0.039052,
'J2-1_35': 0.019767,
'J2-1_36': 0.004305,
'J2-1_37': 0.001814,
'J2-1_38': 0.000245,
'J2-1_39': 0.000029,
'J2-1_40': 0.000004,
####### J 3-2
'J3-2_01': 0.001845,
'J3-2_02': 0.001818,
'J3-2_03': 0.003539,
'J3-2_04': 0.014062,
'J3-2_05': 0.011432,
'J3-2_06': 0.000089,
'J3-2_07': 0.002204,
'J3-2_08': 0.002161,
'J3-2_09': 0.000061,
'J3-2_10': 0.000059,
'J3-2_11': 0.000212,
'J3-2_12': 0.000255,
'J3-2_13': 0.000247,
'J3-2_14': 0.000436,
'J3-2_15': 0.010208,
'J3-2_16': 0.000073,
'J3-2_17': 0.007447,
'J3-2_18': 0.000000,
'J3-2_19': 0.000155,
'J3-2_20': 0.000274,
'J3-2_21': 0.174603,
'J3-2_22': 0.018683,
'J3-2_23': 0.135607,
'J3-2_24': 0.100527,
'J3-2_25': 0.124866,
'J3-2_26': 0.060966,
'J3-2_27': 0.088480,
'J3-2_28': 0.001083,
'J3-2_29': 0.094510,
'J3-2_30': 0.014029,
'J3-2_31': 0.007191,
'J3-2_32': 0.022222,
'J3-2_33': 0.047915,
'J3-2_34': 0.015398,
'J3-2_35': 0.000071,
'J3-2_36': 0.000794,
'J3-2_37': 0.001372,
'J3-2_38': 0.007107,
'J3-2_39': 0.016618,
'J3-2_40': 0.009776,
'J3-2_41': 0.000997,
'J3-2_42': 0.000487,
'J3-2_43': 0.000069,
'J3-2_44': 0.000039,
'J3-2_45': 0.000010,
}
# Get frequency dictionary in Hz based on the offset velocity and rest frequency
conv_J10=u.doppler_radio(freq_dict_cen['J1-0']*u.Hz)
conv_J21=u.doppler_radio(freq_dict_cen['J2-1']*u.Hz)
conv_J32=u.doppler_radio(freq_dict_cen['J3-2']*u.Hz)
freq_dict = {
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J10).value) for name in voff_lines_dict.keys() if "J1-0" in name
}
freq_dict.update({
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J21).value) for name in voff_lines_dict.keys() if "J2-1" in name
})
freq_dict.update({
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J32).value) for name in voff_lines_dict.keys() if "J3-2" in name
})
# relative_strength_total_degeneracy is not used in the CLASS implementation
# of the hfs fit. It is the sum of the degeneracy values for all hyperfines
# for a given line; it gives the relative weights between lines.
# Hyperfine weights are treated as normalized within one rotational transition.
w10 = sum(val for name,val in line_strength_dict.items() if 'J1-0' in name)
w21 = sum(val for name,val in line_strength_dict.items() if 'J2-1' in name)
w32 = sum(val for name,val in line_strength_dict.items() if 'J3-2' in name)
relative_strength_total_degeneracy = {
name : w10 for name in line_strength_dict.keys() if "J1-0" in name
}
relative_strength_total_degeneracy.update({
name : w21 for name in line_strength_dict.keys() if "J2-1" in name
})
relative_strength_total_degeneracy.update({
name : w32 for name in line_strength_dict.keys() if "J3-2" in name
})
# Get the list of line names from the previous lists
line_names = [name for name in voff_lines_dict.keys()]
n2hp_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict,
line_strength_dict,
relative_strength_total_degeneracy)
n2hp_vtau_fitter = n2hp_vtau.fitter
n2hp_vtau_vheight_fitter = n2hp_vtau.vheight_fitter
n2hp_vtau_tbg_fitter = n2hp_vtau.background_fitter
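# Hypothetical usage sketch (not part of the original module): the hyperfine
# model object built above can be evaluated directly, exactly as n2hp_radex
# does below. Building the axis with units.SpectroscopicAxis(..., unit='Hz')
# is an assumption about the pyspeckit units API; Tex, tau, xoff_v and width
# are arbitrary illustration values.
def _example_n2hp_vtau_spectrum():
    xarr = units.SpectroscopicAxis(np.linspace(93.16e9, 93.19e9, 1000),
                                   unit='Hz')
    return n2hp_vtau(xarr, Tex=8.0, tau=1.0, xoff_v=0.0, width=0.3)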
# RADEX part from old file
def n2hp_radex(xarr,
density=4,
column=13,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
grid_vwidth_scale=False,
texgrid=None,
taugrid=None,
hdr=None,
path_to_texgrid='',
path_to_taugrid='',
temperature_gridnumber=3,
debug=False,
verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
minfreq = (4.8,)
maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
        raise Exception("Invalid texgrid/taugrid specification: pass file paths, or (minfreq, maxfreq, grid) tuples together with hdr.")
# Convert X-units to frequency in GHz
xarr = copy.copy(xarr)
xarr.convert_to_unit('Hz', quiet=True)
tau_nu_cumul = np.zeros(len(xarr))
gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
tau = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber,:,:],np.array([[gridval2],[gridval1]]),order=1) for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber,:,:],np.array([[gridval2],[gridval1]]),order=1) for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if verbose:
print("density %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, column, tau, tex))
if debug:
import pdb; pdb.set_trace()
return n2hp_vtau(xarr,Tex=tex,tau=tau,xoff_v=xoff_v,width=width,**kwargs)
|
mit
|
lucidfrontier45/scikit-learn
|
sklearn/linear_model/tests/test_base.py
|
8
|
3585
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD Style.
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
"""
Test LinearRegression on a simple dataset.
"""
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
"""
Test assertions on betas shape.
"""
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
|
bsd-3-clause
|
fengjiang96/tushare
|
tushare/stock/billboard.py
|
19
|
12058
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
龙虎榜数据
Created on 2015年6月10日
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from pandas.compat import StringIO
from tushare.stock import cons as ct
import numpy as np
import time
import re
import lxml.html
from lxml import etree
from tushare.util import dateu as du
from tushare.stock import ref_vars as rv
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def top_list(date = None, retry_count=3, pause=0.001):
"""
获取每日龙虎榜列表
Parameters
--------
date:string
明细数据日期 format:YYYY-MM-DD 如果为空,返回最近一个交易日的数据
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
------
DataFrame
code:代码
name :名称
pchange:涨跌幅
amount:龙虎榜成交额(万)
buy:买入额(万)
bratio:占总成交比例
sell:卖出额(万)
sratio :占总成交比例
reason:上榜原因
date :日期
"""
if date is None:
if du.get_hour() < 18:
date = du.last_tddate()
else:
date = du.today()
else:
if(du.is_holiday(date)):
return None
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'], date))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dt_1\"]")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr)[0]
df.columns = [i for i in range(1,12)]
df = df.apply(_f_rows, axis=1)
df = df.fillna(method='ffill')
df = df.drop([1, 4], axis=1)
df.columns = rv.LHB_COLS
df = df.drop_duplicates()
df['code'] = df['code'].astype(int)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
df['date'] = date
except:
pass
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def cap_tops(days= 5, retry_count= 3, pause= 0.001):
"""
获取个股上榜统计数据
Parameters
--------
days:int
天数,统计n天以来上榜次数,默认为5天,其余是10、30、60
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
------
DataFrame
code:代码
name:名称
count:上榜次数
bamount:累积购买额(万)
samount:累积卖出额(万)
net:净额(万)
bcount:买入席位数
scount:卖出席位数
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
        df = _cap_tops(days, pageNo=1, retry_count=retry_count,
                       pause=pause)
        if df is not None:
            df['code'] = df['code'].map(lambda x: str(x).zfill(6))
            df = df.drop_duplicates('code')
        return df
def _cap_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[0],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_GGTJ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _cap_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def broker_tops(days= 5, retry_count= 3, pause= 0.001):
"""
获取营业部上榜统计数据
Parameters
--------
days:int
天数,统计n天以来上榜次数,默认为5天,其余是10、30、60
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
---------
broker:营业部名称
count:上榜次数
bamount:累积购买额(万)
bcount:买入席位数
samount:累积卖出额(万)
scount:卖出席位数
top3:买入前三股票
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
df = _broker_tops(days, pageNo=1, retry_count=retry_count,
pause=pause)
return df
def _broker_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[1],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_YYTJ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _broker_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def inst_tops(days= 5, retry_count= 3, pause= 0.001):
"""
获取机构席位追踪统计数据
Parameters
--------
days:int
天数,统计n天以来上榜次数,默认为5天,其余是10、30、60
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
--------
code:代码
name:名称
bamount:累积买入额(万)
bcount:买入次数
samount:累积卖出额(万)
scount:卖出次数
net:净额(万)
"""
if ct._check_lhb_input(days) is True:
ct._write_head()
        df = _inst_tops(days, pageNo=1, retry_count=retry_count,
                        pause=pause)
        if df is not None:
            df['code'] = df['code'].map(lambda x: str(x).zfill(6))
        return df
def _inst_tops(last=5, pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[2],
ct.PAGES['fd'], last, pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df = df.drop([2,3], axis=1)
df.columns = rv.LHB_JGZZ_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _inst_tops(last, pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def inst_detail(retry_count= 3, pause= 0.001):
"""
获取最近一个交易日机构席位成交明细统计数据
Parameters
--------
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
----------
code:股票代码
name:股票名称
date:交易日期
bamount:机构席位买入额(万)
samount:机构席位卖出额(万)
type:类型
"""
ct._write_head()
df = _inst_detail(pageNo=1, retry_count=retry_count,
pause=pause)
    if df is not None and len(df) > 0:
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
return df
def _inst_detail(pageNo=1, retry_count=3, pause=0.001, dataArr=pd.DataFrame()):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.LHB_SINA_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], rv.LHB_KINDS[3],
ct.PAGES['fd'], '', pageNo))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = rv.LHB_JGMX_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _inst_detail(pageNo, retry_count, pause, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def _f_rows(x):
if '%' in x[3]:
x[11] = x[6]
for i in range(6, 11):
x[i] = x[i-5]
for i in range(1, 6):
x[i] = np.NaN
return x
if __name__ == "__main__":
print(top_list('2015-06-17'))
# print(inst_detail())
|
bsd-3-clause
|
histed/pylibnidaqmxMH
|
nidaqmx/wxagg_plot.py
|
16
|
4515
|
import os
import sys
import time
import traceback
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
import wx
from matplotlib.figure import Figure
class PlotFigure(wx.Frame):
def OnKeyPressed (self, event):
key = event.key
if key=='q':
self.OnClose(event)
def __init__(self, func, timer_period):
wx.Frame.__init__(self, None, -1, "Plot Figure")
self.fig = Figure((12,9), 75)
self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
self.canvas.mpl_connect('key_press_event', self.OnKeyPressed)
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
self.func = func
self.plot = None
self.timer_period = timer_period
self.timer = wx.Timer(self)
self.is_stopped = False
if os.name=='nt':
# On Windows, default frame size behaviour is incorrect
# you don't need this under Linux
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
            self.toolbar.SetSize(wx.Size(fw, th))
# Create a figure manager to manage things
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(self.canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
# Best to allow the toolbar to resize!
sizer.Add(self.toolbar, 0, wx.GROW)
self.SetSizer(sizer)
self.Fit()
self.Bind(wx.EVT_TIMER, self.OnTimerWrap, self.timer)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.timer.Start(timer_period)
def GetToolBar(self):
# You will need to override GetToolBar if you are using an
# unmanaged toolbar in your frame
return self.toolbar
def OnClose(self, event):
self.is_stopped = True
print 'Closing PlotFigure, please wait.'
self.timer.Stop()
self.Destroy()
def OnTimerWrap (self, evt):
if self.is_stopped:
print 'Ignoring timer callback'
return
t = time.time()
try:
self.OnTimer (evt)
except KeyboardInterrupt:
self.OnClose(evt)
duration = 1000*(time.time () - t)
if duration > self.timer_period:
print 'Changing timer_period from %s to %s msec' % (self.timer_period, 1.2*duration)
self.timer_period = 1.2*duration
self.timer.Stop()
self.timer.Start (self.timer_period)
def OnTimer(self, evt):
try:
xdata, ydata_list, legend = self.func()
except RuntimeError:
traceback.print_exc(file=sys.stderr)
self.OnClose(evt)
return
if len (ydata_list.shape)==1:
ydata_list = ydata_list.reshape((1, ydata_list.size))
if self.plot is None:
self.axes = self.fig.add_axes([0.1,0.1,0.8,0.8])
l = []
for ydata in ydata_list:
l.extend ([xdata, ydata])
self.plot = self.axes.plot(*l)
self.axes.set_xlabel('Seconds')
self.axes.set_ylabel('Volts')
self.axes.set_title('nof samples=%s' % (len(xdata)))
self.axes.legend (legend)
else:
self.axes.set_xlim(xmin = xdata[0], xmax=xdata[-1])
ymin, ymax = 1e9,-1e9
for line, data in zip (self.plot, ydata_list):
line.set_xdata(xdata)
line.set_ydata(data)
ymin, ymax = min (data.min (), ymin), max (data.max (), ymax)
dy = (ymax-ymin)/20
self.axes.set_ylim(ymin=ymin-dy, ymax=ymax+dy)
self.canvas.draw()
def onEraseBackground(self, evt):
# this is supposed to prevent redraw flicker on some X servers...
pass
def animated_plot(func, timer_period):
app = wx.PySimpleApp(clearSigInt=False)
frame = PlotFigure(func, timer_period)
frame.Show()
app.MainLoop()
if __name__ == '__main__':
from numpy import *
import time
start_time = time.time ()
def func():
x = arange (100, dtype=float)/100*pi
d = sin (x+(time.time ()-start_time))
return x, d, ['sin (x+time)']
try:
animated_plot (func, 1)
except Exception, msg:
print 'Got exception: %s' % ( msg)
else:
print 'Exited normally'
|
bsd-3-clause
|
reinaldomaslim/Singaboat_RobotX2016
|
robotx_nav/nodes/task7_non_process_2.py
|
1
|
8086
|
#!/usr/bin/env python
""" Mission 7-Detect and Deliver
1. Random walk with gaussian at center of map until station position is acquired
2. loiter around until correct face seen
3. if symbol seen, move towards symbol perpendicularly
4. if close enough, do move_base aiming
task 7:
-----------------
Created by Reinaldo@ 2016-12-07
Authors: Reinaldo
-----------------
"""
import rospy
import multiprocessing as mp
import math
import time
import numpy as np
import os
import tf
import random
from sklearn.cluster import KMeans
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from move_base_forward import Forward
from move_base_waypoint import MoveTo
from move_base_loiter import Loiter
from move_base_stationkeeping import StationKeeping
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import Int8
class DetectDeliver(object):
map_dim = [[0, 40], [0, 40]]
MAX_DATA=10
x0, y0, yaw0= 0, 0, 0
symbol=[0 , 0]
symbols=np.zeros((MAX_DATA, 2)) #unordered list
symbols_counter=0
angle_threshold=10*math.pi/180
symbol_location=np.zeros((MAX_DATA, 2))
shape_counter=0
distance_to_box=3
def __init__(self, symbol_list):
print("starting task 7")
rospy.init_node('task_7', anonymous=True)
self.symbol=symbol_list
self.symbol_visited=0
self.symbol_seen=False
self.symbol_position=[0, 0, 0]
self.station_seen=False #station here is cluster center of any face
self.station_position=[0, 0]
self.loiter_obj = Loiter("loiter", is_newnode=False, target=None, radius=5, polygon=4, mode=2, mode_param=1, is_relative=False)
self.moveto_obj = MoveTo("moveto", is_newnode=False, target=None, is_relative=False)
self.stationkeep_obj = StationKeeping("station_keeping", is_newnode=False, target=None, radius=2, duration=30)
rospy.Subscriber("/filtered_marker_array", MarkerArray, self.symbol_callback, queue_size = 50)
rospy.Subscriber("/finished_search_and_shoot", Int8, self.stop_shoot_callback, queue_size = 5)
self.shooting_pub= rospy.Publisher('/start_search_and_shoot', Int8, queue_size=5)
self.marker_pub= rospy.Publisher('/waypoint_markers', Marker, queue_size=5)
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
self.odom_received = False
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
print("odom received")
print(self.symbol)
while not rospy.is_shutdown() and not self.station_seen:
self.moveto_obj.respawn(self.random_walk(), )#forward
print("station: ")
print(self.station_position)
#loiter around station until symbol's face seen
loiter_radius=math.sqrt((self.x0-self.station_position[0])**2+(self.y0-self.station_position[1])**2)
if loiter_radius>10:
loiter_radius=10
while not rospy.is_shutdown():
print(loiter_radius)
self.loiter_obj.respawn(self.station_position, loiter_radius, )
if loiter_radius>4:
loiter_radius-=2
if self.symbol_seen:
print(self.symbol_position)
print("symbol's position acquired, exit loitering")
break
time.sleep(1)
print(self.symbol_position)
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
counter=0
print(d)
    #move to an offset, replanning along the way
while not rospy.is_shutdown():
alpha=self.yaw0-self.symbol_position[2]
theta=math.atan2(math.fabs(math.sin(alpha)), math.fabs(math.cos(alpha))) #always +ve and 0-pi/2
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
perpendicular_d=0.6*d*math.cos(theta)
if counter ==0 or theta>self.angle_threshold or d>self.distance_to_box:
print("replan")
target=[self.symbol_position[0]+perpendicular_d*math.cos(self.symbol_position[2]),self.symbol_position[1]+perpendicular_d*math.sin(self.symbol_position[2]), -self.symbol_position[2]]
self.moveto_obj.respawn(target, )
counter+=1
if d<self.distance_to_box:
break
time.sleep(1)
#aiming to the box
self.shooting_complete=False
self.is_aiming=False
print("aiming to box")
print("start shooting module")
self.shooting_pub.publish(1)
station=[self.x0, self.y0, -self.symbol_position[2]]
radius=2
duration=30
print(self.symbol_position)
print(station)
while not rospy.is_shutdown():
self.shooting_pub.publish(1)
#duration 0 is forever
if not self.is_aiming:
self.stationkeep_obj.respawn(station, radius, duration)
#make aiming respawn
if self.shooting_complete:
print("shooting done, return to base")
break
time.sleep(1)
def stop_shoot_callback(self, msg):
if msg.data==1:
#stop aiming station
self.shooting_complete=True
def random_walk(self):
""" create random walk points and more favor towards center """
x = random.gauss(np.mean(self.map_dim[0]), 0.25 * np.ptp(self.map_dim[0]))
y = random.gauss(np.mean(self.map_dim[1]), 0.25 * np.ptp(self.map_dim[1]))
return self.map_constrain(x, y)
def map_constrain(self, x, y):
""" constrain x and y within map """
if x > np.max(self.map_dim[0]):
x = np.max(self.map_dim[0])
elif x < np.min(self.map_dim[0]):
x = np.min(self.map_dim[0])
else:
x = x
if y > np.max(self.map_dim[1]):
y = np.max(self.map_dim[1])
elif y < np.min(self.map_dim[1]):
y = np.min(self.map_dim[1])
else:
y = y
return [x, y, 0]
def symbol_callback(self, msg):
if len(msg.markers)>0:
if self.symbols_counter>self.MAX_DATA:
station_kmeans = KMeans(n_clusters=1).fit(self.symbols)
self.station_center=station_kmeans.cluster_centers_
self.station_position[0]=self.station_center[0][0]
self.station_position[1]=self.station_center[0][1]
self.station_seen=True
for i in range(len(msg.markers)):
self.symbols[self.symbols_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.symbols_counter+=1
if msg.markers[i].type==self.symbol[0] and msg.markers[i].id==self.symbol[1]:
#set position_list (not sure)
self.symbol_position[0]=msg.markers[i].pose.position.x
self.symbol_position[1]=msg.markers[i].pose.position.y
x = msg.markers[i].pose.orientation.x
y = msg.markers[i].pose.orientation.y
z = msg.markers[i].pose.orientation.z
w = msg.markers[i].pose.orientation.w
_, _, self.symbol_position[2] = euler_from_quaternion((x, y, z, w))
self.symbol_location[self.shape_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.shape_counter+=1
if self.station_seen and self.shape_counter>self.MAX_DATA:
symbol_kmeans = KMeans(n_clusters=1).fit(self.symbol_location)
self.symbol_center=symbol_kmeans.cluster_centers_
self.symbol_position[0]=self.symbol_center[0][0]
self.symbol_position[1]=self.symbol_center[0][1]
#print(self.symbol_position)
self.symbol_seen=True
#self.pool.apply(cancel_loiter)
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
if __name__ == '__main__':
try:
#[id,type]cruciform red
DetectDeliver([1,0])
except rospy.ROSInterruptException:
rospy.loginfo("Task 7 Finished")
|
gpl-3.0
|
voibit/plot.py
|
plot.py
|
1
|
3288
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mathis 2016
#
# A program that plots data from a file.
import numpy as np
import matplotlib.pyplot as plt
import sys
#----------------------------------------------------------------------------------
filnavn='tabell.csv' #set the variables for the plotting program
separator=' ' #how the values on each line are separated; normally ' '
#Whether the program should determine the x and y axis limits on its own.
grenseFraData=1 #1 if the program should do it, 0 if you want to set them below.
Xmin=-20
Xmax=360
Ymin=-1
Ymax=2
#----------------------------------------------------------------------------------
# If an argument is given to the program, use it as the table filename
if len(sys.argv) >= 2:
filnavn = sys.argv[1]
if len(sys.argv) == 3:
separator = sys.argv[2]
#Load the data from the file
with open(filnavn) as f:
linjer = f.readlines()
#initialize the axes and the min and max values
akse=[]
ymin=[]
ymax=[]
if len(linjer)< 2:
print("GRAF.PY: Ingen linjer i "+filnavn+" :'( ")
exit()
#Convert the list of lines into axes with data.
for linjenr, linje in enumerate(linjer) :
    #process each column in the row
    #strip redundant whitespace if the file is whitespace-separated
if separator==' ':
linje=linje.split()
    #keep it if separated by something else
else:
linje=linje.split(separator)
for kolnr, kolonne in enumerate(linje) :
        #header row: used for axis labels, not data
if linjenr == 0:
akse.append([])
ymin.append(1e300)
ymax.append(-1e300)
akse[kolnr].append(kolonne)
else:
            #Update the min and max values.
kolonne= float(kolonne)
if (kolonne < ymin[kolnr]):
ymin[kolnr]=kolonne
if (kolonne > ymax[kolnr]):
ymax[kolnr]=kolonne
akse[kolnr].append(kolonne)
x=akse[0][1:]
if len(akse) < 2:
print ("GRAF.PY: Du har ikke oppgitt en gyldig fil med 2 eller fler akser.. ")
exit()
elif len(x) == 0:
print ("GRAF.PY: "+filnavn+" har ikke gyldig data")
exit()
plt.figure(figsize=(9,6), dpi=80)
#set the window title
fig = plt.gcf()
fig.canvas.set_window_title("plot.py: "+filnavn)
#set the figure title
plt.title(filnavn, fontsize=20)
#Set the x-axis label (first value in the data)
plt.xlabel(akse[0][0])
plt.ylabel("vedi")
if plt.get_backend().lower() in ['agg', 'macosx']:
fig.set_tight_layout(True)
else:
fig.tight_layout()
# xmin, xmax, ymin, ymax
# if the limits should be computed from the data.
if grenseFraData == 1:
grenser=[]
grenser.append(min(x))
grenser.append(max(x))
grenser.append(min(ymin[1:]))
grenser.append(max(ymax[1:]))
#if the axis limits are to be set manually
else :
grenser=[Xmin,Xmax,Ymin,Ymax]
# set the limits
plt.axis(grenser)
#number of axes
antAkser=len(akse);
# Plot all the rows (the y values)
for i in range(antAkser)[1:]:
plt.plot(x,akse[i][1:],label=akse[i][0])
#legend
plt.legend().get_frame().set_alpha(0.5)
#Compute the scale used to size the arrow heads
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
xscale = (xmax-xmin )/100
yscale = (ymax-ymin )/100
#draw an x and a y axis as arrows.
plt.arrow(xmin, 0, (xmax-xmin)*0.98, 0, head_width=yscale*2,head_length=xscale*2,fc='k', ec='k', lw=2)
plt.arrow(0, ymin, 0, (ymax-ymin)*0.96, head_width=xscale*1.5, head_length=yscale*3,fc='k', ec='k', lw=2)
#show the grid
plt.grid()
#show the plot
plt.show()
|
mit
|
haisland0909/Denoising-Dirty-Documents
|
script/preprocessing.py
|
1
|
3807
|
#coding: UTF8
import img_to_pickle as i_p
import os
import cv2
import numpy as np
from sklearn.pipeline import FeatureUnion
from sklearn import preprocessing
import sklearn.linear_model
import sklearn.ensemble
import features as f
ROOT = os.path.abspath(os.path.dirname(__file__))
PICKLE_DIR = ROOT.replace("script", "tmp/kaggle_dirtydoc_data/pickle_data")
SAVE_DIR = ROOT.replace("script", "tmp/kaggle_dirtydoc_data/divide_image")
def padding(img):
pad1 = 0
row = img.shape[0]
if row%10 != 0:
pad1 = 10 - row%10
row = row + pad1
pad2 = 0
col = img.shape[1]
if col%10 != 0:
        pad2 = 10 - col%10
col = col + pad2
cols = [0]*col
image = [cols] * row
image = np.asarray(image)
image = image.astype(np.uint8)
image[pad1/2:(pad1/2)+img.shape[0], pad2/2:(pad2/2)+img.shape[1]] = img
return image
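# Hypothetical sketch (not part of the original pipeline): padding() pads both
# dimensions up to the next multiple of 10 and centres the original image
# inside the zero border; the 258-row case matches the smaller input images.
def _padding_demo():
    demo = np.ones((258, 540), dtype=np.uint8)
    padded = padding(demo)
    assert padded.shape[0] % 10 == 0 and padded.shape[1] % 10 == 0
    return padded.shape  # (260, 540)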
def divide(data, target):
save_dir = SAVE_DIR + "/" + target + "/"
numbers = data.keys()
for i in xrange(len(numbers)):
img = data[numbers[i]]
if img.shape[0] == 420:
for ys in xrange(38):
Y = ys * 10
for xs in xrange(50):
X = xs * 10
divide_data = img[Y:Y+50, X:X+50]
savefile = save_dir + numbers[i] + "_" + str(xs) + "_" + str(ys) + ".jpg"
cv2.imwrite(savefile, divide_data)
elif img.shape[0] == 258:
img = padding(img)
for ys in xrange(22):
Y = ys * 10
for xs in xrange(50):
X = xs * 10
divide_data = img[Y:Y+50, X:X+50]
savefile = save_dir + numbers[i] + "_" + str(xs) + "_" + str(ys) + ".jpg"
cv2.imwrite(savefile, divide_data)
else:
print "error"
quit()
def make_dividedata():
data = i_p.pickle_up(PICKLE_DIR, "train_gray")
divide(data, "train")
data = i_p.pickle_up(PICKLE_DIR, "test_gray")
divide(data, "test")
data = i_p.pickle_up(PICKLE_DIR, "clean_gray")
divide(data, "train_label")
def make_checkdata(mode="df"):
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
Std = preprocessing.StandardScaler()
_, _, _, train_gray_data, test_gray_data, _, labels = i_p.load_data()
train_keys = train_gray_data.keys()[:2]
train_inputs = {}
train_labels = {}
for i in xrange(len(train_keys)):
input_ = train_gray_data[train_keys[i]]
label = labels[train_keys[i]]
train_inputs.update({train_keys[i]:input_})
train_labels.update({train_keys[i]:label})
test_keys = test_gray_data.keys()[:2]
test_inputs = {}
for i in xrange(len(test_keys)):
input_ = test_gray_data[test_keys[i]]
test_inputs.update({test_keys[i]:input_})
train_df = f.make_data_df(train_inputs, train_labels)
test_df = f.make_test_df(test_inputs)
if mode == "df":
train_df = train_df.reset_index()
test_df = test_df.reset_index()
train_df.columns = ["pngname", "input", "label"]
test_df.columns = ["pngname", "input"]
return train_df, train_keys, test_df, test_keys
elif mode == "feature":
X_train = fu.fit_transform(train_df)
X_train = Std.fit_transform(X_train)
y_train = np.concatenate(train_df["label"].apply(lambda x: x.flatten()))
X_test = fu.fit_transform(test_df)
X_test = Std.fit_transform(X_test)
return X_train, y_train, X_test
if __name__ == "__main__":
#make_dividedata()
make_checkdata()
|
apache-2.0
|
kazemakase/scikit-learn
|
sklearn/mixture/gmm.py
|
128
|
31069
|
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
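# Hypothetical usage sketch (not part of the library): evaluate the per-sample,
# per-component Gaussian log-densities for a toy two-component 'diag' model.
# All numbers below are arbitrary illustration values.
def _log_density_demo():
    X = np.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]])
    means = np.array([[0.0, 0.0], [5.0, 5.0]])
    covars = np.array([[1.0, 1.0], [2.0, 2.0]])  # (n_components, n_features) for 'diag'
    lpr = log_multivariate_normal_density(X, means, covars, covariance_type='diag')
    return lpr  # shape (3, 2): one row per sample, one column per component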
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
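# Hypothetical usage sketch (not part of the library): draw 500 samples from a
# single 2-D diagonal-covariance Gaussian and note the (n_features, n_samples)
# output convention documented above. The numbers are arbitrary.
def _sample_gaussian_demo():
    mean = np.array([0.0, 10.0])
    covar = np.array([1.0, 0.25])  # diagonal entries of the covariance
    X = sample_gaussian(mean, covar, covariance_type='diag',
                        n_samples=500, random_state=0)
    return X.shape  # (2, 500)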
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
        Warning: Due to the final maximization step in the EM algorithm,
        the prediction may not be 100% accurate when the number of
        iterations is low.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
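# Editorial sketch (not part of the original module): the bic/aic methods
# above are typically used to choose ``n_components``. The helper below is a
# hypothetical illustration of such a sweep; the constructor arguments are
# assumptions based on the GMM class defined earlier in this module.
def _example_select_n_components_by_bic(X, max_components=5):
    """Fit GMMs with 1..max_components components and return the BIC-best one.

    Minimal sketch assuming ``X`` is an (n_samples, n_features) array.
    """
    best_gmm, best_bic = None, np.inf
    for k in range(1, max_components + 1):
        model = GMM(n_components=k, covariance_type='full', random_state=0)
        model.fit(X)
        bic = model.bic(X)  # lower is better
        if bic < best_bic:
            best_bic, best_gmm = bic, model
    return best_gmm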
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
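# Editorial sketch: for a single sample x and component k the vectorized
# expression above reduces to the standard diagonal-Gaussian log-density
#   log N(x | mu_k, diag(v_k)) = -0.5 * sum_d [log(2*pi*v_kd)
#                                              + (x_d - mu_kd)**2 / v_kd].
# The naive loop below is an assumed reference implementation, useful only for
# checking the vectorized version on small inputs; it is not used elsewhere.
def _example_log_density_diag_naive(X, means, covars):
    n_samples, n_dim = X.shape
    n_components = means.shape[0]
    out = np.empty((n_samples, n_components))
    for i in range(n_samples):
        for k in range(n_components):
            diff2 = (X[i] - means[k]) ** 2
            out[i, k] = -0.5 * np.sum(np.log(2 * np.pi * covars[k])
                                      + diff2 / covars[k])
    return out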
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize these components.
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
|
bsd-3-clause
|
wlamond/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
70
|
7808
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
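# Editorial sketch: a minimal self-check of center_and_norm, added for
# illustration only (not part of the original test suite); the array shape is
# an arbitrary assumption.
def _editorial_check_center_and_norm():
    rng = np.random.RandomState(0)
    x = rng.randn(3, 200)
    center_and_norm(x)
    # after the in-place call, each row has ~zero mean and ~unit std
    assert_array_almost_equal(x.mean(axis=-1), np.zeros(3))
    assert_array_almost_equal(x.std(axis=-1), np.ones(3))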
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/matplotlib/compat/subprocess.py
|
12
|
1817
|
"""
A replacement wrapper around the subprocess module, with a number of
work-arounds:
- Provides a stub implementation of subprocess members on Google App Engine
(which are missing in subprocess).
- Uses subprocess32, a backport from Python 3.2, on Linux/Mac as a work-around
  for https://github.com/matplotlib/matplotlib/issues/5314
Instead of importing subprocess, other modules should use this as follows:
from matplotlib.compat import subprocess
This module is safe to import from anywhere within matplotlib.
"""
from __future__ import absolute_import # Required to import subprocess
from __future__ import print_function
import os
import sys
if os.name == 'posix' and sys.version_info[0] < 3:
# work around for https://github.com/matplotlib/matplotlib/issues/5314
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
else:
import subprocess
__all__ = ['Popen', 'PIPE', 'STDOUT', 'check_output', 'CalledProcessError']
if hasattr(subprocess, 'Popen'):
Popen = subprocess.Popen
# Assume that it also has the other constants.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
CalledProcessError = subprocess.CalledProcessError
check_output = subprocess.check_output
else:
# In restricted environments (such as Google App Engine), these are
# non-existent. Replace them with dummy versions that always raise OSError.
def Popen(*args, **kwargs):
raise OSError("subprocess.Popen is not supported")
def check_output(*args, **kwargs):
raise OSError("subprocess.check_output is not supported")
PIPE = -1
STDOUT = -2
# There is no need to catch CalledProcessError. These stubs cannot raise
# it. None in an except clause will simply not match any exceptions.
CalledProcessError = None
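# Editorial sketch: a hypothetical helper showing how callers can use this
# wrapper and degrade gracefully on restricted platforms. It is illustrative
# only and not part of matplotlib's API; the function name is an assumption.
def _example_run_command(cmd):
    """Run *cmd* and return its decoded output, or None if unsupported."""
    try:
        return check_output(cmd, stderr=STDOUT).decode('utf-8', 'replace')
    except OSError:
        # raised by the stubs above when Popen/check_output are unavailable
        return None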
|
mit
|
gsnyder206/mock-surveys
|
original_illustris/hdst_mockudf.py
|
1
|
21619
|
import math
import string
import sys
import struct
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
import matplotlib.colors as pycolors
import matplotlib.cm as cm
import matplotlib.patches as patches
import numpy as np
import cPickle
import asciitable
import scipy.ndimage
import scipy.stats as ss
import scipy.signal
import scipy as sp
import scipy.odr as odr
import astropy.io.fits as pyfits
import glob
import os
import make_color_image
import make_fake_wht
import gzip
import tarfile
import shutil
import cosmocalc
import congrid
import astropy.io.ascii as ascii
sq_arcsec_per_sr = 42545170296.0
c = 3.0e8
def render_only(outfile='HDUDF_v1.pdf',hst_only=False,maglim=28,label='_SB28_',unit='nJySr'):
print "reading b"
b=pyfits.open('hudf_F606W_Jy.fits')[0].data #hdudf_v.final_array
print "reading g"
g=pyfits.open('hudf_F850LP_Jy.fits')[0].data #hdudf_z.final_array
print "reading r"
r=pyfits.open('hudf_F160W_Jy.fits')[0].data #hdudf_h.final_array
pixel_arcsec = pyfits.open('hudf_F160W_Jy.fits')[0].header['PIXSCALE']
if unit=='Jy':
conv = (1.0e9)*(1.0/pixel_arcsec**2)*sq_arcsec_per_sr
#res = render_hdudf(b*conv,g*conv,r*conv,'HUDF'+label+'_v1.pdf',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.10,FWHM_arcsec_g=0.15,FWHM_arcsec_r=0.20,convolve=True,dpi=600,maglim=maglim)
#res = render_hdudf(b,g,r,'HUDF'+label+'small_v1.jpg',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.10,FWHM_arcsec_g=0.15,FWHM_arcsec_r=0.20,convolve=True,dpi=60,maglim=maglim)
res = render_hdudf(b*conv,g*conv,r*conv,'HUDF'+label+'big_v4.jpg',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.10,FWHM_arcsec_g=0.15,FWHM_arcsec_r=0.20,convolve=True,dpi=1200,maglim=maglim)
res = render_hdudf(b*conv,g*conv,r*conv,'HUDF'+label+'small_v4.jpg',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.10,FWHM_arcsec_g=0.15,FWHM_arcsec_r=0.20,convolve=True,dpi=60,maglim=maglim)
if hst_only==True:
return
print "reading b"
b=pyfits.open('hdudf_6mas_F606W_Jy.fits')[0].data #hdudf_v.final_array
print "reading g"
g=pyfits.open('hdudf_6mas_F850LP_Jy.fits')[0].data#hdudf_z.final_array
print "reading r"
r=pyfits.open('hdudf_6mas_F160W_Jy.fits')[0].data#hdudf_h.final_array
pixel_arcsec = pyfits.open('hdudf_6mas_F160W_Jy.fits')[0].header['PIXSCALE']
if unit=='Jy':
conv = (1.0e9)*(1.0/pixel_arcsec**2)*sq_arcsec_per_sr
#assume trying for 8m telescope
#res = render_hdudf(b*conv,g*conv,r*conv,'HDUDF'+label+'_v1.pdf',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.017,FWHM_arcsec_g=0.025,FWHM_arcsec_r=0.050,convolve=True,dpi=2000,maglim=maglim)
#settings for 12m
res = render_hdudf(b*conv,g*conv,r*conv,'HDUDF'+label+'big_v4.jpg',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.012,FWHM_arcsec_g=0.018,FWHM_arcsec_r=0.032,convolve=True,dpi=1200,maglim=maglim)
return
def render_hdudf(b,g,r,filename,pixel_arcsec=0.006,FWHM_arcsec_b=0.012,FWHM_arcsec_g=0.015,FWHM_arcsec_r=0.025,convolve=True,dpi=2000,maglim=28.0):
#maglim in mag/arcsec^2
redfact = 1.5*(0.60/1.60)**(1)
greenfact = 0.9*(0.60/0.85)**(1)
bluefact = 1.2
efflams = [1.60,1.25,0.90,0.775,0.606,0.435,0.814,1.05,1.40]
alph=7.0
Q = 5.0
target_ratio = 10.0**(-0.4*(27.0-maglim))
fluxscale = target_ratio*1.0e-14
pixel_Sr = (pixel_arcsec**2)/sq_arcsec_per_sr
#new version, already in nJy/Sr
to_nJy_per_Sr_b = 1#(1.0e9)*(1.0e14)*(efflams[4]**2)/c #((pixscale/206265.0)^2)*
to_nJy_per_Sr_g = 1#(1.0e9)*(1.0e14)*(efflams[2]**2)/c
to_nJy_per_Sr_r = 1#(1.0e9)*(1.0e14)*(efflams[0]**2)/c
#b_nJySr = to_nJy_per_Sr_b*b
#g_nJySr = to_nJy_per_Sr_g*g
#r_nJySr = to_nJy_per_Sr_r*r
sigma_pixels_b = FWHM_arcsec_b/pixel_arcsec/2.355
sigma_pixels_g = FWHM_arcsec_g/pixel_arcsec/2.355
sigma_pixels_r = FWHM_arcsec_r/pixel_arcsec/2.355
print "sigma pixels: ", sigma_pixels_g
if convolve==True:
print "convolving images"
b = scipy.ndimage.filters.gaussian_filter(b,sigma_pixels_b)
g = scipy.ndimage.filters.gaussian_filter(g,sigma_pixels_g)
r = scipy.ndimage.filters.gaussian_filter(r,sigma_pixels_r)
sigma_nJy = 0.3*(2.0**(-0.5))*((1.0e9)*(3631.0/5.0)*10.0**(-0.4*maglim))*pixel_arcsec*(3.0*FWHM_arcsec_g)
print "adding noise, in nJy/Sr: ", sigma_nJy/pixel_Sr
Npix = b.shape[0]
b = (b*to_nJy_per_Sr_b + np.random.randn(Npix,Npix)*sigma_nJy/pixel_Sr)
g = (g*to_nJy_per_Sr_g + np.random.randn(Npix,Npix)*sigma_nJy/pixel_Sr)
r = (r*to_nJy_per_Sr_r + np.random.randn(Npix,Npix)*sigma_nJy/pixel_Sr)
print "preparing color image"
rgbdata = make_color_image.make_interactive_light_nasa(b*fluxscale*bluefact,g*fluxscale*greenfact,r*fluxscale*redfact,alph,Q)
print rgbdata.shape
print "preparing figure"
f9 = pyplot.figure(figsize=(12.0,12.0), dpi=dpi)
pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0,wspace=0.0,hspace=0.0)
axi = pyplot.axes([0.0,0.0,1.0,1.0],frameon=True,axisbg='black')
axi.set_xticks([]) ; axi.set_yticks([])
print "rendering color image"
axi.imshow(rgbdata,interpolation='nearest',origin='upper',extent=[-1,1,-1,1])
print "saving color image"
#pyplot.rcParams['pdf.compression'] = 1
f9.savefig(filename,dpi=dpi,quality=90,bbox_inches='tight',pad_inches=0.0)
pyplot.close(f9)
#pyplot.rcdefaults()
return
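# Editorial sketch: the unit conversion used in mock_hdudf.find_image below
# can be summarised as
#   f_nu [nJy/Sr]  = 1e9 * 1e14 * lambda_micron**2 / c * f_lambda [W/m/m^2/Sr]
#   f_nu [Jy/pix]  = f_nu [nJy/Sr] * 1e-9 * pixel_Sr
# with pixel_Sr = pixel_arcsec**2 / sq_arcsec_per_sr. The helper below simply
# restates that arithmetic for illustration; the names are assumptions.
def _example_surface_brightness_to_Jy_per_pix(f_lambda, eff_lambda_microns, pixel_arcsec):
    pixel_Sr = (pixel_arcsec**2)/sq_arcsec_per_sr
    to_nJy_per_Sr = (1.0e9)*(1.0e14)*(eff_lambda_microns**2)/c
    return f_lambda*to_nJy_per_Sr*(1.0e-9)*pixel_Sr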
class mock_hdudf:
def __init__(self,Npix,Pix_arcsec,blank_array,filter_string,simdata,Narcmin,eff_lambda_microns,maglim,fwhm,req_filters=[]):
self.Npix=Npix
self.Pix_arcsec=Pix_arcsec
self.fov_arcmin = Narcmin
self.blank_array=blank_array*1.0
self.final_array=blank_array*1.0
self.filter_string=filter_string
self.simdata=simdata
self.image_files=[]
self.x_array=[]
self.y_array=[]
self.N_inf=[]
self.eff_lambda_microns = eff_lambda_microns
self.maglim = 28.0
self.FWHM_arcsec = fwhm
self.req_filters=req_filters
self.mstar_list = []
self.redshift_list = []
def find_image(self,mstar,redshift,sfr,seed,xpix,ypix,hmag):
sim_simname = self.simdata['col1']
sim_expfact = self.simdata['col2']
sim_sfr = self.simdata['col54']
sim_mstar = self.simdata['col56']
sim_redshift = 1.0/sim_expfact - 1.0
metalmass = self.simdata['col53']
sim_res_pc = self.simdata['col62']
sim_string = self.simdata['col60']
simage_loc = '/Users/gsnyder/Documents/Projects/HydroART_Morphology/Hyades_Data/images_rsync/'
self.mstar_list.append(mstar)
self.redshift_list.append(redshift)
adjust_size=False
print " "
print "Searching for simulation with mstar,z,seed : ", mstar, redshift, seed
wide_i = np.where(np.logical_and(np.logical_and(np.abs(sim_redshift-redshift)<0.3,np.abs(np.log10(sim_mstar)-mstar)<0.1),sim_sfr > -1))[0]
Nwi = wide_i.shape[0]
if Nwi==0:
wide_i = np.where(np.logical_and(np.logical_and(np.abs(sim_redshift-redshift)<0.5,np.abs(np.log10(sim_mstar)-mstar)<0.4),sim_sfr > -1))[0]
Nwi = wide_i.shape[0]
if Nwi==0 and (mstar < 7.1):
print " Can't find good sim, adjusting image parameters to get low mass things "
wide_i = np.where(np.abs(sim_redshift-redshift)<0.3)[0] #wide_i is a z range
llmi = np.argmin(np.log10(sim_mstar[wide_i])) #the lowest mass in this z range
wlmi = np.where(np.abs(np.log10(sim_mstar[wide_i]) - np.log10(sim_mstar[wide_i[llmi]])) < 0.3)[0] #search within 0.3 dex of lowest available sims
print " ", wide_i.shape, llmi, wlmi.shape
wide_i = wide_i[wlmi]
Nwi = wide_i.shape[0]
print " ", Nwi
adjust_size=True
#assert(wide_i.shape[0] > 0)
if Nwi==0:
print " Could not find roughly appropriate simulation for mstar,z: ", mstar, redshift
print " "
self.image_files.append('')
return 0#np.zeros(shape=(600,600)), -1
print " Found N candidates: ", wide_i.shape
np.random.seed(seed)
#choose random example and camera
rps = np.random.random_integers(0,Nwi-1,1)[0]
cn = str(np.random.random_integers(5,8,1)[0])
prefix = os.path.basename(sim_string[wide_i[rps]])
sim_realmstar = np.log10(sim_mstar[wide_i[rps]]) #we picked a sim with this log mstar
mstar_factor = sim_realmstar - mstar
rad_factor = 1.0
lum_factor = 1.0
if adjust_size==True:
rad_factor = 10.0**(mstar_factor*0.5) #must **shrink** images by this factor, total flux by mstar factor
lum_factor = 10.0**(mstar_factor)
print ">>>FACTORS<<< ", prefix, sim_realmstar, mstar_factor, rad_factor, lum_factor
im_folder = simage_loc + prefix +'_skipir/images'
im_file = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.filter_string+'_simulation.fits')
cn_file = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.filter_string+'_candelized_noise.fits')
req1 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[0]+'_simulation.fits')
req2 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[1]+'_simulation.fits')
req3 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[2]+'_simulation.fits')
req4 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[3]+'_simulation.fits')
req5 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[4]+'_simulation.fits')
## Actually, probably want to keep trying some possible galaxies/files...
is_file = os.path.lexists(im_file) and os.path.lexists(cn_file) and os.path.lexists(req1) and os.path.lexists(req2) and os.path.lexists(req3) and os.path.lexists(req4) and os.path.lexists(req5)
#is_file = os.path.lexists(im_file) and os.path.lexists(cn_file) #and os.path.lexists(req1) and os.path.lexists(req2) and os.path.lexists(req3)
if is_file==False:
print " Could not find appropriate files: ", im_file, cn_file
print " "
self.image_files.append('')
return 0 #np.zeros(shape=(600,600)), -1
self.image_files.append(im_file)
cn_header = pyfits.open(cn_file)[0].header
im_hdu = pyfits.open(im_file)[0]
scalesim = cn_header.get('SCALESIM') #pc/pix
Ps = cosmocalc.cosmocalc(redshift)['PS_kpc'] #kpc/arcsec
print " Simulation pixel size at z: ", scalesim
print " Plate scale for z: ", Ps
print " Desired Kpc/pix at z: ", Ps*self.Pix_arcsec
sunrise_image = np.float32(im_hdu.data) #W/m/m^2/Sr
Sim_Npix = sunrise_image.shape[0]
New_Npix = int( Sim_Npix*(scalesim/(1000.0*Ps*self.Pix_arcsec))/rad_factor ) #rad_factor reduces number of pixels (total size) desired
if New_Npix==0:
New_Npix=1
print " New galaxy pixel count: ", New_Npix
rebinned_image = congrid.congrid(sunrise_image,(New_Npix,New_Npix)) #/lum_factor #lum_factor shrinks surface brightness by mass factor... but we're shrinking size first, so effective total flux already adjusted by this; may need to ^^ SB instead??? or fix size adjust SB?
print " New galaxy image shape: ", rebinned_image.shape
print " New galaxy image max: ", np.max(rebinned_image)
#finite_bool = np.isfinite(rebinned_image)
#num_infinite = np.where(finite_bool==False)[0].shape[0]
#print " Number of INF pixels: ", num_infinite, prefix
#self.N_inf.append(num_infinite)
if xpix==-1:
xpix = int( (self.Npix-1)*np.random.rand()) #np.random.random_integers(0,self.Npix-1,1)[0]
ypix = int( (self.Npix-1)*np.random.rand()) #np.random.random_integers(0,self.Npix-1,1)[0]
self.x_array.append(xpix)
self.y_array.append(ypix)
x1_choice = np.asarray([int(xpix-float(New_Npix)/2.0),0])
x1i = np.argmax(x1_choice)
x1 = x1_choice[x1i]
diff=0
if x1==0:
diff = x1_choice[1]-x1_choice[0]
x2_choice = np.asarray([x1 + New_Npix - diff,self.Npix])
x2i = np.argmin(x2_choice)
x2 = int(x2_choice[x2i])
x1sim = abs(np.min(x1_choice))
x2sim = min(New_Npix,self.Npix-x1)
y1_choice = np.asarray([int(ypix-float(New_Npix)/2.0),0])
y1i = np.argmax(y1_choice)
y1 = y1_choice[y1i]
diff=0
if y1==0:
diff = y1_choice[1]-y1_choice[0]
y2_choice = np.asarray([y1 + New_Npix - diff,self.Npix])
y2i = np.argmin(y2_choice)
y2 = int(y2_choice[y2i])
y1sim = abs(np.min(y1_choice))
y2sim = min(New_Npix,self.Npix-y1)
print " Placing new image at x,y in x1:x2, y1:y2 from xsim,ysim, ", xpix, ypix, x1,x2,y1,y2, x1sim, x2sim, y1sim, y2sim
#image_slice = np.zeros_like(self.blank_array)
print " done creating image slice"
#bool_slice = np.int32( np.zeros(shape=(self.Npix,self.Npix)))
image_cutout = rebinned_image[x1sim:x2sim,y1sim:y2sim]
print " New image shape: ", image_cutout.shape
pixel_Sr = (self.Pix_arcsec**2)/sq_arcsec_per_sr #pixel area in steradians: Sr/pixel
to_nJy_per_Sr = (1.0e9)*(1.0e14)*(self.eff_lambda_microns**2)/c #((pixscale/206265.0)^2)*
#sigma_nJy = 0.3*(2.0**(-0.5))*((1.0e9)*(3631.0/5.0)*10.0**(-0.4*self.maglim))*self.Pix_arcsec*(3.0*self.FWHM_arcsec)
to_Jy_per_pix = to_nJy_per_Sr*(1.0e-9)*pixel_Sr
#b = b*(to_nJy_per_Sr_b*fluxscale*bluefact) # + np.random.randn(Npix,Npix)*sigma_nJy/pixel_Sr
image_cutout = image_cutout*to_Jy_per_pix #image_cutout*to_nJy_per_Sr
#image_slice[x1:x2,y1:y2] = image_cutout*1.0
#bool_slice[x1:x2,y1:y2]=1
print " done slicing"
#self.final_array += image_slice
self.final_array[x1:x2,y1:y2] += image_cutout
print " done adding image to final array"
#finite_bool = np.isfinite(self.final_array)
#num_infinite = np.where(finite_bool==False)[0].shape[0]
#print " Final array INF count and max:", num_infinite, np.max(self.final_array)
print " "
return 1 #sunrise_image,scalesim
def write_success_table(self,filename):
boolthing = np.ones_like(hdudf_h.mstar_list)
i_fail = np.where(np.asarray(hdudf_h.image_files)=='')[0]
print boolthing.shape, i_fail.shape
print boolthing, i_fail
boolthing[i_fail] = 0
data = np.asarray([boolthing,hdudf_h.x_array,hdudf_h.y_array])
asciitable.write(data,filename)
return
def place_image(self,x,y,galaxy_image,galaxy_pixsize):
new_image = self.final_array
return new_image
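# Editorial sketch: find_image above clips a New_Npix-sized galaxy stamp
# against the Npix-sized mosaic around (xpix, ypix). The helper below restates
# that bounds arithmetic for a single axis; it is illustrative only and the
# names are assumptions.
def _example_overlap_bounds(center_pix, stamp_npix, mosaic_npix):
    """Return (lo, hi, stamp_lo, stamp_hi) slice bounds for one axis."""
    lo_choice = [int(center_pix - float(stamp_npix)/2.0), 0]
    lo = max(lo_choice)
    diff = (lo_choice[1] - lo_choice[0]) if lo == 0 else 0
    hi = min(lo + stamp_npix - diff, mosaic_npix)
    stamp_lo = abs(min(lo_choice))
    stamp_hi = min(stamp_npix, mosaic_npix - lo)
    return lo, hi, stamp_lo, stamp_hi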
if __name__=="__main__":
mstar_list = np.asarray([8.0])
redshift_list = np.asarray([2.0])
sfr_list = np.asarray([1.5])
#instead, use observed UDF catalogs
udf_hdulist = pyfits.open('data/udf_zbest_sedfit_jen2015.fits')
udf_table = udf_hdulist[1].data
udf_zbest = udf_table.field('Z_BEST')
udf_lmstar = udf_table.field('LMSTAR_BC03')
udf_hmag = udf_table.field('MAG_F160W')
x_list = np.asarray([2500])
y_list = np.asarray([6000])
#random positions?
fi = np.where(udf_hmag > 27.0)[0]
fake_zs = np.asarray([udf_zbest[fi],udf_zbest[fi]]).flatten()
fake_lmasses = 7.0 - 1.6*np.random.random(fake_zs.shape[0])
fake_hmag = 29.0 + 4.0*np.random.random(fake_zs.shape[0])
udf_zbest = np.append(udf_zbest,fake_zs)
udf_lmstar = np.append(udf_lmstar,fake_lmasses)
udf_hmag = np.append(udf_hmag,fake_hmag)
#Npix = 27800.0/2.0
Npix = 10000.0 #16880 w/ 2.78 arcmin gives 10mas pixels
Narcmin = 1.0
Narcsec = Narcmin*60.0
#Npix_hst = 27800.0/8.0 #ish
Npix_hst = 1200.0
Pix_arcsec = Narcsec/Npix
Pix_arcsec_hst = Narcsec/Npix_hst
print "Modeling image with pixel scale (arcsec): ", Pix_arcsec
blank_array = np.float32(np.zeros(shape=(Npix,Npix)))
print blank_array.shape
blank_array_hst = np.float32(np.zeros(shape=(Npix_hst,Npix_hst)))
sim_catalog_file = '/Users/gsnyder/Documents/Projects/HydroART_Morphology/Hyades_Data/juxtaposicion-catalog-Nov18_2013/data/sim'
simdata = asciitable.read(sim_catalog_file,data_start=1)
#print simdata
rf = ['F850LP','F606W','F160W','F775W','F125W']
hdudf_h = mock_hdudf(Npix,Pix_arcsec,blank_array,'F160W',simdata,Narcmin,1.60,28.0,0.025,req_filters=rf)
hdudf_j = mock_hdudf(Npix,Pix_arcsec,blank_array,'F125W',simdata,Narcmin,1.25,28.0,0.022,req_filters=rf)
hdudf_z = mock_hdudf(Npix,Pix_arcsec,blank_array,'F850LP',simdata,Narcmin,0.90,28.0,0.015,req_filters=rf)
hdudf_i = mock_hdudf(Npix,Pix_arcsec,blank_array,'F775W',simdata,Narcmin,0.75,28.0,0.014,req_filters=rf)
hdudf_v = mock_hdudf(Npix,Pix_arcsec,blank_array,'F606W',simdata,Narcmin,0.60,28.0,0.012,req_filters=rf)
hudf_h = mock_hdudf(Npix_hst,Pix_arcsec_hst,blank_array_hst,'F160W',simdata,Narcmin,1.60,28.0,0.20,req_filters=rf)
hudf_z = mock_hdudf(Npix_hst,Pix_arcsec_hst,blank_array_hst,'F850LP',simdata,Narcmin,0.90,28.0,0.15,req_filters=rf)
hudf_v = mock_hdudf(Npix_hst,Pix_arcsec_hst,blank_array_hst,'F606W',simdata,Narcmin,0.60,28.0,0.12,req_filters=rf)
udf_success = np.int32(np.zeros_like(udf_lmstar))
for i,z in enumerate(udf_zbest):
if i > 50000:
continue
if i % 4 != 0:
continue
#i = udf_zbest.shape[0] - i - 1
result = hdudf_h.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i])
udf_success[i] = result
result = hdudf_j.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i])
result = hdudf_z.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i])
result = hdudf_i.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i])
result = hdudf_v.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i])
hudf_h.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i])
hudf_z.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i])
hudf_v.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i])
#NOTE NOW RETURNS IN Jy/pix!!
i_fail = np.where(np.asarray(hdudf_h.image_files)=='')[0]
print "Numfail: ", i_fail.shape
print udf_lmstar[0:100]
print udf_success[0:100]
successes = {'udf_z':udf_zbest, 'udf_lmstar':udf_lmstar, 'mockudf_success':udf_success}
ascii.write(successes,'hdudf_success_list.txt')
#exit()
#im,pix_pc = hdudf_h.find_image(mstar_list[0],redshift_list[0],sfr_list[0],1,x_list[0],y_list[0])
#im2 = hdudf_h.modify_and_place(im,x_list[0],y_list[0],redshift_list[0])
print hdudf_h.image_files
print hdudf_h.N_inf
#WANT ability to know which UDF entries were successful -- save image files. Pickle? FITS table?
print np.max(hdudf_h.final_array)
new_float = np.float32(hdudf_h.final_array)
print np.max(new_float)
new_bool = np.isfinite(new_float)
print np.where(new_bool==False)[0].shape[0]
primhdu = pyfits.PrimaryHDU(new_float) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec')
hdulist = pyfits.HDUList([primhdu])
hdulist.writeto('hdudf_6mas_F160W_Jy.fits',clobber=True)
primhdu = pyfits.PrimaryHDU(np.float32(hdudf_j.final_array)) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec')
hdulist = pyfits.HDUList([primhdu])
hdulist.writeto('hdudf_6mas_F125W_Jy.fits',clobber=True)
primhdu = pyfits.PrimaryHDU(np.float32(hdudf_z.final_array)) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec')
hdulist = pyfits.HDUList([primhdu])
hdulist.writeto('hdudf_6mas_F850LP_Jy.fits',clobber=True)
primhdu = pyfits.PrimaryHDU(np.float32(hdudf_i.final_array)) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec')
hdulist = pyfits.HDUList([primhdu])
hdulist.writeto('hdudf_6mas_F775W_Jy.fits',clobber=True)
primhdu = pyfits.PrimaryHDU(np.float32(hdudf_v.final_array)) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec')
hdulist = pyfits.HDUList([primhdu])
hdulist.writeto('hdudf_6mas_F606W_Jy.fits',clobber=True)
primhdu = pyfits.PrimaryHDU(np.float32(hudf_h.final_array)) ; primhdu.header['IMUNIT']=('nJy/Sr') ; primhdu.header['PIXSCALE']=(Pix_arcsec_hst, 'arcsec')
hdulist = pyfits.HDUList([primhdu])
hdulist.writeto('hudf_F160W_Jy.fits',clobber=True)
primhdu = pyfits.PrimaryHDU(np.float32(hudf_z.final_array)) ; primhdu.header['IMUNIT']=('nJy/Sr') ; primhdu.header['PIXSCALE']=(Pix_arcsec_hst, 'arcsec')
hdulist = pyfits.HDUList([primhdu])
hdulist.writeto('hudf_F850LP_Jy.fits',clobber=True)
primhdu = pyfits.PrimaryHDU(np.float32(hudf_v.final_array)) ; primhdu.header['IMUNIT']=('nJy/Sr') ; primhdu.header['PIXSCALE']=(Pix_arcsec_hst, 'arcsec')
hdulist = pyfits.HDUList([primhdu])
hdulist.writeto('hudf_F606W_Jy.fits',clobber=True)
#hdudf_h.write_success_table('F160W_successes.txt')
#b=hdudf_v.final_array
#g=hdudf_z.final_array
#r=hdudf_h.final_array
#res = render_hdudf(b,g,r,'HDUDF_v1.pdf',pixel_arcsec=Pix_arcsec)
|
mit
|
eyalfa/spark
|
python/pyspark/sql/utils.py
|
6
|
6334
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
def __init__(self, desc, stackTrace):
self.desc = desc
self.stackTrace = stackTrace
def __str__(self):
return repr(self.desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(),
e.java_exception.getStackTrace()))
if s.startswith('org.apache.spark.sql.AnalysisException: '):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
raise ParseException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
raise StreamingQueryException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
if s.startswith('java.lang.IllegalArgumentException: '):
raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
raise
return deco
def install_exception_handler():
"""
    Hook an exception handler into Py4j, which can capture some SQL exceptions in Java.
    When calling the Java API, Py4j calls `get_return_value` to parse the returned object.
    If an exception happened in the JVM, the result is a Java exception object and
    py4j.protocol.Py4JJavaError is raised. We replace the original `get_return_value`
    with one that captures the Java exception and throws a Python one (with the same
    error message).
    It is idempotent and can be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def toJArray(gateway, jtype, arr):
"""
    Convert a Python list to a Java array of the given element type.
    :param gateway: Py4j Gateway
    :param jtype: Java type of the elements in the array
    :param arr: Python list to convert
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
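def _example_strings_to_java_array(spark, values):
    """
    Editorial sketch (not part of this module's API): convert a Python list of
    strings to a Java String[] using the gateway of an active SparkSession.
    The attribute access ``spark.sparkContext._gateway`` is an assumption made
    for illustration.
    """
    gateway = spark.sparkContext._gateway
    return toJArray(gateway, gateway.jvm.java.lang.String, list(values))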
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.8.0"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
class ForeachBatchFunction(object):
"""
This is the Python implementation of Java interface 'ForeachBatchFunction'. This wraps
the user-defined 'foreachBatch' function such that it can be called from the JVM when
the query is active.
"""
def __init__(self, sql_ctx, func):
self.sql_ctx = sql_ctx
self.func = func
def call(self, jdf, batch_id):
from pyspark.sql.dataframe import DataFrame
try:
self.func(DataFrame(jdf, self.sql_ctx), batch_id)
except Exception as e:
self.error = e
raise e
class Java:
implements = ['org.apache.spark.sql.execution.streaming.sources.PythonForeachBatchFunction']
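def _example_foreach_batch(df, output_path):
    """
    Editorial sketch (not part of this module): ForeachBatchFunction above is
    the wrapper behind DataStreamWriter.foreachBatch. Assuming ``df`` is a
    streaming DataFrame and ``output_path`` is writable, a minimal usage
    follows in the body below.
    """
    def handle_batch(batch_df, batch_id):
        # each micro-batch arrives as a regular DataFrame plus its id
        batch_df.write.format("parquet").mode("append").save(output_path)
    return df.writeStream.foreachBatch(handle_batch).start()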
|
apache-2.0
|
xubenben/scikit-learn
|
examples/model_selection/plot_roc.py
|
96
|
4487
|
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
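##############################################################################
# Editorial addition (not part of the original example): the micro- and
# macro-averaged areas discussed above can also be obtained directly from
# roc_auc_score. Note that this macro value is the unweighted mean of the
# per-class AUCs, which can differ slightly from the curve-averaged value
# plotted above.
from sklearn.metrics import roc_auc_score
print("micro-average ROC AUC: %0.2f"
      % roc_auc_score(y_test, y_score, average="micro"))
print("macro-average ROC AUC: %0.2f"
      % roc_auc_score(y_test, y_score, average="macro"))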
|
bsd-3-clause
|
vishnumani2009/sklearn-fasttext
|
skfasttext/FastTextClassifier.py
|
1
|
5935
|
from sklearn.base import BaseEstimator, ClassifierMixin
import fasttext as ft
import sys
from sklearn.metrics import classification_report
class FastTextClassifier(BaseEstimator,ClassifierMixin):
    """Base classifier of the fastText estimator"""
def __init__(self,lpr='__label__',lr=0.1,lru=100,dim=100,ws=5,epoch=100,minc=1,neg=5,ngram=1,loss='softmax',nbucket=0,minn=0,maxn=0,thread=4,silent=0,output="model"):
"""
label_prefix label prefix ['__label__']
lr learning rate [0.1]
lr_update_rate change the rate of updates for the learning rate [100]
dim size of word vectors [100]
ws size of the context window [5]
epoch number of epochs [5]
min_count minimal number of word occurences [1]
neg number of negatives sampled [5]
word_ngrams max length of word ngram [1]
loss loss function {ns, hs, softmax} [softmax]
bucket number of buckets [0]
minn min length of char ngram [0]
maxn min length of char ngram [0]
todo : Recheck need of some of the variables, present in default classifier
"""
self.label_prefix=lpr
self.lr=lr
self.lr_update_rate=lru
self.dim=dim
self.ws=ws
self.epoch=epoch
self.min_count=minc
self.neg=neg
self.word_ngrams=ngram
self.loss=loss
self.bucket=nbucket
self.minn=minn
self.maxn=maxn
self.thread=thread
self.silent=silent
self.classifier=None
self.result=None
self.output=output
self.lpr=lpr
def fit(self,input_file):
        '''
        Input: path to a training file in fastText supervised format
               (one example per line, labels prefixed with label_prefix)
        Returns: None; the trained classifier is stored on the estimator
        to do: add option to feed list of X and Y or file
        '''
self.classifier = ft.supervised(input_file, self.output, dim=self.dim, lr=self.lr, epoch=self.epoch, min_count=self.min_count, word_ngrams=self.word_ngrams, bucket=self.bucket, thread=self.thread, silent=self.silent, label_prefix=self.lpr)
return(None)
def predict(self,test_file,csvflag=True,k_best=1):
        '''
        Input: a test file (csvflag=True) or a list of sentences (csvflag=False)
        Returns: list of predicted integer labels
        to do: add unit tests using sentiment analysis dataset
        to do: Add K best labels options for csvflag = False
        '''
try:
            if csvflag==False and isinstance(test_file, list):
self.result=self.classifier.predict(test_file,k=k_best)
if csvflag:
lines=open(test_file,"r").readlines()
sentences=[line.split(" , ")[1] for line in lines]
self.result=self.classifier.predict(sentences,k_best)
except:
print("Error in input dataset.. please see if the file/list of sentences is of correct format")
sys.exit(-1)
self.result=[int(labels[0]) for labels in self.result]
return(self.result)
def report(self,ytrue,ypred):
        '''
        Input: true and predicted labels
        Prints a classification report
        to do: add label option and unit testing
        '''
print(classification_report(ytrue,ypred))
return None
def predict_proba(self,test_file,csvflag=True,k_best=1):
        '''
        Input: a test file (csvflag=True) or a list of sentences (csvflag=False)
        Returns: label/probability pairs from the classifier
        to do: check output of classifier predict_proba, add label option and unit testing
        '''
try:
            if csvflag==False and isinstance(test_file, list):
self.result=self.classifier.predict_proba(test_file,k=k_best)
if csvflag:
lines=open(test_file,"r").readlines()
sentences=[line.split(" , ")[1] for line in lines]
self.result=self.classifier.predict_proba(sentences,k_best)
except:
print("Error in input dataset.. please see if the file/list of sentences is of correct format")
sys.exit(-1)
return(self.result)
def getlabels(self):
'''
Input: None
returns: Class labels in dataset
        to do : check need of this function
'''
return(self.classifier.labels)
def getproperties(self):
'''
Input: Nothing, other than object self pointer
Return: None , prints the descriptions of the model hyperparameters
'''
print("The model has following hyperparameters as part of its specification")
print("Label prefix used : "+str(self.label_prefix))
print("Learning rate :"+ str(lr))
print("Learning rate update after "+str(self.lr_update_rate)+"iterations")
print("Embedding size: "+str(self.dim))
print("Epochs :"+ str(self.epochs))
print("minimal number of word occurences: "+self.min_count)
print("number of negatives sampled :"+str(self.neg))
print("max length of word ngram "+str(self.word_ngrams))
print("loss function: "+str(self.loss))
print("number of buckets "+str(self.bucket))
print("min length of char ngram:"+str(self.minn))
print("min length of char ngram"+ str(self.maxn))
return(None)
def loadpretrained(self,X):
'returns the model with pretrained weights'
self.classifier=ft.load_model(X,label_prefix=self.lpr)
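def _example_usage(train_file, test_file, ytrue):
    """
    Editorial sketch (not part of the estimator): minimal end-to-end use of
    FastTextClassifier. The file formats (a fastText supervised training file,
    and "label , text" lines for prediction) follow the assumptions documented
    in fit() and predict() above; the hyperparameter values are arbitrary.
    """
    clf = FastTextClassifier(lpr='__label__', dim=50, epoch=25)
    clf.fit(train_file)
    preds = clf.predict(test_file, csvflag=True)
    clf.report(ytrue, preds)
    return preds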
|
bsd-3-clause
|
lazywei/scikit-learn
|
examples/model_selection/plot_roc_crossval.py
|
247
|
3253
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
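###############################################################################
# Editorial addition (not part of the original example): the fold-to-fold
# spread shown above can also be summarised numerically with cross_val_score,
# reusing the same classifier and cross-validation splits.
from sklearn.cross_validation import cross_val_score
aucs = cross_val_score(classifier, X, y, cv=cv, scoring='roc_auc')
print("ROC AUC per fold: %s" % np.array_str(aucs, precision=3))
print("Mean AUC: %0.3f (+/- %0.3f)" % (aucs.mean(), aucs.std()))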
|
bsd-3-clause
|
appapantula/scikit-learn
|
sklearn/decomposition/pca.py
|
192
|
23117
|
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
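# Editorial sketch: _infer_dimension_ backs the ``n_components='mle'`` option
# of the PCA class below. A hypothetical standalone use on a data matrix X
# (the function name and the centering step are assumptions) would be:
def _example_mle_dimension(X):
    """Return Minka's MLE estimate of the intrinsic dimensionality of X."""
    X = X - X.mean(axis=0)
    n_samples, n_features = X.shape
    spectrum = (linalg.svd(X, full_matrices=False)[1] ** 2) / n_samples
    return _infer_dimension_(spectrum, n_samples, n_features)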
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected onto the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
        X : array, shape (n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
        X : array, shape (n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
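# --- Added illustration (not part of the upstream module) --------------------
# A minimal sketch, assuming only numpy/scipy and the PCA class defined above,
# of three properties documented in the docstrings: with ``whiten=True`` the
# transformed training data has unit component-wise variance; for an
# un-whitened model ``get_precision()`` (matrix inversion lemma) agrees with
# inverting ``get_covariance()`` directly; and ``score_samples`` is the
# Gaussian log-density under that covariance. The helper name is ours and it
# is never called at import time.
def _demo_pca_whitening_and_precision():
    import numpy as np
    from scipy.stats import multivariate_normal
    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    # Whitened scores: each kept component has (close to) unit variance.
    Xt = PCA(n_components=3, whiten=True).fit(X).transform(X)
    assert np.allclose(Xt.std(axis=0), 1.0)
    # Woodbury-based precision matches a direct inverse of the model covariance.
    pca = PCA(n_components=3, whiten=False).fit(X)
    assert np.allclose(pca.get_precision(), np.linalg.inv(pca.get_covariance()))
    # score_samples equals log N(x | mean_, get_covariance()).
    assert np.allclose(pca.score_samples(X),
                       multivariate_normal.logpdf(X, mean=pca.mean_,
                                                  cov=pca.get_covariance()))
    return Xt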
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
        fit(X).transform(X) will not yield the expected results;
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
      (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
            The input data, copied and centered when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected onto the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
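# --- Added illustration (not part of the upstream module) --------------------
# A minimal sketch, assuming only numpy and the two estimators defined above,
# contrasting the exact SVD used by ``PCA`` with the approximate randomized
# SVD used by ``RandomizedPCA``: on data with a clear low-rank structure the
# explained-variance ratios coincide up to the accuracy of the power
# iterations. The helper name is ours and it is never called at import time.
def _demo_exact_vs_randomized_pca():
    import numpy as np
    rng = np.random.RandomState(0)
    # Rank-3 signal in 20 dimensions plus a little isotropic noise.
    X = np.dot(rng.randn(300, 3), rng.randn(3, 20)) + 0.01 * rng.randn(300, 20)
    exact = PCA(n_components=3).fit(X)
    approx = RandomizedPCA(n_components=3, iterated_power=3,
                           random_state=0).fit(X)
    assert np.allclose(exact.explained_variance_ratio_,
                       approx.explained_variance_ratio_, atol=1e-3)
    return exact.explained_variance_ratio_, approx.explained_variance_ratio_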
|
bsd-3-clause
|
kevin-intel/scikit-learn
|
benchmarks/bench_saga.py
|
11
|
10895
|
"""Author: Arthur Mensch, Nelle Varoquaux
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in terms of learning time.
"""
import json
import time
import os
from joblib import Parallel
from sklearn.utils.fixes import delayed
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
max_iter=10, skip_slow=False, dtype=np.float64):
if skip_slow and solver == 'lightning' and penalty == 'l1':
        print('Skipping l1 logistic regression with solver lightning.')
return
print('Solving %s logistic regression with penalty %s, solver %s.'
% ('binary' if single_target else 'multinomial',
penalty, solver))
if solver == 'lightning':
from lightning.classification import SAGAClassifier
if single_target or solver not in ['sag', 'saga']:
multi_class = 'ovr'
else:
multi_class = 'multinomial'
X = X.astype(dtype)
y = y.astype(dtype)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
stratify=y)
n_samples = X_train.shape[0]
n_classes = np.unique(y_train).shape[0]
test_scores = [1]
train_scores = [1]
accuracies = [1 / n_classes]
times = [0]
if penalty == 'l2':
alpha = 1. / (C * n_samples)
beta = 0
lightning_penalty = None
else:
alpha = 0.
beta = 1. / (C * n_samples)
lightning_penalty = 'l1'
for this_max_iter in range(1, max_iter + 1, 2):
print('[%s, %s, %s] Max iter: %s' %
('binary' if single_target else 'multinomial',
penalty, solver, this_max_iter))
if solver == 'lightning':
lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
penalty=lightning_penalty,
tol=-1, max_iter=this_max_iter)
else:
lr = LogisticRegression(solver=solver,
multi_class=multi_class,
C=C,
penalty=penalty,
fit_intercept=False, tol=0,
max_iter=this_max_iter,
random_state=42,
)
        # Touch the data once so the CPU cache is in a comparable state for every fit call
X_train.max()
        # use a wall-clock timer (time.clock was removed in Python 3.8)
        t0 = time.perf_counter()
        lr.fit(X_train, y_train)
        train_time = time.perf_counter() - t0
scores = []
for (X, y) in [(X_train, y_train), (X_test, y_test)]:
try:
y_pred = lr.predict_proba(X)
except NotImplementedError:
# Lightning predict_proba is not implemented for n_classes > 2
y_pred = _predict_proba(lr, X)
score = log_loss(y, y_pred, normalize=False) / n_samples
score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
beta * np.sum(np.abs(lr.coef_)))
scores.append(score)
train_score, test_score = tuple(scores)
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
test_scores.append(test_score)
train_scores.append(train_score)
accuracies.append(accuracy)
times.append(train_time)
return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
pred = safe_sparse_dot(X, lr.coef_.T)
if hasattr(lr, "intercept_"):
pred += lr.intercept_
return softmax(pred)
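# --- Added helper sketch (not part of the original benchmark) ----------------
# The loop in ``fit_single`` tracks a penalized objective: the mean log loss
# plus 0.5 * alpha * ||coef||_2^2 and beta * ||coef||_1, where alpha (l2) and
# beta (l1) are both 1 / (C * n_samples). The function below only restates
# that computation in one place for readers; its name is ours, and it assumes
# a fitted estimator exposing ``coef_`` and ``predict_proba``.
def penalized_objective(lr, X, y, C=1.0, penalty='l2'):
    n_samples = X.shape[0]
    alpha = 1. / (C * n_samples) if penalty == 'l2' else 0.
    beta = 1. / (C * n_samples) if penalty == 'l1' else 0.
    obj = log_loss(y, lr.predict_proba(X), normalize=False) / n_samples
    obj += 0.5 * alpha * np.sum(lr.coef_ ** 2) + beta * np.sum(np.abs(lr.coef_))
    return obj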
def exp(solvers, penalty, single_target,
n_samples=30000, max_iter=20,
dataset='rcv1', n_jobs=1, skip_slow=False):
dtypes_mapping = {
"float64": np.float64,
"float32": np.float32,
}
if dataset == 'rcv1':
rcv1 = fetch_rcv1()
lbin = LabelBinarizer()
lbin.fit(rcv1.target_names)
X = rcv1.data
y = rcv1.target
y = lbin.inverse_transform(y)
le = LabelEncoder()
y = le.fit_transform(y)
if single_target:
y_n = y.copy()
y_n[y > 16] = 1
y_n[y <= 16] = 0
y = y_n
elif dataset == 'digits':
X, y = load_digits(return_X_y=True)
if single_target:
y_n = y.copy()
y_n[y < 5] = 1
y_n[y >= 5] = 0
y = y_n
elif dataset == 'iris':
iris = load_iris()
X, y = iris.data, iris.target
elif dataset == '20newspaper':
ng = fetch_20newsgroups_vectorized()
X = ng.data
y = ng.target
if single_target:
y_n = y.copy()
y_n[y > 4] = 1
            y_n[y <= 4] = 0
y = y_n
X = X[:n_samples]
y = y[:n_samples]
out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
delayed(fit_single)(solver, X, y,
penalty=penalty, single_target=single_target,
dtype=dtype,
C=1, max_iter=max_iter, skip_slow=skip_slow)
for solver in solvers
for dtype in dtypes_mapping.values())
res = []
idx = 0
for dtype_name in dtypes_mapping.keys():
for solver in solvers:
if not (skip_slow and
solver == 'lightning' and
penalty == 'l1'):
lr, times, train_scores, test_scores, accuracies = out[idx]
this_res = dict(solver=solver, penalty=penalty,
dtype=dtype_name,
single_target=single_target,
times=times, train_scores=train_scores,
test_scores=test_scores,
accuracies=accuracies)
res.append(this_res)
idx += 1
with open('bench_saga.json', 'w+') as f:
json.dump(res, f)
def plot(outname=None):
import pandas as pd
with open('bench_saga.json', 'r') as f:
f = json.load(f)
res = pd.DataFrame(f)
res.set_index(['single_target'], inplace=True)
grouped = res.groupby(level=['single_target'])
colors = {'saga': 'C0', 'liblinear': 'C1', 'lightning': 'C2'}
linestyles = {"float32": "--", "float64": "-"}
alpha = {"float64": 0.5, "float32": 1}
for idx, group in grouped:
single_target = idx
fig, axes = plt.subplots(figsize=(12, 4), ncols=4)
ax = axes[0]
for scores, times, solver, dtype in zip(group['train_scores'],
group['times'],
group['solver'],
group["dtype"]):
ax.plot(times, scores, label="%s - %s" % (solver, dtype),
color=colors[solver],
alpha=alpha[dtype],
marker=".",
linestyle=linestyles[dtype])
ax.axvline(times[-1], color=colors[solver],
alpha=alpha[dtype],
linestyle=linestyles[dtype])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Training objective (relative to min)')
ax.set_yscale('log')
ax = axes[1]
for scores, times, solver, dtype in zip(group['test_scores'],
group['times'],
group['solver'],
group["dtype"]):
ax.plot(times, scores, label=solver, color=colors[solver],
linestyle=linestyles[dtype],
marker=".",
alpha=alpha[dtype])
ax.axvline(times[-1], color=colors[solver],
alpha=alpha[dtype],
linestyle=linestyles[dtype])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test objective (relative to min)')
ax.set_yscale('log')
ax = axes[2]
for accuracy, times, solver, dtype in zip(group['accuracies'],
group['times'],
group['solver'],
group["dtype"]):
ax.plot(times, accuracy, label="%s - %s" % (solver, dtype),
alpha=alpha[dtype],
marker=".",
color=colors[solver], linestyle=linestyles[dtype])
ax.axvline(times[-1], color=colors[solver],
alpha=alpha[dtype],
linestyle=linestyles[dtype])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
name = 'single_target' if single_target else 'multi_target'
name += '_%s' % penalty
plt.suptitle(name)
if outname is None:
outname = name + '.png'
fig.tight_layout()
fig.subplots_adjust(top=0.9)
ax = axes[3]
for scores, times, solver, dtype in zip(group['train_scores'],
group['times'],
group['solver'],
group["dtype"]):
ax.plot(np.arange(len(scores)),
scores, label="%s - %s" % (solver, dtype),
marker=".",
alpha=alpha[dtype],
color=colors[solver], linestyle=linestyles[dtype])
ax.set_yscale("log")
ax.set_xlabel('# iterations')
ax.set_ylabel('Objective function')
ax.legend()
plt.savefig(outname)
if __name__ == '__main__':
solvers = ['saga', 'liblinear', 'lightning']
penalties = ['l1', 'l2']
n_samples = [100000, 300000, 500000, 800000, None]
single_target = True
for penalty in penalties:
for n_sample in n_samples:
exp(solvers, penalty, single_target,
n_samples=n_sample, n_jobs=1,
dataset='rcv1', max_iter=10)
if n_sample is not None:
outname = "figures/saga_%s_%d.png" % (penalty, n_sample)
else:
outname = "figures/saga_%s_all.png" % (penalty,)
try:
os.makedirs("figures")
except OSError:
pass
plot(outname)
|
bsd-3-clause
|
fabiansinz/pipeline
|
python/pipeline/notify.py
|
1
|
1965
|
import datajoint as dj
from datajoint.jobs import key_hash
from . import experiment
schema = dj.schema('pipeline_notification', locals())
# Decorator for notification functions. Ignores exceptions.
def ignore_exceptions(f):
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
print('Ignored exception:', e)
return wrapper
@schema
class SlackConnection(dj.Manual):
definition = """
# slack domain and api key for notification
domain : varchar(128) # slack domain
---
api_key : varchar(128) # api key for bot connection
"""
@schema
class SlackUser(dj.Manual):
definition = """
# information for user notification
-> experiment.Person
---
slack_user : varchar(128) # user on slack
-> SlackConnection
"""
    def notify(self, message=None, file=None, file_title=None, file_comment=None, channel=None):
if self:
from slacker import Slacker
api_key, user = (self * SlackConnection()).fetch1('api_key','slack_user')
s = Slacker(api_key, timeout=60)
channels = ['@' + user]
if channel is not None:
channels.append(channel)
for ch in channels:
                if message:  # skip empty messages (None or '')
s.chat.post_message(ch, message, as_user=True)
if file is not None:
s.files.upload(file_=file, channels=ch,
title=file_title, initial_comment=file_comment)
def temporary_image(array, key):
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
import seaborn as sns
with sns.axes_style('white'):
plt.matshow(array, cmap='gray')
plt.axis('off')
filename = '/tmp/' + key_hash(key) + '.png'
plt.savefig(filename)
sns.reset_orig()
return filename
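# --- Added usage sketch (not part of the original module) --------------------
# A hypothetical example of how the pieces above fit together: render an array
# to a temporary PNG with ``temporary_image`` and push it to the matching
# Slack user via ``SlackUser.notify``. The key below is made up and populated
# ``SlackUser``/``SlackConnection`` tables are assumed; this only sketches the
# intended call pattern and is never run on import.
def example_notify_with_image():
    import numpy as np
    key = {'username': 'example_user'}   # hypothetical experiment.Person key
    image = np.random.rand(64, 64)       # placeholder array to visualize
    filename = temporary_image(image, key)
    (SlackUser() & key).notify(message='Processing finished',
                               file=filename, file_title='summary image')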
|
lgpl-3.0
|
gomesfelipe/BDA_py_demos
|
demos_ch2/demo2_3.py
|
19
|
1931
|
"""Bayesian Data Analysis, 3rd ed
Chapter 2, demo 3
Simulate samples from Beta(438,544), draw a histogram with quantiles, and do
the same for a transformed variable.
"""
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# Plotting grid
x = np.linspace(0.36, 0.54, 150)
# Draw n random samples from Beta(438,544)
n = 10000
th = beta.rvs(438, 544, size=n) # rvs comes from `random variates`
# Plot 2 subplots
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 10))
# Plot histogram
axes[0].hist(th, bins=30)
# Compute 2.5% and 97.5% quantile approximation using samples
th25, th975 = np.percentile(th, [2.5, 97.5])
# Draw lines for these
axes[0].axvline(th25, color='#e41a1c', linewidth=1.5)
axes[0].axvline(th975, color='#e41a1c', linewidth=1.5)
axes[0].text(th25, axes[0].get_ylim()[1]+15, '2.5%',
horizontalalignment='center')
axes[0].text(th975, axes[0].get_ylim()[1]+15, '97.5%',
horizontalalignment='center')
axes[0].set_xlabel(r'$\theta$', fontsize=18)
axes[0].set_yticks(())
# Plot histogram for the transformed variable
phi = (1-th)/th
axes[1].hist(phi, bins=30)
# Compute 2.5% and 97.5% quantile approximation using samples
phi25, phi975 = np.percentile(phi, [2.5, 97.5])
# Draw lines for these
axes[1].axvline(phi25, color='#e41a1c', linewidth=1.5)
axes[1].axvline(phi975, color='#e41a1c', linewidth=1.5)
axes[1].text(phi25, axes[1].get_ylim()[1]+15, '2.5%',
horizontalalignment='center')
axes[1].text(phi975, axes[1].get_ylim()[1]+15, '97.5%',
horizontalalignment='center')
axes[1].set_xlabel(r'$\phi$', fontsize=18)
axes[1].set_yticks(())
# Display the figure
plt.show()
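# --- Added check (not part of the original demo) ------------------------------
# A small follow-up using only scipy.stats.beta imported above: the
# sample-based 2.5% / 97.5% quantiles should be close to the exact quantiles
# of Beta(438, 544). Because phi = (1 - theta)/theta is a decreasing
# transform, the exact interval for phi comes from the opposite theta
# quantiles.
th25_exact, th975_exact = beta.ppf([0.025, 0.975], 438, 544)
print('theta 95% interval: samples [{:.4f}, {:.4f}], exact [{:.4f}, {:.4f}]'
      .format(th25, th975, th25_exact, th975_exact))
print('phi   95% interval: samples [{:.4f}, {:.4f}], exact [{:.4f}, {:.4f}]'
      .format(phi25, phi975, (1 - th975_exact) / th975_exact,
              (1 - th25_exact) / th25_exact))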
|
gpl-3.0
|
soulmachine/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
5
|
10984
|
import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import check_scoring
from sklearn.metrics import make_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error']
CLF_SCORERS = ['accuracy', 'f1', 'roc_auc', 'average_precision', 'precision',
'recall', 'log_loss',
'adjusted_rand_score' # not really, but works
]
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
def test_check_scoring():
"""Test all branches of check_scoring"""
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
pattern = (r"The estimator passed should have a 'score'"
r" or a 'predict' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator,
"accuracy")
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_make_scorer():
"""Sanity check on the make_scorer factory function."""
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
"""Test classification scorers."""
X, y = make_blobs(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
score1 = SCORERS['f1'](clf, X_test, y_test)
score2 = f1_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
"""Test regression scorers."""
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = SCORERS['r2'](clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
"""Test scorers that take thresholds."""
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = SCORERS['log_loss'](clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, SCORERS['roc_auc'], clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
"""Test that the scorer work with multilabel-indicator format
for multilabel and multi-output multi-class classifier
"""
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
"""Test clustering scorers against gold standard labeling."""
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = SCORERS['adjusted_rand_score'](km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
"""Test that when a list of scores is returned, we raise proper errors."""
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
def test_scorer_sample_weight():
"""Test that scorers support sample_weight or raise sensible errors"""
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier()
sensible_clf.fit(X_train, y_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS])
for name, scorer in SCORERS.items():
try:
weighted = scorer(estimator[name], X_test, y_test,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], y_test[10:])
unweighted = scorer(estimator[name], X_test, y_test)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to 0: "
"{1} vs {2}".format(name, weighted, ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
|
bsd-3-clause
|
466152112/scikit-learn
|
examples/model_selection/plot_train_error_vs_test_error.py
|
349
|
2577
|
"""
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example uses an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features impact the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
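# --- Added check (not part of the original example) ---------------------------
# A short follow-up using only numpy already imported above: count how many of
# the 50 truly non-zero coefficients the Elastic-Net fitted at the optimal
# alpha recovers, and how many spurious coefficients it keeps.
true_support = coef != 0
estimated_support = np.abs(coef_) > 1e-10
print("Recovered %d of %d true coefficients; %d false positives"
      % (np.sum(true_support & estimated_support), np.sum(true_support),
         np.sum(~true_support & estimated_support)))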
|
bsd-3-clause
|