repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---
ElDeveloper/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
henrykironde/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
PyPSA/PyPSA | pypsa/contingency.py | 1 | 13452 | ## Copyright 2016-2017 Tom Brown (FIAS)
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Functionality for contingency analysis, such as branch outages.
"""
__author__ = "Tom Brown (FIAS), Fabian Neumann (KIT)"
__copyright__ = "Copyright 2016-2017 Tom Brown (FIAS), 2020 Fabian Neumann (KIT), GNU GPL 3"
from scipy.sparse import issparse, csr_matrix, csc_matrix, hstack as shstack
from numpy import r_, ones, zeros
import logging
logger = logging.getLogger(__name__)
import numpy as np
import pandas as pd
from collections.abc import Iterable
from .descriptors import get_extendable_i, get_non_extendable_i
from .pf import calculate_PTDF, _as_snapshots
from .opt import l_constraint
from .linopt import set_conref, write_constraint, get_var, linexpr
def calculate_BODF(sub_network, skip_pre=False):
"""
Calculate the Branch Outage Distribution Factor (BODF) for
sub_network.
Sets sub_network.BODF as a (dense) numpy array.
The BODF is a num_branch x num_branch 2d array.
For the outage of branch l, the new flow on branch k is
given in terms of the flow before the outage
f_k^after = f_k^before + BODF_{kl} f_l^before
Note that BODF_{ll} = -1.
Parameters
----------
sub_network : pypsa.SubNetwork
skip_pre : bool, default False
Skip the preliminary step of computing the PTDF.
Examples
--------
>>> sub_network.calculate_BODF()
"""
if not skip_pre:
calculate_PTDF(sub_network)
num_branches = sub_network.PTDF.shape[0]
#build LxL version of PTDF
branch_PTDF = sub_network.PTDF*sub_network.K
denominator = csr_matrix((1/(1-np.diag(branch_PTDF)),(r_[:num_branches],r_[:num_branches])))
sub_network.BODF = branch_PTDF*denominator
#make sure the flow on the branch itself is zero
np.fill_diagonal(sub_network.BODF,-1)
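# Illustrative sketch (not part of PyPSA): a tiny numpy example of how the BODF
# computed above is used. The 3x3 matrix and the pre-outage flows are made-up
# numbers chosen only to demonstrate the update rule
#     f_k^after = f_k^before + BODF_{kl} * f_l^before
def _bodf_update_example():
    # hypothetical BODF for a 3-branch sub-network (diagonal is -1 by construction)
    BODF_demo = np.array([[-1.0, 0.3, 0.1],
                          [0.5, -1.0, 0.2],
                          [0.4, 0.6, -1.0]])
    f_before = np.array([100.0, 40.0, -25.0])  # flows before the outage
    l = 1                                       # take branch l out of service
    f_after = f_before + BODF_demo[:, l] * f_before[l]
    assert np.isclose(f_after[l], 0.0)          # flow on the outaged branch itself goes to zero
    return f_after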
def network_lpf_contingency(network, snapshots=None, branch_outages=None):
"""
Computes linear power flow for a selection of branch outages.
Parameters
----------
snapshots : list-like|single snapshot
A subset or an element of network.snapshots on which to run
the power flow, defaults to network.snapshots
NB: currently this only works for a single snapshot
branch_outages : list-like
A list of passive branches which are to be tested for outages.
If None, it is taken as all network.passive_branches_i()
Returns
-------
p0 : pandas.DataFrame
num_passive_branch x num_branch_outages DataFrame of new power flows
Examples
--------
>>> network.lpf_contingency(snapshot, branch_outages)
"""
if snapshots is None:
snapshots = network.snapshots
if isinstance(snapshots, Iterable):
logger.warning("Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.")
snapshot = snapshots[0]
else:
snapshot = snapshots
network.lpf(snapshot)
# Store the flows from the base case
passive_branches = network.passive_branches()
if branch_outages is None:
branch_outages = passive_branches.index
p0_base = pd.concat({c: network.pnl(c).p0.loc[snapshot]
for c in network.passive_branch_components})
p0 = p0_base.to_frame('base')
for sn in network.sub_networks.obj:
sn._branches = sn.branches()
sn.calculate_BODF()
for branch in branch_outages:
if not isinstance(branch, tuple):
logger.warning("No type given for {}, assuming it is a line".format(branch))
branch = ("Line",branch)
sn = network.sub_networks.obj[passive_branches.sub_network[branch]]
branch_i = sn._branches.index.get_loc(branch)
p0_new = p0_base + pd.Series(sn.BODF[:,branch_i]*p0_base[branch],sn._branches.index)
p0[branch] = p0_new
return p0
def add_contingency_constraints(network,snapshots):
passive_branches = network.passive_branches()
branch_outages = network._branch_outages
#prepare the sub networks by calculating BODF and preparing helper DataFrames
for sn in network.sub_networks.obj:
sn.calculate_BODF()
sn._branches = sn.branches()
sn._branches["_i"] = range(sn._branches.shape[0])
sn._extendable_branches = sn._branches[sn._branches.s_nom_extendable]
sn._fixed_branches = sn._branches[~ sn._branches.s_nom_extendable]
#a list of tuples with branch_outage and passive branches in same sub_network
branch_outage_keys = []
flow_upper = {}
flow_lower = {}
for branch in branch_outages:
if type(branch) is not tuple:
logger.warning("No type given for {}, assuming it is a line".format(branch))
branch = ("Line",branch)
sub = network.sub_networks.at[passive_branches.at[branch,"sub_network"],"obj"]
branch_i = sub._branches.at[branch,"_i"]
branch_outage_keys.extend([(branch[0],branch[1],b[0],b[1]) for b in sub._branches.index])
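#the dictionaries below encode, for every branch b in the same sub_network as the
#outaged branch and every snapshot sn, the post-outage flow
#    f_b + BODF[b, branch] * f_branch
#and bound it by s_nom (a fixed bound for non-extendable branches, the
#s_nom variable itself for extendable ones)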
flow_upper.update({(branch[0],branch[1],b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn]),(sub.BODF[sub._branches.at[b,"_i"],branch_i],network.model.passive_branch_p[branch[0],branch[1],sn])],"<=",sub._fixed_branches.at[b,"s_nom"]] for b in sub._fixed_branches.index for sn in snapshots})
flow_upper.update({(branch[0],branch[1],b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn]),(sub.BODF[sub._branches.at[b,"_i"],branch_i],network.model.passive_branch_p[branch[0],branch[1],sn]),(-1,network.model.passive_branch_s_nom[b[0],b[1]])],"<=",0] for b in sub._extendable_branches.index for sn in snapshots})
flow_lower.update({(branch[0],branch[1],b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn]),(sub.BODF[sub._branches.at[b,"_i"],branch_i],network.model.passive_branch_p[branch[0],branch[1],sn])],">=",-sub._fixed_branches.at[b,"s_nom"]] for b in sub._fixed_branches.index for sn in snapshots})
flow_lower.update({(branch[0],branch[1],b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn]),(sub.BODF[sub._branches.at[b,"_i"],branch_i],network.model.passive_branch_p[branch[0],branch[1],sn]),(1,network.model.passive_branch_s_nom[b[0],b[1]])],">=",0] for b in sub._extendable_branches.index for sn in snapshots})
l_constraint(network.model,"contingency_flow_upper",flow_upper,branch_outage_keys,snapshots)
l_constraint(network.model,"contingency_flow_lower",flow_lower,branch_outage_keys,snapshots)
def add_contingency_constraints_lowmem(network, snapshots):
n = network
if not hasattr(n, "_branch_outages"):
n._branch_outages = n.passive_branches().index
branch_outages = [b
if isinstance(b, tuple) else ("Line", b)
for b in n._branch_outages
]
comps = n.passive_branch_components & set(n.variables.index.levels[0])
if len(comps) == 0: return
dispatch_vars = pd.concat({c: get_var(n, c, "s") for c in comps}, axis=1)
invest_vars = pd.concat({
c: get_var(n, c, "s_nom")
if not get_extendable_i(n, c).empty
else pd.Series(dtype=float)
for c in comps
})
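# dispatch_vars collects the flow variables "s" per passive branch component;
# invest_vars collects the "s_nom" capacity variables of extendable branches
# (an empty float Series when a component has no extendable branches)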
constraints = {}
for sn in n.sub_networks.obj:
sn.calculate_BODF()
branches_i = sn.branches_i()
branches = sn.branches()
outages = branches_i.intersection(branch_outages)
ext_i = branches.loc[branches.s_nom_extendable].index
fix_i = branches.loc[~branches.s_nom_extendable].index
p = dispatch_vars[branches_i]
BODF = pd.DataFrame(sn.BODF, index=branches_i, columns=branches_i)
lhs = {}
rhs = {}
for outage in outages:
def contingency_flow(branch):
if branch.name == outage:
return linexpr((0, branch))
flow = linexpr((1, branch))
added_flow = linexpr((BODF.at[branch.name, outage], p[outage]))
return flow + added_flow
lhs_flow = p.apply(contingency_flow, axis=0)
if len(fix_i):
lhs_flow_fix = lhs_flow[fix_i]
s_nom_fix = branches.loc[fix_i, "s_nom"]
key = ("upper", "non_ext", outage)
lhs[key] = lhs_flow_fix
rhs[key] = s_nom_fix
key = ("lower", "non_ext", outage)
lhs[key] = lhs_flow_fix
rhs[key] = - s_nom_fix
if len(ext_i):
lhs_flow_ext = lhs_flow[ext_i]
s_nom_ext = invest_vars[ext_i]
key = ("upper", "ext", outage)
lhs[key] = lhs_flow_ext + linexpr((-1, s_nom_ext))
rhs[key] = 0
key = ("lower", "ext", outage)
lhs[key] = lhs_flow_ext + linexpr((1, s_nom_ext))
rhs[key] = 0
for k in lhs.keys():
sense = "<=" if k[0] == "upper" else ">="
axes = (lhs[k].index, lhs[k].columns)
con = write_constraint(n, lhs[k], sense, rhs[k], axes)
if k not in constraints.keys():
constraints[k] = []
constraints[k].append(con)
for (bound, spec, outage), constr in constraints.items():
constr = pd.concat(constr, axis=1)
for c in comps:
if c in constr.columns.levels[0]:
constr_name = "_".join([bound, *outage])
set_conref(n, constr[c], c,
f"mu_contingency_{constr_name}",
spec=spec)
def network_sclopf(network, snapshots=None, branch_outages=None, solver_name="glpk",
pyomo=True, skip_pre=False, extra_functionality=None, solver_options={},
keep_files=False, formulation="kirchhoff", ptdf_tolerance=0.):
"""
Computes Security-Constrained Linear Optimal Power Flow (SCLOPF).
This ensures that no branch is overloaded even given the branch outages.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
branch_outages : list-like
A list of passive branches which are to be tested for outages.
If None, it is taken as all network.passive_branches_i()
solver_name : string
Must be a solver name that pyomo recognises and that is
installed, e.g. "glpk", "gurobi"
pyomo : bool, default True
Whether to use pyomo for building and solving the model, setting
this to False saves a lot of memory and time.
skip_pre : bool, default False
Skip the preliminary steps of computing topology, calculating
dependent values and finding bus controls.
extra_functionality : callable function
This function must take two arguments
`extra_functionality(network,snapshots)` and is called after
the model building is complete, but before it is sent to the
solver. It allows the user to add/change constraints and
add/change the objective function.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
formulation : string, default "kirchhoff"
Formulation of the linear power flow equations to use; must be
one of ["angles","cycles","kirchoff","ptdf"]
ptdf_tolerance : float
Returns
-------
None
Examples
--------
>>> network.sclopf(snapshots, branch_outages=branch_outages)
"""
if not skip_pre:
network.determine_network_topology()
snapshots = _as_snapshots(network, snapshots)
passive_branches = network.passive_branches()
if branch_outages is None:
branch_outages = passive_branches.index
# save to network for extra_functionality
network._branch_outages = branch_outages
def _extra_functionality(network, snapshots):
if pyomo:
add_contingency_constraints(network, snapshots)
else:
add_contingency_constraints_lowmem(network, snapshots)
if extra_functionality is not None:
extra_functionality(network, snapshots)
pyomo_kwargs = {}
if pyomo:
pyomo_kwargs["ptdf_tolerance"] = ptdf_tolerance
#need to skip preparation otherwise it recalculates the sub-networks
network.lopf(snapshots=snapshots, solver_name=solver_name, pyomo=pyomo,
skip_pre=True, extra_functionality=_extra_functionality,
solver_options=solver_options, keep_files=keep_files,
formulation=formulation, **pyomo_kwargs)
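# Hedged usage sketch (not part of this module): how network_sclopf might be called
# on an existing PyPSA Network `n`. The selection of the first five lines, the single
# snapshot and the glpk solver are assumptions chosen only for illustration.
def _sclopf_usage_example(n):
    # test outages of a handful of lines on one snapshot, using the low-memory path
    outages = [("Line", name) for name in n.lines.index[:5]]
    network_sclopf(n, snapshots=n.snapshots[:1], branch_outages=outages,
                   solver_name="glpk", pyomo=False)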
| gpl-3.0 |
jakobworldpeace/scikit-learn | sklearn/preprocessing/tests/test_data.py | 30 | 61609 |
# Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names([u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std until the end of partial fits, and
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
# Test std until the end of partial fits, and
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
# Test if the incremental computation introduces significant errors
# for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
# Regardless of absolute values, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
# Regardless of magnitude, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_transform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
# (i+1) because the Scaler has been already fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.nan, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [[np.inf, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, 'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
# here were calculated to fit the quantiles produced by np.percentile
# using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test std until the end of partial fits, and
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
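# Sketch of the running statistic partial_fit is expected to maintain in the
# test above: the per-feature max absolute value seen so far is the
# element-wise maximum over all batches, so chunked fitting should agree with
# a single fit on the full array. Illustrative helper, not used by the tests.
def _running_max_abs_reference(batches):
    max_abs = None
    for batch in batches:
        batch_max = np.abs(np.asarray(batch, dtype=float)).max(axis=0)
        max_abs = batch_max if max_abs is None else np.maximum(max_abs, batch_max)
    return max_abs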
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
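# Reference sketch of the row-wise normalization rules checked above: every
# sample is divided by its own norm and all-zero rows are passed through
# untouched. Only the 'l1' and 'l2' cases are spelled out here; the helper is
# illustrative and not used by the tests.
def _normalize_rows_reference(X, norm='l2'):
    X = np.asarray(X, dtype=float)
    if norm == 'l1':
        norms = np.abs(X).sum(axis=1)
    elif norm == 'l2':
        norms = np.sqrt((X ** 2).sum(axis=1))
    else:
        raise ValueError("unsupported norm %r" % norm)
    norms[norms == 0.0] = 1.0
    return X / norms[:, np.newaxis]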
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(row_maxs[3], 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
assert_raises(NotImplementedError, normalize, X_sparse,
norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
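# Spot-check of the return_norm expectations above: for the first row
# [3, 0, 4] the L1 norm is 3 + 0 + 4 = 7, the L2 norm is sqrt(9 + 16) = 5 and
# the max norm is 4, matching the leading entries of the expected arrays.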
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
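# Minimal sketch of the thresholding rule being tested: entries strictly
# greater than the threshold map to 1, everything else (including values equal
# to the threshold) maps to 0. Illustrative helper, not used by the tests.
def _binarize_reference(X, threshold=0.0):
    X = np.asarray(X, dtype=float)
    return (X > threshold).astype(np.float64)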
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
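# The centering identity behind the comparison above, written out directly:
# for a kernel matrix K on the fit data, centering in feature space amounts to
# K_c = K - 1_n.K - K.1_n + 1_n.K.1_n with 1_n = ones((n, n)) / n. The helper
# below is an illustrative restatement for the fit-time (square) case only.
def _center_fit_kernel_reference(K):
    n = K.shape[0]
    one_n = np.ones((n, n)) / n
    return K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)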
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])
# did the pipeline set the _pairwise attribute?
assert_true(pipeline._pairwise)
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
    error_msg = r"unknown categorical feature present \[2\] during transform."
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
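# Illustrative helper for the layout checked above: with feature offsets
# feature_indices_ = [0, 4, 7, 9] (from n_values_ = [4, 3, 2]), a sample such
# as [3, 2, 1] lights up dense columns offset[i] + value, i.e. [3, 6, 8],
# before the unobserved columns are dropped via active_features_.
def _onehot_dense_columns(sample, feature_indices):
    return [feature_indices[i] + int(v) for i, v in enumerate(sample)]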
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def test_transform_selected_copy_arg():
# transformer that alters X
def _mutating_transformer(X):
X[0, 0] = X[0, 0] + 1
return X
original_X = np.asarray([[1, 2], [3, 4]])
expected_Xtr = [[2, 2], [3, 4]]
X = original_X.copy()
Xtr = _transform_selected(X, _mutating_transformer, copy=True,
selected='all')
assert_array_equal(toarray(X), toarray(original_X))
assert_array_equal(toarray(Xtr), expected_Xtr)
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
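# Worked column counts for the three cases above: with cat=[True, False, False]
# the first feature has two observed categories ({0, 3} -> 2 one-hot columns)
# plus 2 passthrough columns = 4; with every feature categorical the observed
# categories are {0, 3}, {1, 2} and {1}, giving 2 + 2 + 1 = 5; with none
# categorical the 3 original columns are returned as-is.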
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
| bsd-3-clause |
samzhang111/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
cbertinato/pandas | pandas/tests/test_multilevel.py | 1 | 81201 | import datetime
from io import StringIO
import itertools
from itertools import product
from warnings import catch_warnings, simplefilter
import numpy as np
from numpy.random import randn
import pytest
import pytz
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
import pandas as pd
from pandas import DataFrame, Series, Timestamp, isna
from pandas.core.index import Index, MultiIndex
import pandas.util.testing as tm
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew', 'mad',
'std', 'var', 'sem']
class Base:
def setup_method(self, method):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = zip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
self.tdf = tm.makeTimeDataFrame(100)
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'], inplace=True)
class TestMultiLevel(Base):
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_append_index(self):
idx1 = Index([1.1, 1.2, 1.3])
idx2 = pd.date_range('2011-01-01', freq='D', periods=3,
tz='Asia/Tokyo')
idx3 = Index(['A', 'B', 'C'])
midx_lv2 = MultiIndex.from_arrays([idx1, idx2])
midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3])
result = idx1.append(midx_lv2)
# see gh-7112
tz = pytz.timezone('Asia/Tokyo')
expected_tuples = [(1.1, tz.localize(datetime.datetime(2011, 1, 1))),
(1.2, tz.localize(datetime.datetime(2011, 1, 2))),
(1.3, tz.localize(datetime.datetime(2011, 1, 3)))]
expected = Index([1.1, 1.2, 1.3] + expected_tuples)
tm.assert_index_equal(result, expected)
result = midx_lv2.append(idx1)
expected = Index(expected_tuples + [1.1, 1.2, 1.3])
tm.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv2)
expected = MultiIndex.from_arrays([idx1.append(idx1),
idx2.append(idx2)])
tm.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv3)
tm.assert_index_equal(result, expected)
result = midx_lv3.append(midx_lv2)
expected = Index._simple_new(
np.array([(1.1, tz.localize(datetime.datetime(2011, 1, 1)), 'A'),
(1.2, tz.localize(datetime.datetime(2011, 1, 2)), 'B'),
(1.3, tz.localize(datetime.datetime(2011, 1, 3)), 'C')] +
expected_tuples), None)
tm.assert_index_equal(result, expected)
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
assert isinstance(multi.index, MultiIndex)
assert not isinstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
assert isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']), np.array(
['x', 'y', 'x', 'y'])])
assert isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'], ['x', 'y', 'x', 'y']])
assert isinstance(multi.index, MultiIndex)
multi = Series(range(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
assert isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
tm.assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
tm.assert_series_equal(result, expected, check_names=False)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
tm.assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
tm.assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(level='month').transform(
np.sum)
expected = op(self.ymd['A'], broadcasted)
expected.name = 'A'
tm.assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
unpickled = tm.round_trip_pickle(frame)
tm.assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
expected = self.frame.iloc[[0, 3]]
reindexed = self.frame.loc[[('foo', 'one'), ('bar', 'one')]]
tm.assert_frame_equal(reindexed, expected)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
tm.assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
assert chunk.index is new_index
chunk = self.ymd.loc[new_index]
assert chunk.index is new_index
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
chunk = self.ymd.ix[new_index]
assert chunk.index is new_index
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
assert chunk.columns is new_index
chunk = ymdT.loc[:, new_index]
assert chunk.columns is new_index
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
assert lines[2].startswith('a 0 foo')
def test_delevel_infer_dtype(self):
        tuples = list(product(['foo', 'bar'], [10, 20], [1.0, 1.1]))
index = MultiIndex.from_tuples(tuples, names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
assert is_integer_dtype(deleveled['prm1'])
assert is_float_dtype(deleveled['prm2'])
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
assert len(deleveled.columns) == len(self.ymd.columns)
assert deleveled.index.name == self.ymd.index.name
deleveled = self.series.reset_index()
assert isinstance(deleveled, DataFrame)
assert len(deleveled.columns) == len(self.series.index.levels) + 1
assert deleveled.index.name == self.series.index.name
deleveled = self.series.reset_index(drop=True)
assert isinstance(deleveled, Series)
assert deleveled.index.name == self.series.index.name
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count()
expected = expected.reindex_like(result).astype('i8')
tm.assert_frame_equal(result, expected)
self.frame.iloc[1, [1, 2]] = np.nan
self.frame.iloc[7, [0, 1]] = np.nan
self.ymd.iloc[1, [1, 2]] = np.nan
self.ymd.iloc[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
with pytest.raises(TypeError, match='hierarchical'):
df.count(level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
tm.assert_index_equal(result.columns, Index(list('ABC'), name='exp'))
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'], ['one', 'two',
'three', 'four']],
codes=[[0, 0, 0, 2, 2], [2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
tm.assert_series_equal(
result.astype('f8'), expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
tm.assert_series_equal(
result.astype('f8'), expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0], name='A')
tm.assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame(index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_get_level_number_out_of_bounds(self):
with pytest.raises(IndexError, match="Too many levels"):
self.frame.index._get_level_number(2)
with pytest.raises(IndexError, match="not a valid level number"):
self.frame.index._get_level_number(-3)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked.unstack()
# test that ints work
self.ymd.astype(int).unstack()
# test that int32 work
self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0), (
1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
tm.assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
        result = self.ymd.unstack(0).stack(-2)
        expected = self.ymd.unstack(0).stack(0)
        tm.assert_frame_equal(result, expected)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert left.index.is_unique is False
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(np.arange(12).reshape(4, 3),
index=list('abab'),
columns=['1st', '2nd', '3rd'])
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd', '3rd']],
codes=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
np.arange(3), 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ['1st', '2nd', '1st']
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd']], codes=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ('a', 2), ('b', 1), ('a', 1), ('b', 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(levels=[['a', 'b'], [1, 2], ['1st', '2nd']],
codes=[np.tile(
np.arange(2).repeat(3), 2), np.repeat(
[1, 0, 1], [3, 6, 3]), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
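    # The recurring pattern in the stacking checks above: unstack() pivots the
    # innermost index level into the columns and stack() reverses it, so a
    # lexsorted frame round-trips exactly (up to level order when intermediate
    # levels are swapped back manually).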
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sort_index(level=1, axis=1)
stacked = df.stack()
result = df['foo'].stack().sort_index()
tm.assert_series_equal(stacked['foo'], result, check_names=False)
assert result.name is None
assert stacked['bar'].dtype == np.float_
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive', 'activ', 'activ',
'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
tm.assert_series_equal(
restacked, result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
assert unstacked.index.name == 'first'
assert unstacked.columns.names == ['exp', 'second']
restacked = unstacked.stack()
assert restacked.index.names == self.frame.index.names
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
tm.assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
tm.assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
tm.assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sort_index(level=0)
tm.assert_frame_equal(restacked, self.ymd)
assert restacked.index.names == self.ymd.index.names
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
tm.assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])
def test_stack_names_and_numbers(self):
unstacked = self.ymd.unstack(['year', 'month'])
# Can't use mixture of names and numbers to stack
with pytest.raises(ValueError, match="level should contain"):
unstacked.stack([0, 'month'])
def test_stack_multiple_out_of_bounds(self):
# nlevels == 3
unstacked = self.ymd.unstack(['year', 'month'])
with pytest.raises(IndexError, match="Too many levels"):
unstacked.stack([2, 3])
with pytest.raises(IndexError, match="not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'],
freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02',
'2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10',
'2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(
['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
tm.assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU').mean()
rs = down.stack('ID')
xp = unst.loc[:, ['VAR1']].resample('W-THU').mean().stack('ID')
xp.columns.name = 'Params'
tm.assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = DataFrame({'A': ['a1', 'a2'], 'B': ['b1', 'b2'], 'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
assert len(stacked) > len(stacked.dropna())
stacked = df.unstack().stack(dropna=True)
tm.assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 1
]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
result = applied.reindex(expected.index)
tm.assert_series_equal(result, expected, check_names=False)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, codes)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
assert len(result.columns) == 4
recons = result.stack()
tm.assert_frame_equal(recons, df)
@pytest.mark.slow
def test_unstack_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame(np.random.randn(2 ** 16, 2),
index=[np.arange(2 ** 16), np.arange(2 ** 16)])
with pytest.raises(ValueError, match='int32 overflow'):
df.unstack()
def test_stack_order_with_unsorted_levels(self):
# GH 16323
def manual_compare_stacked(df, df_stacked, lev0, lev1):
assert all(df.loc[row, col] ==
df_stacked.loc[(row, col[lev0]), col[lev1]]
for row in df.index for col in df.columns)
# deep check for 1-row case
for width in [2, 3]:
levels_poss = itertools.product(
itertools.permutations([0, 1, 2], width),
repeat=2)
for levels in levels_poss:
columns = MultiIndex(levels=levels,
codes=[[0, 0, 1, 1],
[0, 1, 0, 1]])
df = DataFrame(columns=columns, data=[range(4)])
for stack_lev in range(2):
df_stacked = df.stack(stack_lev)
manual_compare_stacked(df, df_stacked,
stack_lev, 1 - stack_lev)
# check multi-row case
mi = MultiIndex(levels=[["A", "C", "B"], ["B", "A", "C"]],
codes=[np.repeat(range(3), 3), np.tile(range(3), 3)])
df = DataFrame(columns=mi, index=range(5),
data=np.arange(5 * len(mi)).reshape(5, -1))
manual_compare_stacked(df, df.stack(0), 0, 1)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
codes=[[0], [0], [0]],
names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'), (
'f2', 's1'), ('f2', 's2'), ('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.loc(axis=1)[df.columns.map(
lambda u: u[0] in ['f2', 'f3'])]
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
assert (result.columns == ['f2', 'f3']).all()
def test_join(self):
a = self.frame.loc[self.frame.index[:5], ['A']]
b = self.frame.loc[self.frame.index[2:], ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
assert not np.isnan(joined.values).all()
# TODO what should join do with names ?
tm.assert_frame_equal(joined, expected, check_names=False)
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel()
swapped2 = self.frame['A'].swaplevel(0)
swapped3 = self.frame['A'].swaplevel(0, 1)
swapped4 = self.frame['A'].swaplevel('first', 'second')
assert not swapped.index.equals(self.frame.index)
tm.assert_series_equal(swapped, swapped2)
tm.assert_series_equal(swapped, swapped3)
tm.assert_series_equal(swapped, swapped4)
back = swapped.swaplevel()
back2 = swapped.swaplevel(0)
back3 = swapped.swaplevel(0, 1)
back4 = swapped.swaplevel('second', 'first')
assert back.index.equals(self.frame.index)
tm.assert_series_equal(back, back2)
tm.assert_series_equal(back, back3)
tm.assert_series_equal(back, back4)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
tm.assert_frame_equal(swapped, exp)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
tm.assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
tm.assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError, match='hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with pytest.raises(IndexError, match='Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
assert isinstance(df.columns, MultiIndex)
assert (df[2000, 1, 10] == df[2000, 1, 7]).all()
def test_alignment(self):
x = Series(data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), (
"A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), (
"Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
tm.assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
tm.assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
tm.assert_series_equal(result, expect, check_names=False)
assert result.index.name == 'b'
result = series.count(level='a')
expect = self.series.count(level=0)
tm.assert_series_equal(result, expect, check_names=False)
assert result.index.name == 'a'
msg = "Level x not found"
with pytest.raises(KeyError, match=msg):
series.count('x')
with pytest.raises(KeyError, match=msg):
frame.count(level='x')
@pytest.mark.parametrize('op', AGG_FUNCTIONS)
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('skipna', [True, False])
@pytest.mark.parametrize('sort', [True, False])
def test_series_group_min_max(self, op, level, skipna, sort):
# GH 17537
grouped = self.series.groupby(level=level, sort=sort)
# skipna=True
leftside = grouped.agg(lambda x: getattr(x, op)(skipna=skipna))
rightside = getattr(self.series, op)(level=level, skipna=skipna)
if sort:
rightside = rightside.sort_index(level=level)
tm.assert_series_equal(leftside, rightside)
@pytest.mark.parametrize('op', AGG_FUNCTIONS)
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('axis', [0, 1])
@pytest.mark.parametrize('skipna', [True, False])
@pytest.mark.parametrize('sort', [True, False])
def test_frame_group_ops(self, op, level, axis, skipna, sort):
# GH 17537
self.frame.iloc[1, [1, 2]] = np.nan
self.frame.iloc[7, [0, 1]] = np.nan
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis, sort=sort)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
if sort:
rightside = rightside.sort_index(level=level, axis=axis)
frame = frame.sort_index(level=level, axis=axis)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
tm.assert_index_equal(leftside._get_axis(axis), level_index)
tm.assert_index_equal(rightside._get_axis(axis), level_index)
tm.assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
tm.assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
tm.assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
tm.assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10), np.tile(
np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
tm.assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
tm.assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
tm.assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
tm.assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
# TODO groupby with level_values drops names
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.names == self.ymd.index.names[:2]
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
tm.assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'), (
'bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df._consolidate()
def test_loc_preserve_names(self):
result = self.ymd.loc[2000]
result2 = self.ymd['A'].loc[2000]
assert result.index.names == self.ymd.index.names[1:]
assert result2.index.names == self.ymd.index.names[1:]
result = self.ymd.loc[2000, 2]
result2 = self.ymd['A'].loc[2000, 2]
assert result.index.name == self.ymd.index.names[2]
assert result2.index.name == self.ymd.index.names[2]
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
assert unstacked['A', 1].dtype == np.float64
assert unstacked['E', 1].dtype == np.object_
assert unstacked['F', 1].dtype == np.float64
def test_unstack_group_index_overflow(self):
codes = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
codes=[codes] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
assert result.shape == (500, 2)
# test roundtrip
stacked = result.stack()
tm.assert_series_equal(s, stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
codes=[np.arange(2).repeat(500)] + [codes] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
assert result.shape == (500, 2)
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
codes=([codes] * 4 + [np.arange(2).repeat(500)] +
[codes] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
assert result.shape == (500, 2)
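    # Rough size of the key space above: eight levels of 500 values plus one
    # binary level give 500 ** 8 * 2, roughly 7.8e21 combinations, well beyond
    # the ~9.2e18 range of int64, which is why the group index cannot fit in a
    # single 64-bit integer.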
def test_pyint_engine(self):
# GH 18519 : when combinations of codes cannot be represented in 64
# bits, the index underlying the MultiIndex engine works with Python
# integers, rather than uint64.
N = 5
keys = [tuple(l) for l in [[0] * 10 * N,
[1] * 10 * N,
[2] * 10 * N,
[np.nan] * N + [2] * 9 * N,
[0] * N + [2] * 9 * N,
[np.nan] * N + [2] * 8 * N + [0] * N]]
# Each level contains 4 elements (including NaN), so it is represented
# in 2 bits, for a total of 2*N*10 = 100 > 64 bits. If we were using a
# 64 bit engine and truncating the first levels, the fourth and fifth
# keys would collide; if truncating the last levels, the fifth and
# sixth; if rotating bits rather than shifting, the third and fifth.
for idx in range(len(keys)):
index = MultiIndex.from_tuples(keys)
assert index.get_loc(keys[idx]) == idx
expected = np.arange(idx + 1, dtype=np.intp)
result = index.get_indexer([keys[i] for i in expected])
tm.assert_numpy_array_equal(result, expected)
# With missing key:
idces = range(len(keys))
expected = np.array([-1] + list(idces), dtype=np.intp)
missing = tuple([0, 1] * 5 * N)
result = index.get_indexer([missing] + [keys[i] for i in idces])
tm.assert_numpy_array_equal(result, expected)
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0), (
'foo', 'qux', 0)], [0, 1]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.loc[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"):
series[('foo', 'bar', 0), 2]
result = frame.loc[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'), (
'foo', 'qux')], [0, 1]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.loc[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = frame.loc[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
tm.assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
tm.assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
tm.assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
tm.assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.loc[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
tm.assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
tm.assert_series_equal(expected, result, check_names=False)
tm.assert_frame_equal(df1, df2)
assert result.name == 'a'
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
tm.assert_frame_equal(expected, result)
tm.assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.iloc[[0, 1, 2, 7, 8, 9]]
tm.assert_frame_equal(result, expected)
result = self.frame.T.reindex(['foo', 'qux'], axis=1, level=0)
tm.assert_frame_equal(result, expected.T)
result = self.frame.loc[['foo', 'qux']]
tm.assert_frame_equal(result, expected)
result = self.frame['A'].loc[['foo', 'qux']]
tm.assert_series_equal(result, expected['A'])
result = self.frame.T.loc[:, ['foo', 'qux']]
tm.assert_frame_equal(result, expected.T)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.iloc[[0, 1, 2, 5, 6]]
tm.assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]]
tm.assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.iloc[[0, 1, 2, 5, 6]].T
tm.assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]].T
tm.assert_frame_equal(result, expected)
def test_drop_level_nonunique_datetime(self):
# GH 12701
idx = Index([2, 3, 4, 4, 5], name='id')
idxdt = pd.to_datetime(['201603231400',
'201603231500',
'201603231600',
'201603231600',
'201603231700'])
df = DataFrame(np.arange(10).reshape(5, 2),
columns=list('ab'), index=idx)
df['tstamp'] = idxdt
df = df.set_index('tstamp', append=True)
ts = Timestamp('201603231600')
assert df.index.is_unique is False
result = df.drop(ts, level='tstamp')
expected = df.loc[idx != 4]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('box', [Series, DataFrame])
def test_drop_tz_aware_timestamp_across_dst(self, box):
# GH 21761
start = Timestamp('2017-10-29', tz='Europe/Berlin')
end = Timestamp('2017-10-29 04:00:00', tz='Europe/Berlin')
index = pd.date_range(start, end, freq='15min')
data = box(data=[1] * len(index), index=index)
result = data.drop(start)
expected_start = Timestamp('2017-10-29 00:15:00', tz='Europe/Berlin')
expected_idx = pd.date_range(expected_start, end, freq='15min')
expected = box(data=[1] * len(expected_idx), index=expected_idx)
tm.assert_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
assert result.index.names == ('one', 'two')
def test_unicode_repr_issues(self):
levels = [Index(['a/\u03c3', 'b/\u03c3', 'c/\u03c3']),
Index([0, 1])]
codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, codes=codes)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=['\u0394', 'i1'])
s = Series(range(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_frame_dict_constructor_empty_series(self):
s1 = Series([
1, 2, 3, 4
], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)]))
s2 = Series([
1, 2, 3, 4
], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
s3 = Series()
# it works!
DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
index = list(df.index)
index[0] = ("faz", "boo")
df.index = index
repr(df)
# this travels an improper code path
index[0] = ["faz", "boo"]
df.index = index
repr(df)
def test_tuples_have_na(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
codes=[[1, 1, 1, 1, -1, 0, 0, 0],
[0, 1, 2, 3, 0, 1, 2, 3]])
assert isna(index[4][0])
assert isna(index.values[4][0])
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo', 'demo', 'demo', 'demo']
idx = MultiIndex.from_tuples(idx_tp, names=['STK_ID', 'RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
assert len(result) == 3
def test_duplicate_mi(self):
# GH 4516
df = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['bah', 'bam', 3.0, 3],
['bah', 'bam', 4.0, 4], ['foo', 'bar', 5.0, 5],
['bah', 'bam', 6.0, 6]],
columns=list('ABCD'))
df = df.set_index(['A', 'B'])
df = df.sort_index(level=0)
expected = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['foo', 'bar', 5.0, 5]],
columns=list('ABCD')).set_index(['A', 'B'])
result = df.loc[('foo', 'bar')]
tm.assert_frame_equal(result, expected)
def test_duplicated_drop_duplicates(self):
# GH 4060
idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2]))
expected = np.array(
[False, False, False, True, False, False], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(), expected)
expected = np.array([True, False, False, False, False, False])
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep='last'), expected)
expected = np.array([True, False, False, True, False, False])
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df['tuples'] = tuples
index = MultiIndex.from_tuples(df['tuples'])
# it works!
df.set_index(index)
def test_datetimeindex(self):
idx1 = pd.DatetimeIndex(
['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'
] * 2, tz='Asia/Tokyo')
idx2 = pd.date_range('2010/01/01', periods=6, freq='M',
tz='US/Eastern')
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00',
'2013-04-03 9:00'], tz='Asia/Tokyo')
tm.assert_index_equal(idx.levels[0], expected1)
tm.assert_index_equal(idx.levels[1], idx2)
# from datetime combos
# GH 7888
date1 = datetime.date.today()
date2 = datetime.datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product(
[date1, date2, date3], [date1, date2, date3]):
index = MultiIndex.from_product([[d1], [d2]])
assert isinstance(index.levels[0], pd.DatetimeIndex)
assert isinstance(index.levels[1], pd.DatetimeIndex)
def test_constructor_with_tz(self):
index = pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
name='dt1', tz='US/Pacific')
columns = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'],
name='dt2', tz='Asia/Tokyo')
result = MultiIndex.from_arrays([index, columns])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
result = MultiIndex.from_arrays([Series(index), Series(columns)])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
def test_set_index_datetime(self):
# GH 3950
df = DataFrame(
{'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
'value': range(6)})
df.index = pd.to_datetime(df.pop('datetime'), utc=True)
df.index = df.index.tz_convert('US/Pacific')
expected = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], name='datetime')
expected = expected.tz_localize('UTC').tz_convert('US/Pacific')
df = df.set_index('label', append=True)
tm.assert_index_equal(df.index.levels[0], expected)
tm.assert_index_equal(df.index.levels[1],
Index(['a', 'b'], name='label'))
df = df.swaplevel(0, 1)
tm.assert_index_equal(df.index.levels[0],
Index(['a', 'b'], name='label'))
tm.assert_index_equal(df.index.levels[1], expected)
df = DataFrame(np.random.random(6))
idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
tz='US/Eastern')
idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00',
'2012-04-01 09:00', '2012-04-02 09:00',
'2012-04-02 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Eastern')
expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
# GH 7092
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
def test_reset_index_datetime(self):
# GH 3950
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz,
name='idx1')
idx2 = Index(range(5), name='idx2', dtype='int64')
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
idx3 = pd.date_range('1/1/2012', periods=5, freq='MS',
tz='Europe/Paris', name='idx3')
idx = MultiIndex.from_arrays([idx1, idx2, idx3])
df = DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'idx3': [datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1),
datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1),
datetime.datetime(2012, 5, 1)],
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'idx3', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: Timestamp(d, tz=tz))
expected['idx3'] = expected['idx3'].apply(
lambda d: Timestamp(d, tz='Europe/Paris'))
tm.assert_frame_equal(df.reset_index(), expected)
# GH 7793
idx = MultiIndex.from_product([['a', 'b'], pd.date_range(
'20130101', periods=3, tz=tz)])
df = DataFrame(
np.arange(6, dtype='int64').reshape(
6, 1), columns=['a'], index=idx)
expected = DataFrame({'level_0': 'a a a b b b'.split(),
'level_1': [
datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 2),
datetime.datetime(2013, 1, 3)] * 2,
'a': np.arange(6, dtype='int64')},
columns=['level_0', 'level_1', 'a'])
expected['level_1'] = expected['level_1'].apply(
lambda d: Timestamp(d, freq='D', tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_reset_index_period(self):
# GH 7746
idx = MultiIndex.from_product(
[pd.period_range('20130101', periods=3, freq='M'), list('abc')],
names=['month', 'feature'])
df = DataFrame(np.arange(9, dtype='int64').reshape(-1, 1),
index=idx, columns=['a'])
expected = DataFrame({
'month': ([pd.Period('2013-01', freq='M')] * 3 +
[pd.Period('2013-02', freq='M')] * 3 +
[pd.Period('2013-03', freq='M')] * 3),
'feature': ['a', 'b', 'c'] * 3,
'a': np.arange(9, dtype='int64')
}, columns=['month', 'feature', 'a'])
tm.assert_frame_equal(df.reset_index(), expected)
def test_reset_index_multiindex_columns(self):
levels = [['A', ''], ['B', 'b']]
df = DataFrame([[0, 2], [1, 3]],
columns=MultiIndex.from_tuples(levels))
result = df[['B']].rename_axis('A').reset_index()
tm.assert_frame_equal(result, df)
# gh-16120: already existing column
with pytest.raises(ValueError,
match=(r"cannot insert \('A', ''\), "
"already exists")):
df.rename_axis('A').reset_index()
# gh-16164: multiindex (tuple) full key
result = df.set_index([('A', '')]).reset_index()
tm.assert_frame_equal(result, df)
# with additional (unnamed) index level
idx_col = DataFrame([[0], [1]],
columns=MultiIndex.from_tuples([('level_0', '')]))
expected = pd.concat([idx_col, df[[('B', 'b'), ('A', '')]]], axis=1)
result = df.set_index([('B', 'b')], append=True).reset_index()
tm.assert_frame_equal(result, expected)
# with index name which is a too long tuple...
with pytest.raises(ValueError,
match=("Item must have length equal "
"to number of levels.")):
df.rename_axis([('C', 'c', 'i')]).reset_index()
# or too short...
levels = [['A', 'a', ''], ['B', 'b', 'i']]
df2 = DataFrame([[0, 2], [1, 3]],
columns=MultiIndex.from_tuples(levels))
idx_col = DataFrame([[0], [1]],
columns=MultiIndex.from_tuples([('C', 'c', 'ii')]))
expected = pd.concat([idx_col, df2], axis=1)
result = df2.rename_axis([('C', 'c')]).reset_index(col_fill='ii')
tm.assert_frame_equal(result, expected)
# ... which is incompatible with col_fill=None
with pytest.raises(ValueError,
match=("col_fill=None is incompatible with "
r"incomplete column name \('C', 'c'\)")):
df2.rename_axis([('C', 'c')]).reset_index(col_fill=None)
# with col_level != 0
result = df2.rename_axis([('c', 'ii')]).reset_index(col_level=1,
col_fill='C')
tm.assert_frame_equal(result, expected)
def test_set_index_period(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = pd.period_range('2011-01-01', periods=3, freq='M')
idx1 = idx1.append(idx1)
idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
idx2 = idx2.append(idx2).append(idx2)
idx3 = pd.period_range('2005', periods=6, freq='A')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.period_range('2011-01-01', periods=3, freq='M')
expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
def test_repeat(self):
# GH 9361
# fixed by # GH 7891
m_idx = MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6), (7, 8)])
data = ['a', 'b', 'c', 'd']
m_df = Series(data, index=m_idx)
assert m_df.repeat(3).shape == (3 * len(data), )
class TestSorted(Base):
""" everything you wanted to test about sorting """
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
assert result.index.names == self.frame.index.names
def test_sorting_repr_8017(self):
np.random.seed(0)
data = np.random.randn(3, 4)
for gen, extra in [([1., 3., 2., 5.], 4.), ([1, 3, 2, 5], 4),
([Timestamp('20130101'), Timestamp('20130103'),
Timestamp('20130102'), Timestamp('20130105')],
Timestamp('20130104')),
(['1one', '3one', '2one', '5one'], '4one')]:
columns = MultiIndex.from_tuples([('red', i) for i in gen])
df = DataFrame(data, index=list('def'), columns=columns)
df2 = pd.concat([df,
DataFrame('world', index=list('def'),
columns=MultiIndex.from_tuples(
[('red', extra)]))], axis=1)
# check that the repr is good
# make sure that we have a correct sparsified repr
# e.g. only 1 header of 'red'
assert str(df2).splitlines()[0].split() == ['red']
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
tm.assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[('red', extra)] = 'world'
result = result.sort_index(axis=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level(self):
df = self.frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = self.frame['A'].sort_index(level=0)
# preserve names
assert a_sorted.index.names == self.frame.index.names
# inplace
rs = self.frame.copy()
rs.sort_index(level=0, inplace=True)
tm.assert_frame_equal(rs, self.frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sort_index(level=0)
assert result.index.lexsort_depth == 3
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sort_index(level='second')
expected = self.frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self):
sorted_before = self.frame.sort_index(level=1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before,
sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
assert index.is_lexsorted()
index = MultiIndex(levels=levels,
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]])
assert not index.is_lexsorted()
index = MultiIndex(levels=levels,
codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]])
assert not index.is_lexsorted()
assert index.lexsort_depth == 0
def test_sort_index_and_reconstruction(self):
# 15622
# lexsortedness should be identical
# across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list('ab'))
expected = DataFrame([[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples([(0.5, 'a'),
(0.5, 'b'),
(0.8, 'a'),
(0.8, 'b')]))
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list('ab')]))
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(levels=[[0.5, 0.8], ['a', 'b']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]]))
result = result.sort_index()
assert result.index.is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# 14015
df = DataFrame([[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, '20160811 12:00:00'),
(0, '20160809 12:00:00')],
names=['l1', 'Date']))
df.columns.set_levels(pd.to_datetime(df.columns.levels[1]),
level=1,
inplace=True)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
result = df.sort_index(axis=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
result = df.sort_index(axis=1, level=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame({'value': [1, 2, 3, 4]},
index=MultiIndex(
levels=[['a', 'b'], ['bb', 'aa']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]]))
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame({'value': [2, 1, 4, 3]},
index=MultiIndex(
levels=[['a', 'b'], ['aa', 'bb']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]]))
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_reorder_on_ops(self):
# 15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[['a', 'b'], ['big', 'small'], ['red', 'blu']],
names=['letter', 'size', 'color']),
columns=['near', 'far'])
df = df.sort_index()
def my_func(group):
group.index = ['newz', 'newa']
return group
result = df.groupby(level=['letter', 'size']).apply(
my_func).sort_index()
expected = MultiIndex.from_product(
[['a', 'b'], ['big', 'small'], ['newa', 'newz']],
names=['letter', 'size', None])
tm.assert_index_equal(result.index, expected)
def test_sort_non_lexsorted(self):
# degenerate case where we sort but don't
# have a satisfying result :<
# GH 15797
idx = MultiIndex([['A', 'B', 'C'],
['c', 'b', 'a']],
[[0, 1, 2, 0, 1, 2],
[0, 2, 1, 1, 0, 2]])
df = DataFrame({'col': range(len(idx))},
index=idx,
dtype='int64')
assert df.index.is_lexsorted() is False
assert df.index.is_monotonic is False
sorted = df.sort_index()
assert sorted.index.is_lexsorted() is True
assert sorted.index.is_monotonic is True
expected = DataFrame(
{'col': [1, 4, 5, 2]},
index=MultiIndex.from_tuples([('B', 'a'), ('B', 'c'),
('C', 'a'), ('C', 'b')]),
dtype='int64')
result = sorted.loc[pd.IndexSlice['B':'C', 'a':'c'], :]
tm.assert_frame_equal(result, expected)
def test_sort_index_nan(self):
# GH 14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4),
index=mi, columns=list('ABCD'))
s = Series(np.arange(4), index=mi)
df2 = DataFrame({
'date': pd.to_datetime([
'20121002', '20121007', '20130130', '20130202', '20130305',
'20121002', '20121207', '20130130', '20130202', '20130305',
'20130202', '20130305'
]),
'user_id': [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
'whole_cost': [1790, np.nan, 280, 259, np.nan, 623, 90, 312,
np.nan, 301, 359, 801],
'cost': [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12]
}).set_index(['date', 'user_id'])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position='last')
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position='first')
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position='last')
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position='first')
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_ascending_list(self):
# GH: 16934
# Set up a Series with a three level MultiIndex
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'],
[4, 3, 2, 1, 4, 3, 2, 1]]
tuples = zip(*arrays)
mi = MultiIndex.from_tuples(tuples, names=['first', 'second', 'third'])
s = Series(range(8), index=mi)
# Sort with boolean ascending
result = s.sort_index(level=['third', 'first'], ascending=False)
expected = s.iloc[[4, 0, 5, 1, 6, 2, 7, 3]]
tm.assert_series_equal(result, expected)
# Sort with list of boolean ascending
result = s.sort_index(level=['third', 'first'],
ascending=[False, True])
expected = s.iloc[[0, 4, 1, 5, 2, 6, 3, 7]]
tm.assert_series_equal(result, expected)
| bsd-3-clause |
efiring/scipy | scipy/cluster/hierarchy.py | 4 | 94372 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) // 2)  # integer number of pairs
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
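# A minimal usage sketch: ``single`` expects a condensed distance matrix,
# such as the output of ``pdist`` (the point coordinates below are chosen
# arbitrarily for illustration).
#
#     >>> import numpy as np
#     >>> from scipy.spatial.distance import pdist
#     >>> X = np.array([[0., 0.], [0., 1.], [4., 4.], [4., 5.]])
#     >>> Z = single(pdist(X))
#     >>> Z.shape                    # n - 1 merge steps, 4 columns each
#     (3, 4)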
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
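# A small illustrative sketch: as described above, ``ward`` also accepts a
# raw observation matrix, in which case Euclidean pairwise distances are
# computed internally (the data below is arbitrary).
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
#     >>> Z = ward(X)                # observation matrix, not a pdist vector
#     >>> int(Z[-1, 3])              # the final cluster contains all points
#     4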
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
return Z
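# An illustrative sketch of the linkage matrix layout documented above: each
# row encodes one merge step as [idx_left, idx_right, distance, sample_count]
# (the condensed distances below are arbitrary).
#
#     >>> import numpy as np
#     >>> y = np.array([1.0, 5.0, 5.0])    # condensed distances for 3 points
#     >>> Z = linkage(y, method='single')
#     >>> Z.shape
#     (2, 4)
#     >>> float(Z[0, 2])                   # first merge occurs at distance 1.0
#     1.0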
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
tree : ClusterNode or tuple
The root ClusterNode of the tree representing the hierarchical
clustering. If ``rd`` is True, the tuple ``(r, d)`` described above
is returned instead.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
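# A brief sketch combining ``to_tree`` with ``ClusterNode.pre_order``: the
# leaves of the returned tree are the original observations 0..n-1 (the
# condensed distances below are arbitrary).
#
#     >>> import numpy as np
#     >>> y = np.array([1.0, 5.0, 5.0])
#     >>> root = to_tree(linkage(y, method='single'))
#     >>> sorted(root.pre_order(lambda x: x.id))
#     [0, 1, 2]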
def _convert_to_bool(X):
if X.dtype != np.bool:
X = X.astype(np.bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
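# A minimal sketch: when ``Y`` is supplied, ``cophenet`` returns the
# cophenetic correlation coefficient together with the condensed cophenetic
# distance matrix (same length as ``Y``); the data below is arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy.spatial.distance import pdist
#     >>> X = np.array([[0., 0.], [0., 1.], [7., 7.], [7., 8.]])
#     >>> Y = pdist(X)
#     >>> c, d = cophenet(linkage(Y, method='single'), Y)
#     >>> d.shape
#     (6,)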
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]}-\\mathtt{R[i,0]}}{\\mathtt{R[i,1]}}
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
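# A small sketch of the shape contract described above: one row per
# non-singleton cluster, four columns (mean, std, count, coefficient);
# the condensed distances are arbitrary.
#
#     >>> import numpy as np
#     >>> y = np.array([1.0, 5.0, 5.0])
#     >>> R = inconsistent(linkage(y, method='single'))
#     >>> R.shape
#     (2, 4)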
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
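# An illustrative sketch with a hypothetical MATLAB-style matrix (1-based
# indices, no count column); the conversion re-bases the indices and appends
# the cluster sizes.
#
#     >>> import numpy as np
#     >>> Zm = np.array([[1., 2., 0.3],
#     ...                [4., 3., 0.8]])
#     >>> from_mlab_linkage(Zm).shape      # fourth (count) column appended
#     (2, 4)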
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
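# A brief round-trip sketch: ``to_mlab_linkage`` drops the count column and
# restores 1-based indexing (distances below are arbitrary).
#
#     >>> import numpy as np
#     >>> Z = linkage(np.array([1.0, 5.0, 5.0]), method='single')
#     >>> to_mlab_linkage(Z).shape         # three columns, 1..N indices
#     (2, 3)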
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# We expect the i'th value to be greater than its successor.
return (Z[1:, 2] >= Z[:-1, 2]).all()
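# A small sketch: single linkage always yields non-decreasing merge
# distances, so the check passes here (distances below are arbitrary).
#
#     >>> import numpy as np
#     >>> Z = linkage(np.array([1.0, 5.0, 5.0]), method='single')
#     >>> bool(is_monotonic(Z))
#     True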
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be an :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
try:
if type(R) != np.ndarray:
if name:
raise TypeError(('Variable \'%s\' passed as inconsistency '
'matrix is not a numpy array.') % name)
else:
raise TypeError('Variable passed as inconsistency matrix '
'is not a numpy array.')
if R.dtype != np.double:
if name:
raise TypeError(('Inconsistency matrix \'%s\' must contain '
'doubles (double).') % name)
else:
raise TypeError('Inconsistency matrix must contain doubles '
'(double).')
if len(R.shape) != 2:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have '
'shape=2 (i.e. be two-dimensional).') % name)
else:
raise ValueError('Inconsistency matrix must have shape=2 '
'(i.e. be two-dimensional).')
if R.shape[1] != 4:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have 4 '
'columns.') % name)
else:
raise ValueError('Inconsistency matrix must have 4 columns.')
if R.shape[0] < 1:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have at '
'least one row.') % name)
else:
raise ValueError('Inconsistency matrix must have at least '
'one row.')
if (R[:, 0] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height means.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height means.')
if (R[:, 1] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height standard '
'deviations.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height standard deviations.')
if (R[:, 2] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link counts.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link counts.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True if the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
try:
if type(Z) != np.ndarray:
if name:
raise TypeError(('\'%s\' passed as a linkage is not a valid '
'array.') % name)
else:
raise TypeError('Variable is not a valid array.')
if Z.dtype != np.double:
if name:
raise TypeError('Linkage matrix \'%s\' must contain doubles.'
% name)
else:
raise TypeError('Linkage matrix must contain doubles.')
if len(Z.shape) != 2:
if name:
raise ValueError(('Linkage matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Linkage matrix must have shape=2 '
'(i.e. be two-dimensional).')
if Z.shape[1] != 4:
if name:
raise ValueError('Linkage matrix \'%s\' must have 4 columns.'
% name)
else:
raise ValueError('Linkage matrix must have 4 columns.')
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'indices.') % name)
else:
raise ValueError('Linkage contains negative indices.')
if (Z[:, 2] < 0).any():
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'distances.') % name)
else:
raise ValueError('Linkage contains negative distances.')
if (Z[:, 3] < 0).any():
if name:
raise ValueError('Linkage \'%s\' contains negative counts.'
% name)
else:
raise ValueError('Linkage contains negative counts.')
if _check_hierarchy_uses_cluster_before_formed(Z):
if name:
raise ValueError(('Linkage \'%s\' uses non-singleton cluster '
                                  'before it is formed.') % name)
else:
raise ValueError("Linkage uses non-singleton cluster before "
"it's formed.")
if _check_hierarchy_uses_cluster_more_than_once(Z):
if name:
raise ValueError(('Linkage \'%s\' uses the same cluster more '
'than once.') % name)
else:
raise ValueError('Linkage uses the same cluster more than '
'once.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
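# A minimal usage sketch (hypothetical helper, not part of the public API):
# a linkage and the condensed distance matrix it was derived from always
# correspond. Assumes `linkage` and `distance.pdist` from this module.
def _example_correspond():
    rng = np.random.RandomState(0)
    X = rng.random_sample((8, 3))
    Y = distance.pdist(X)
    Z = linkage(Y, method='single')
    # Both encode the same 8 original observations, so this returns True.
    return correspond(Z, Y)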
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
                fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
                fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
        An array of length n-1. `monocrit[i]` is the
        statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
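# A minimal usage sketch (hypothetical helper, not part of the public API):
# two common ways of cutting the tree with `fcluster`. Assumes `linkage`
# and `distance.pdist` from this module; threshold values are arbitrary.
def _example_fcluster():
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 3))
    Z = linkage(distance.pdist(X), method='single')
    # Cut so that no two observations in a flat cluster are more than 0.5
    # apart in cophenetic distance.
    T_dist = fcluster(Z, t=0.5, criterion='distance')
    # Alternatively, request at most 3 flat clusters.
    T_max = fcluster(Z, t=3, criterion='maxclust')
    return T_dist, T_max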
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
    Clusters the original observations in the n-by-m data
    matrix X (n observations in m dimensions), using the given
    distance metric (euclidean by default) to calculate distances
    between original observations, performs hierarchical clustering
    using the given linkage method (single by default), and forms flat
    clusters using the given criterion (the inconsistency method by
    default) with `t` as the cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
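# A minimal usage sketch (hypothetical helper, not part of the public API):
# clustering raw observations in one call (pdist -> linkage -> fcluster).
# The parameter values below are arbitrary demonstration choices.
def _example_fclusterdata():
    rng = np.random.RandomState(0)
    X = rng.random_sample((30, 2))
    T = fclusterdata(X, t=4, criterion='maxclust',
                     metric='euclidean', method='average')
    return T  # flat cluster id for each of the 30 observations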
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
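# A minimal usage sketch (hypothetical helper, not part of the public API):
# the returned permutation is the left-to-right order of the original
# observations in the dendrogram. Assumes `linkage` and `distance.pdist`.
def _example_leaves_list():
    rng = np.random.RandomState(0)
    X = rng.random_sample((6, 2))
    Z = linkage(distance.pdist(X), method='single')
    return leaves_list(Z)  # a permutation of 0..5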
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= np.inf, size="6"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
    # Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(len(ivl)))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(len(ivl)))
                map(lambda lbl: lbl.set_size(leaf_fs), lbls)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(p))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(p))
                map(lambda lbl: lbl.set_size(leaf_fs), lbls)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there is a grouping of links above the color threshold,
# it should go last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None, above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
          The last ``p`` non-singleton clusters formed in the linkage are
          the only non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
        ``'top'``
          Plots the root at the top, and plots descendent links going
          downwards. (default).
        ``'bottom'``
          Plots the root at the bottom, and plots descendent links going
          upwards.
        ``'left'``
          Plots the root at the left, and plots descendent links going right.
        ``'right'``
          Plots the root at the right, and plots descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
        parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
          ``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
        parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
        observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
        unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called with
        the cluster index :math:`k < 2n-1` of each leaf. The function
        is expected to return a string with the label for the
        leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do:
>>> # First define the leaf label function.
>>> def llf(id):
... if id < n:
        ...         return str(id)
        ...     else:
        ...         return '[%d %d %1.2f]' % (id, count, R[n-id,3])
>>>
>>> # The text for the leaf nodes is going to be big so force
>>> # a rotation of 90 degrees.
>>> dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example:
>>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
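# A minimal usage sketch (hypothetical helper, not part of the public API):
# computes the dendrogram data structures without plotting, truncated to the
# last 5 merges. Assumes `linkage` and `distance.pdist` from this module.
def _example_dendrogram_no_plot():
    rng = np.random.RandomState(0)
    X = rng.random_sample((20, 4))
    Z = linkage(distance.pdist(X), method='average')
    # no_plot=True skips matplotlib entirely and only fills in the returned
    # data structures (icoord, dcoord, ivl, leaves, color_list).
    R = dendrogram(Z, truncate_mode='lastp', p=5, no_plot=True)
    return R['ivl'], R['leaves']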
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-singleton
        # cluster, its label is either the empty string or the number of
        # original observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
    if aa >= n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
    if ab >= n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
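# A minimal usage sketch (hypothetical helper, not part of the public API):
# two flat cluster assignments that induce the same partition are isomorphic
# even though the cluster ids themselves differ.
def _example_is_isomorphic():
    T1 = [1, 1, 2, 2, 3]
    T2 = [7, 7, 5, 5, 9]
    return is_isomorphic(T1, T2)  # True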
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
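# A minimal usage sketch (hypothetical helper, not part of the public API):
# MD[i] is the largest merge distance at or below non-singleton node n + i,
# so the root entry dominates every other merge distance. Assumes `linkage`
# and `distance.pdist` from this module.
def _example_maxdists():
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 2))
    Z = linkage(distance.pdist(X), method='single')
    MD = maxdists(Z)
    return MD[-1] == Z[:, 2].max()  # True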
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
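# A minimal usage sketch (hypothetical helper, not part of the public API):
# the 'monocrit' recipe from the `fcluster` docstring, thresholding on the
# running maximum of the inconsistency coefficient (column 3 of R). Assumes
# `linkage`, `distance.pdist` and `inconsistent` from this module.
def _example_maxRstat_monocrit():
    rng = np.random.RandomState(0)
    X = rng.random_sample((15, 2))
    Z = linkage(distance.pdist(X), method='single')
    R = inconsistent(Z)
    MR = maxRstat(Z, R, 3)
    return fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)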
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
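# A minimal usage sketch (hypothetical helper, not part of the public API):
# finds the root node of each flat cluster produced by `fcluster`. Assumes
# `linkage` and `distance.pdist` from this module.
def _example_leaders():
    rng = np.random.RandomState(0)
    X = rng.random_sample((12, 3))
    Z = linkage(distance.pdist(X), method='complete')
    T = fcluster(Z, t=3, criterion='maxclust')
    # L[j] is the linkage node id that leads the flat cluster with id M[j].
    L, M = leaders(Z, T)
    return L, M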
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
| bsd-3-clause |
Monika319/EWEF-1 | Cw2Rezonans/Karolina/Oscyloskop/OscyloskopZ9W3.py | 1 | 1326 | # -*- coding: utf-8 -*-
"""
Plot oscilloscope files from MultiSim
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from matplotlib import rc
rc('font',family="Consolas")
files=["real_zad9_033f.txt"]
for NazwaPliku in files:
print NazwaPliku
Plik=open(NazwaPliku)
#print DeltaT
Dane=Plik.readlines()#[4:]
DeltaT=float(Dane[2].split()[3].replace(",","."))
#M=len(Dane[4].split())/2
M=2
Dane=Dane[5:]
Plik.close()
print M
Ys=[np.zeros(len(Dane)) for i in range(M)]
for m in range(M):
for i in range(len(Dane)):
try:
Ys[m][i]=float(Dane[i].split()[2+3*m].replace(",","."))
except:
print m, i, 2+3*m, len(Dane[i].split()), Dane[i].split()
#print i, Y[i]
X=np.zeros_like(Ys[0])
for i in range(len(X)):
X[i]=i*DeltaT
for y in Ys:
print max(y)-min(y)
Opis=u"Układ równoległy\nJedna trzecia częstotliwości rezonansowej"
Nazwa=u"Z9W3"
plt.title(u"Przebieg napięciowy\n"+Opis)
plt.xlabel(u"Czas t [s]")
plt.ylabel(u"Napięcie [V]")
plt.plot(X,Ys[0],label=u"Wejście")
plt.plot(X,Ys[1],label=u"Wyjście")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(Nazwa + ".png", bbox_inches='tight')
plt.show()
| gpl-2.0 |
fierval/KaggleMalware | Learning/train_files.py | 2 | 7205 | import os
from os import path
import numpy as np
import csv
import shutil
import sys
from sklearn.cross_validation import train_test_split
from tr_utils import append_to_arr
from zipfile import ZipFile
class TrainFiles(object):
"""
Utilities for dealing with the file system
"""
def __init__(self, train_path = None, val_path = None, labels_file = None, debug = True, test_size = 0.1, floor = 0., cutoff = sys.maxint):
'''
If validate is set to false - don't attempt to match test set to labels
'''
self.train_path = train_path
self.labels_file = labels_file
self._cutoff = cutoff
self._floor = floor
self.labels = None
self.debug = debug
self.validate = (val_path == None) # perform validation if no test set is specified
self.test_size = test_size
self.val_path = val_path
        self.isZip = train_path is not None and path.splitext(train_path)[1] == '.zip'
def __str__(self):
return 'train: {0}, validate: {1}, labels: {2}'.format(self.train_path, self.val_path, self.labels_file)
@property
def cutoff(self):
"""
Max file size to consider when listing directory content
"""
return self._cutoff
@cutoff.setter
def cutoff(self, val):
self._cutoff = val
@property
def floor(self):
return self._floor
@floor.setter
def floor(self, val):
self._floor = val
def get_size(self, file, dir) :
return os.stat(path.join(dir, file)).st_size
def _get_inputs(self, dir):
if not self.isZip:
return filter (lambda x: not path.isdir(path.join(dir, x)) and self.get_size(x, dir) > self.floor and self.get_size(x, dir) <= self.cutoff, os.listdir(dir))
else:
with ZipFile(dir) as zip:
l = zip.infolist()
return map(lambda x: x.filename, filter(lambda x: x.file_size > self.floor and x.file_size <= self.cutoff, l))
def get_training_inputs(self):
"""
retrieves file names (not full path) of files containing training "image" data
"""
return self._get_inputs(self.train_path)
def get_val_inputs(self):
"""
        retrieves file names (not full path) of files containing validation "image" data
"""
return self._get_inputs(self.val_path)
def get_labels_csv(self):
"""
retrieves the values of each class labels assuming they are stored as the following CSV:
| ID | Class |
"""
with open(self.labels_file, 'rb') as csvlabels:
lablesreader = csv.reader(csvlabels)
file_label = map(lambda x: (x[0], int(x[1])), [(row[0], row[1]) for row in lablesreader][1:])
return file_label
def _connect_labeled_data(self, inp_path, inputs, training):
if self.labels == None:
self.labels = self.get_labels_csv()
X = np.array([])
Y = np.array([])
zip = False
if self.isZip:
zip = ZipFile(inp_path)
for inp in inputs:
inp_file = path.join(inp_path, inp) if not zip else inp
x = np.fromfile(inp_file, dtype='int') if not zip else np.frombuffer(zip.read(inp_file), dtype='int')
x = x.astype('float')
X = append_to_arr(X, x)
if training or self.validate:
label_name = path.splitext(path.split(inp_file)[1])[0]
label = filter(lambda x: x[0] == label_name, self.labels)[0][1]
Y = np.append(Y, label)
if self.debug:
print "Processed: " + inp_file
return X, Y
def _connect_labeled_data_csv(self, inp_path, inputs, training):
if self.labels == None:
self.labels = self.get_labels_csv()
X = np.array([])
Y = np.array([])
for inp in inputs:
inp_file = path.join(inp_path, inp)
x = np.loadtxt(inp_file)
X = append_to_arr(X, x)
if training or self.validate:
label_name = path.splitext(inp)[0]
label = filter(lambda x: x[0] == label_name, self.labels)[0][1]
Y = np.append(Y, label)
if self.debug:
print "Processed: " + inp_file
return X, Y
def _get_inp_input_path(self, training):
inputs = self.get_training_inputs() if training else self.get_val_inputs()
inp_path = self.train_path if training else self.val_path
return inputs, inp_path
def connect_labeled_data(self, training):
"""
Read the training/validation file names and produce two arrays, which once zipped
and iterated over will form a tuple (itemI, classI)
"""
inputs, inp_path = self._get_inp_input_path(training)
return self._connect_labeled_data(inp_path, inputs, training)
def connect_labeled_data_csv(self, training):
"""
Read the training/validation file names and produce two arrays, which once zipped
and iterated over will form a tuple (itemI, classI)
"""
inputs, inp_path = self._get_inp_input_path(training)
return self._connect_labeled_data_csv(inp_path, inputs, training)
def prepare_inputs(self):
"""
Read training, validation, labels and output them
"""
if not self.validate:
X_train, Y_train = self.connect_labeled_data(True)
X_test, Y_test = self.connect_labeled_data(False)
else:
X_train, Y_train = self.connect_labeled_data(True)
X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size = self.test_size, random_state = 1234)
return X_train, Y_train, X_test, Y_test
@staticmethod
def dump_to_csv(csvf, x, y):
with open(csvf, "wb") as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
row = ['Feature ' + str(i) for i in range(0, x[0].size)]
row.append('Class')
csv_writer.writerow(row)
for a in zip(x, y):
row = [f for f in a[0]]
row.append(a[1])
csv_writer.writerow(row)
@staticmethod
def from_csv(csvf, test_size = 0.1):
with open (csvf, "rb") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
skip = csv.Sniffer().has_header(csv_file.read(1024))
X = np.loadtxt(csvf, delimiter = ',', skiprows = 1 if skip else 0)
Y = X[:, -1]
X = X[:, :-1].astype('float')
x, xt, y, yt = train_test_split(X, Y, test_size = test_size, random_state = 1234)
return x, y, xt, yt
def prepare_inputs_csv(self):
if not self.validate:
X_train, Y_train = self.connect_labeled_data_csv(True)
X_test, Y_test = self.connect_labeled_data_csv(False)
else:
X_train, Y_train = self.connect_labeled_data(True)
X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size = self.test_size, random_state = 1234)
return X_train, Y_train, X_test, Y_test | mit |
rrohan/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 71 | 25104 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
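    # euclidean_distances can reuse precomputed squared norms because
    # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2; a rough sketch of that shortcut
    # (not the library's exact code path) is:
    #   D_sq = X_norm_sq.T - 2 * np.dot(X, Y.T) + Y_norm_sq
    #   D = np.sqrt(np.maximum(D_sq, 0))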
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
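    # The loop below checks both kernels entry-wise against their definitions:
    #   additive_chi2(x, y) = -sum_i (x_i - y_i)^2 / (x_i + y_i)
    #   chi2(x, y)          = exp(gamma * additive_chi2(x, y))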
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when the data
        # has been previously L2-normalized.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyway
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
madjelan/CostSensitiveClassification | costcla/models/cost_ensemble.py | 1 | 20672 | """
This module include the cost sensitive ensemble methods.
"""
# Authors: Alejandro Correa Bahnsen <[email protected]>
# License: BSD 3 clause
from sklearn.cross_validation import train_test_split
from ..models import CostSensitiveDecisionTreeClassifier
from ..models.bagging import BaggingClassifier
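# The four ensemble variants below differ only in how each base
# CostSensitiveDecisionTreeClassifier gets its training data (a summary of the
# arguments passed to BaggingClassifier.__init__ in each class):
#   - random forest:  bootstrap samples of X; each tree considers a random
#                     subset of features at every split (the tree's max_features)
#   - bagging:        bootstrap samples of size max_samples * n_samples
#   - pasting:        samples drawn without replacement (bootstrap=False)
#   - random patches: both samples and features drawn without replacement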
class CostSensitiveRandomForestClassifier(BaggingClassifier):
"""A example-dependent cost-sensitive random forest classifier.
Parameters
----------
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
combination : string, optional (default="majority_voting")
Which combination method to use:
- If "majority_voting" then combine by majority voting
- If "weighted_voting" then combine by weighted voting using the
out of bag savings as the weight for each estimator.
- If "stacking" then a Cost Sensitive Logistic Regression is used
to learn the combination.
- If "stacking_proba" then a Cost Sensitive Logistic Regression trained
          with the estimated probabilities is used to learn the combination.
- If "stacking_bmr" then a Cost Sensitive Logistic Regression is used
to learn the probabilities and a BayesMinimumRisk for the prediction.
- If "stacking_proba_bmr" then a Cost Sensitive Logistic Regression trained
with the estimated probabilities is used to learn the probabilities,
and a BayesMinimumRisk for the prediction.
- If "majority_bmr" then the BayesMinimumRisk algorithm is used to make the
prediction using the predicted probabilities of majority_voting
- If "weighted_bmr" then the BayesMinimumRisk algorithm is used to make the
prediction using the predicted probabilities of weighted_voting
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split in each tree:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
    pruned : bool, optional (default=False)
        Whether or not to prune the decision tree using cost-based pruning
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
Attributes
----------
    `base_estimator_`: estimator
The base estimator from which the ensemble is grown.
`estimators_`: list of estimators
The collection of fitted base estimators.
`estimators_samples_`: list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
`estimators_features_`: list of arrays
The subset of drawn features for each base estimator.
See also
--------
costcla.models.CostSensitiveDecisionTreeClassifier
References
----------
.. [1] Correa Bahnsen, A., Aouada, D., & Ottersten, B.
`"Ensemble of Example-Dependent Cost-Sensitive Decision Trees" <http://arxiv.org/abs/1505.04637>`__,
2015, http://arxiv.org/abs/1505.04637.
Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.cross_validation import train_test_split
>>> from costcla.datasets import load_creditscoring1
>>> from costcla.models import CostSensitiveRandomForestClassifier
>>> from costcla.metrics import savings_score
>>> data = load_creditscoring1()
>>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
>>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
>>> y_pred_test_rf = RandomForestClassifier(random_state=0).fit(X_train, y_train).predict(X_test)
>>> f = CostSensitiveRandomForestClassifier()
>>> y_pred_test_csdt = f.fit(X_train, y_train, cost_mat_train).predict(X_test)
>>> # Savings using only RandomForest
>>> print savings_score(y_test, y_pred_test_rf, cost_mat_test)
0.12454256594
>>> # Savings using CostSensitiveRandomForestClassifier
>>> print savings_score(y_test, y_pred_test_csdt, cost_mat_test)
0.499390945808
"""
def __init__(self,
n_estimators=10,
combination='majority_voting',
max_features='auto',
n_jobs=1,
verbose=False,
pruned=False):
super(BaggingClassifier, self).__init__(
base_estimator=CostSensitiveDecisionTreeClassifier(max_features=max_features, pruned=pruned),
n_estimators=n_estimators,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
combination=combination,
n_jobs=n_jobs,
random_state=None,
verbose=verbose)
self.pruned = pruned
class CostSensitiveBaggingClassifier(BaggingClassifier):
"""A example-dependent cost-sensitive bagging classifier.
Parameters
----------
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=0.5)
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
combination : string, optional (default="majority_voting")
Which combination method to use:
- If "majority_voting" then combine by majority voting
- If "weighted_voting" then combine by weighted voting using the
out of bag savings as the weight for each estimator.
- If "stacking" then a Cost Sensitive Logistic Regression is used
to learn the combination.
- If "stacking_proba" then a Cost Sensitive Logistic Regression trained
          with the estimated probabilities is used to learn the combination.
- If "stacking_bmr" then a Cost Sensitive Logistic Regression is used
to learn the probabilities and a BayesMinimumRisk for the prediction.
- If "stacking_proba_bmr" then a Cost Sensitive Logistic Regression trained
with the estimated probabilities is used to learn the probabilities,
and a BayesMinimumRisk for the prediction.
- If "majority_bmr" then the BayesMinimumRisk algorithm is used to make the
prediction using the predicted probabilities of majority_voting
- If "weighted_bmr" then the BayesMinimumRisk algorithm is used to make the
prediction using the predicted probabilities of weighted_voting
    pruned : bool, optional (default=False)
        Whether or not to prune the decision tree using cost-based pruning
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
Attributes
----------
    `base_estimator_`: estimator
The base estimator from which the ensemble is grown.
`estimators_`: list of estimators
The collection of fitted base estimators.
`estimators_samples_`: list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
`estimators_features_`: list of arrays
The subset of drawn features for each base estimator.
See also
--------
costcla.models.CostSensitiveDecisionTreeClassifier
References
----------
.. [1] Correa Bahnsen, A., Aouada, D., & Ottersten, B.
`"Ensemble of Example-Dependent Cost-Sensitive Decision Trees" <http://arxiv.org/abs/1505.04637>`__,
2015, http://arxiv.org/abs/1505.04637.
Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.cross_validation import train_test_split
>>> from costcla.datasets import load_creditscoring1
>>> from costcla.models import CostSensitiveBaggingClassifier
>>> from costcla.metrics import savings_score
>>> data = load_creditscoring1()
>>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
>>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
>>> y_pred_test_rf = RandomForestClassifier(random_state=0).fit(X_train, y_train).predict(X_test)
>>> f = CostSensitiveBaggingClassifier()
>>> y_pred_test_csdt = f.fit(X_train, y_train, cost_mat_train).predict(X_test)
>>> # Savings using only RandomForest
>>> print savings_score(y_test, y_pred_test_rf, cost_mat_test)
0.12454256594
    >>> # Savings using CostSensitiveBaggingClassifier
>>> print savings_score(y_test, y_pred_test_csdt, cost_mat_test)
0.478964004931
"""
def __init__(self,
n_estimators=10,
max_samples=0.5,
combination='majority_voting',
n_jobs=1,
verbose=False,
pruned=False):
super(BaggingClassifier, self).__init__(
base_estimator=CostSensitiveDecisionTreeClassifier(pruned=pruned),
n_estimators=n_estimators,
max_samples=max_samples,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
combination=combination,
n_jobs=n_jobs,
random_state=None,
verbose=verbose)
self.pruned = pruned
class CostSensitivePastingClassifier(BaggingClassifier):
"""A example-dependent cost-sensitive pasting classifier.
Parameters
----------
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=0.5)
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
combination : string, optional (default="majority_voting")
Which combination method to use:
- If "majority_voting" then combine by majority voting
- If "weighted_voting" then combine by weighted voting using the
out of bag savings as the weight for each estimator.
- If "stacking" then a Cost Sensitive Logistic Regression is used
to learn the combination.
- If "stacking_proba" then a Cost Sensitive Logistic Regression trained
          with the estimated probabilities is used to learn the combination.
- If "stacking_bmr" then a Cost Sensitive Logistic Regression is used
to learn the probabilities and a BayesMinimumRisk for the prediction.
- If "stacking_proba_bmr" then a Cost Sensitive Logistic Regression trained
with the estimated probabilities is used to learn the probabilities,
and a BayesMinimumRisk for the prediction.
- If "majority_bmr" then the BayesMinimumRisk algorithm is used to make the
prediction using the predicted probabilities of majority_voting
- If "weighted_bmr" then the BayesMinimumRisk algorithm is used to make the
prediction using the predicted probabilities of weighted_voting
    pruned : bool, optional (default=False)
        Whether or not to prune the decision tree using cost-based pruning
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
Attributes
----------
    `base_estimator_`: estimator
The base estimator from which the ensemble is grown.
`estimators_`: list of estimators
The collection of fitted base estimators.
`estimators_samples_`: list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
`estimators_features_`: list of arrays
The subset of drawn features for each base estimator.
See also
--------
costcla.models.CostSensitiveDecisionTreeClassifier
References
----------
.. [1] Correa Bahnsen, A., Aouada, D., & Ottersten, B.
`"Ensemble of Example-Dependent Cost-Sensitive Decision Trees" <http://arxiv.org/abs/1505.04637>`__,
2015, http://arxiv.org/abs/1505.04637.
Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.cross_validation import train_test_split
>>> from costcla.datasets import load_creditscoring1
>>> from costcla.models import CostSensitivePastingClassifier
>>> from costcla.metrics import savings_score
>>> data = load_creditscoring1()
>>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
>>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
>>> y_pred_test_rf = RandomForestClassifier(random_state=0).fit(X_train, y_train).predict(X_test)
>>> f = CostSensitivePastingClassifier()
>>> y_pred_test_csdt = f.fit(X_train, y_train, cost_mat_train).predict(X_test)
>>> # Savings using only RandomForest
>>> print savings_score(y_test, y_pred_test_rf, cost_mat_test)
0.12454256594
    >>> # Savings using CostSensitivePastingClassifier
>>> print savings_score(y_test, y_pred_test_csdt, cost_mat_test)
0.479633754848
"""
def __init__(self,
n_estimators=10,
max_samples=0.5,
combination='majority_voting',
n_jobs=1,
verbose=False,
pruned=False):
super(BaggingClassifier, self).__init__(
base_estimator=CostSensitiveDecisionTreeClassifier(pruned=pruned),
n_estimators=n_estimators,
max_samples=max_samples,
max_features=1.0,
bootstrap=False,
bootstrap_features=False,
combination=combination,
n_jobs=n_jobs,
random_state=None,
verbose=verbose)
self.pruned = pruned
class CostSensitiveRandomPatchesClassifier(BaggingClassifier):
"""A example-dependent cost-sensitive pasting classifier.
Parameters
----------
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=0.5)
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
max_features : int or float, optional (default=0.5)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
combination : string, optional (default="majority_voting")
Which combination method to use:
- If "majority_voting" then combine by majority voting
- If "weighted_voting" then combine by weighted voting using the
out of bag savings as the weight for each estimator.
- If "stacking" then a Cost Sensitive Logistic Regression is used
to learn the combination.
- If "stacking_proba" then a Cost Sensitive Logistic Regression trained
          with the estimated probabilities is used to learn the combination.
- If "stacking_bmr" then a Cost Sensitive Logistic Regression is used
to learn the probabilities and a BayesMinimumRisk for the prediction.
- If "stacking_proba_bmr" then a Cost Sensitive Logistic Regression trained
with the estimated probabilities is used to learn the probabilities,
and a BayesMinimumRisk for the prediction.
- If "majority_bmr" then the BayesMinimumRisk algorithm is used to make the
prediction using the predicted probabilities of majority_voting
- If "weighted_bmr" then the BayesMinimumRisk algorithm is used to make the
prediction using the predicted probabilities of weighted_voting
    pruned : bool, optional (default=False)
        Whether or not to prune the decision tree using cost-based pruning
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
Attributes
----------
    `base_estimator_`: estimator
The base estimator from which the ensemble is grown.
`estimators_`: list of estimators
The collection of fitted base estimators.
`estimators_samples_`: list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
`estimators_features_`: list of arrays
The subset of drawn features for each base estimator.
See also
--------
costcla.models.CostSensitiveDecisionTreeClassifier
References
----------
.. [1] Correa Bahnsen, A., Aouada, D., & Ottersten, B.
`"Ensemble of Example-Dependent Cost-Sensitive Decision Trees" <http://arxiv.org/abs/1505.04637>`__,
2015, http://arxiv.org/abs/1505.04637.
Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.cross_validation import train_test_split
>>> from costcla.datasets import load_creditscoring1
>>> from costcla.models import CostSensitiveRandomPatchesClassifier
>>> from costcla.metrics import savings_score
>>> data = load_creditscoring1()
>>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
>>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
>>> y_pred_test_rf = RandomForestClassifier(random_state=0).fit(X_train, y_train).predict(X_test)
>>> f = CostSensitiveRandomPatchesClassifier(combination='weighted_voting')
>>> y_pred_test_csdt = f.fit(X_train, y_train, cost_mat_train).predict(X_test)
>>> # Savings using only RandomForest
>>> print savings_score(y_test, y_pred_test_rf, cost_mat_test)
0.12454256594
    >>> # Savings using CostSensitiveRandomPatchesClassifier
>>> print savings_score(y_test, y_pred_test_csdt, cost_mat_test)
0.499548618518
"""
def __init__(self,
n_estimators=10,
max_samples=0.5,
max_features=0.5,
combination='majority_voting',
n_jobs=1,
verbose=False,
pruned=False):
super(BaggingClassifier, self).__init__(
base_estimator=CostSensitiveDecisionTreeClassifier(pruned=pruned),
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=False,
bootstrap_features=False,
combination=combination,
n_jobs=n_jobs,
random_state=None,
verbose=verbose)
self.pruned = pruned
# TODO: running the ensemble in parallel is not working; it fails silently without raising an error
# from costcla.datasets import load_creditscoring1
# data = load_creditscoring1()
# x=data.data
# y=data.target
# c=data.cost_mat
#
# print 'start'
# f = BaggingClassifier(n_estimators=10, verbose=100, n_jobs=2)
# f.fit(x[0:1000],y[0:1000],c[0:1000])
# print 'predict proba'
# f.__setattr__('n_jobs', 4)
# f.predict(x)
# print 'predict END'
| bsd-3-clause |
mrshu/scikit-learn | sklearn/neighbors/unsupervised.py | 4 | 3533 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`scipy.spatial.cKDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
warn_on_equidistant : boolean, optional. Defaults to True.
Generate a warning if equidistant neighbors are discarded.
For classification or regression based on k-neighbors, if
neighbor k and neighbor k+1 have identical distances but
different labels, then the result will be dependent on the
ordering of the training data.
If the fit method is ``'kd_tree'``, no warnings will be generated.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
Examples
--------
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
array([[2]])
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30,
warn_on_equidistant=True, p=2):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
warn_on_equidistant=warn_on_equidistant,
p=p)
| bsd-3-clause |
h2educ/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a ``predict``
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
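# For example (illustrative, mirroring the definitions right below), a loss
# such as mean_squared_error becomes a scorer whose sign is flipped so that
# "greater is better" still holds:
#   neg_mse = make_scorer(mean_squared_error, greater_is_better=False)
#   neg_mse(fitted_estimator, X_test, y_test)  # returns -MSE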
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
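# The loop above registers one plain scorer per metric plus one averaged
# variant per averaging scheme, e.g. 'precision', 'precision_macro',
# 'recall_micro', 'f1_weighted' (illustrative key names).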
| bsd-3-clause |
mhue/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
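    # The perplexity of each conditional distribution P[i] is exp(H(P[i])),
    # the exponential of its Shannon entropy (natural log), which is what the
    # list comprehension below computes before averaging.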
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
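    # check_grad compares the analytical gradient returned by _kl_divergence
    # with a finite-difference approximation of `fun`; a result close to zero
    # means both are consistent.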
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
siutanwong/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
    separator : string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse : boolean, optional
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort : boolean, optional
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted
        when fitting.
        True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
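# Illustrative sketch (not part of the estimator above; the data is made up):
# string-valued features are expanded into "name=value" indicator columns, and
# ``inverse_transform`` reports those constructed names rather than the
# original strings.
#
#     >>> v = DictVectorizer(sparse=False)
#     >>> v.fit_transform([{'f': 'ham', 'n': 1}, {'f': 'spam', 'n': 3}])
#     array([[ 1.,  0.,  1.],
#            [ 0.,  1.,  3.]])
#     >>> v.get_feature_names()
#     ['f=ham', 'f=spam', 'n']
#     >>> v.inverse_transform(v.transform([{'f': 'spam'}]))
#     [{'f=spam': 1.0}]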
| bsd-3-clause |
flightgong/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |w|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
Winand/pandas | asv_bench/benchmarks/timestamp.py | 2 | 1989 | from .pandas_vb_common import *
from pandas import to_timedelta, Timestamp
import pytz
import datetime
class TimestampProperties(object):
goal_time = 0.2
def setup(self):
self.ts = Timestamp('2017-08-25 08:16:14')
def time_tz(self):
self.ts.tz
def time_offset(self):
self.ts.offset
def time_dayofweek(self):
self.ts.dayofweek
def time_weekday_name(self):
self.ts.weekday_name
def time_dayofyear(self):
self.ts.dayofyear
def time_week(self):
self.ts.week
def time_quarter(self):
self.ts.quarter
def time_days_in_month(self):
self.ts.days_in_month
def time_freqstr(self):
self.ts.freqstr
def time_is_month_start(self):
self.ts.is_month_start
def time_is_month_end(self):
self.ts.is_month_end
def time_is_quarter_start(self):
self.ts.is_quarter_start
def time_is_quarter_end(self):
self.ts.is_quarter_end
    def time_is_year_start(self):
        self.ts.is_year_start
    def time_is_year_end(self):
        self.ts.is_year_end
    def time_is_leap_year(self):
        self.ts.is_leap_year
def time_microsecond(self):
self.ts.microsecond
class TimestampOps(object):
goal_time = 0.2
def setup(self):
self.ts = Timestamp('2017-08-25 08:16:14')
self.ts_tz = Timestamp('2017-08-25 08:16:14', tz='US/Eastern')
dt = datetime.datetime(2016, 3, 27, 1)
self.tzinfo = pytz.timezone('CET').localize(dt, is_dst=False).tzinfo
self.ts2 = Timestamp(dt)
def time_replace_tz(self):
self.ts.replace(tzinfo=pytz.timezone('US/Eastern'))
def time_replace_across_dst(self):
self.ts2.replace(tzinfo=self.tzinfo)
def time_replace_None(self):
self.ts_tz.replace(tzinfo=None)
def time_to_pydatetime(self):
self.ts.to_pydatetime()
def time_to_pydatetime_tz(self):
self.ts_tz.to_pydatetime()
| bsd-3-clause |
toobaz/pandas | pandas/tests/arithmetic/test_period.py | 2 | 46915 | # Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
],
)
def test_parr_add_sub_datetime_scalar(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_parr_add_sub_dt64_array_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=3)
dti = pd.date_range("2016-01-01", periods=3)
dtarr = dti.values
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + dtarr
with pytest.raises(TypeError):
dtarr + rng
with pytest.raises(TypeError):
rng - dtarr
with pytest.raises(TypeError):
dtarr - rng
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# minute frequency with n != 1. A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)
expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
other = not_daily
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
other = not_hourly
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014", "2024", freq="A")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = pd.period_range("2014-01", "2016-12", freq="M")
expected = pd.period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014-01", "2016-12", freq="M")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_parr_add_sub_td64_nat(self, box_transpose_fail):
# GH#23320 special handling for timedelta64("NaT")
box, transpose = box_transpose_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
other - obj
class TestPeriodSeriesArithmetic:
def test_ops_series_timedelta(self):
# GH#13043
ser = pd.Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
assert ser.dtype == "Period[D]"
expected = pd.Series(
[pd.Period("2015-01-02", freq="D"), pd.Period("2015-01-03", freq="D")],
name="xxx",
)
result = ser + pd.Timedelta("1 days")
tm.assert_series_equal(result, expected)
result = pd.Timedelta("1 days") + ser
tm.assert_series_equal(result, expected)
result = ser + pd.tseries.offsets.Day()
tm.assert_series_equal(result, expected)
result = pd.tseries.offsets.Day() + ser
tm.assert_series_equal(result, expected)
def test_ops_series_period(self):
# GH#13043
ser = pd.Series(
[pd.Period("2015-01-01", freq="D"), pd.Period("2015-01-02", freq="D")],
name="xxx",
)
assert ser.dtype == "Period[D]"
per = pd.Period("2015-01-10", freq="D")
off = per.freq
# dtype will be object because of original dtype
expected = pd.Series([9 * off, 8 * off], name="xxx", dtype=object)
tm.assert_series_equal(per - ser, expected)
tm.assert_series_equal(ser - per, -1 * expected)
s2 = pd.Series(
[pd.Period("2015-01-05", freq="D"), pd.Period("2015-01-04", freq="D")],
name="xxx",
)
assert s2.dtype == "Period[D]"
expected = pd.Series([4 * off, 2 * off], name="xxx", dtype=object)
tm.assert_series_equal(s2 - ser, expected)
tm.assert_series_equal(ser - s2, -1 * expected)
class TestPeriodIndexSeriesMethods:
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
tm.assert_equal(result, expected)
ser = pd.Series(values)
result = func(ser)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
expected = PeriodIndex(
["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx"
)
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period("2011-01", freq="M")
off = idx.freq
exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx")
tm.assert_index_equal(result, exp)
result = Period("2011-01", freq="M") - idx
exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name="idx")
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize("ng", ["str", 1.5])
@pytest.mark.parametrize(
"func",
[
lambda obj, ng: obj + ng,
lambda obj, ng: ng + obj,
lambda obj, ng: obj - ng,
lambda obj, ng: ng - obj,
lambda obj, ng: np.add(obj, ng),
lambda obj, ng: np.add(ng, obj),
lambda obj, ng: np.subtract(obj, ng),
lambda obj, ng: np.subtract(ng, obj),
],
)
def test_parr_ops_errors(self, ng, func, box_with_array):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
obj = tm.box_expected(idx, box_with_array)
msg = (
r"unsupported operand type\(s\)|can only concatenate|"
r"must be str|object to str implicitly"
)
with pytest.raises(TypeError, match=msg):
func(obj, ng)
def test_pi_ops_nat(self):
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
)
expected = PeriodIndex(
["2011-03", "2011-04", "NaT", "2011-06"], freq="M", name="idx"
)
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="2M", name="idx"
)
expected = PeriodIndex(
["2011-07", "2011-08", "NaT", "2011-10"], freq="2M", name="idx"
)
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
)
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(
["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx"
)
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(
["2011-05", "2011-01", "NaT", "2011-06"], freq="M", name="idx"
)
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(
["2010-12", "2010-12", "NaT", "2010-12"], freq="M", name="idx"
)
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(
["2010-10", "2010-12", "NaT", "2011-06"], freq="M", name="idx"
)
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
freq="D",
name="idx",
)
f = lambda x: x + pd.offsets.Day()
exp = PeriodIndex(
["2011-01-02", "2011-02-02", "2011-03-02", "2011-04-02"],
freq="D",
name="idx",
)
self._check(idx, f, exp)
f = lambda x: x + pd.offsets.Day(2)
exp = PeriodIndex(
["2011-01-03", "2011-02-03", "2011-03-03", "2011-04-03"],
freq="D",
name="idx",
)
self._check(idx, f, exp)
f = lambda x: x - pd.offsets.Day(2)
exp = PeriodIndex(
["2010-12-30", "2011-01-30", "2011-02-27", "2011-03-30"],
freq="D",
name="idx",
)
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
freq="D",
name="idx",
)
ser = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
for obj in [idx, ser]:
msg = r"Input has different freq=2H from Period.*?\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
obj + pd.offsets.Hour(2)
with pytest.raises(IncompatibleFrequency, match=msg):
pd.offsets.Hour(2) + obj
msg = r"Input has different freq=-2H from Period.*?\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
obj - pd.offsets.Hour(2)
def test_pi_sub_period(self):
# GH#13071
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
result = idx - pd.Period("2012-01", freq="M")
off = idx.freq
exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name="idx")
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period("2012-01", freq="M"))
tm.assert_index_equal(result, exp)
result = pd.Period("2012-01", freq="M") - idx
exp = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name="idx")
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period("2012-01", freq="M"), idx)
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx")
tm.assert_index_equal(idx - pd.Period("NaT", freq="M"), exp)
tm.assert_index_equal(pd.Period("NaT", freq="M") - idx, exp)
def test_pi_sub_pdnat(self):
# GH#13071
idx = PeriodIndex(
["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
)
exp = pd.TimedeltaIndex([pd.NaT] * 4, name="idx")
tm.assert_index_equal(pd.NaT - idx, exp)
tm.assert_index_equal(idx - pd.NaT, exp)
def test_pi_sub_period_nat(self):
# GH#13071
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
result = idx - pd.Period("2012-01", freq="M")
off = idx.freq
exp = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name="idx")
tm.assert_index_equal(result, exp)
result = pd.Period("2012-01", freq="M") - idx
exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name="idx")
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx")
tm.assert_index_equal(idx - pd.Period("NaT", freq="M"), exp)
tm.assert_index_equal(pd.Period("NaT", freq="M") - idx, exp)
| bsd-3-clause |
damaggu/SAMRI | samri/report/registration.py | 1 | 3055 | import hashlib
import multiprocessing as mp
import pandas as pd
from os import path
from joblib import Parallel, delayed
from nipype.interfaces import ants, fsl
def measure_sim(path_template, substitutions, reference,
metric="MI",
radius_or_number_of_bins = 8,
sampling_strategy = "None",
sampling_percentage=0.3,
mask="",
):
"""Return a similarity metric score for two 3d images"""
image_path = path_template.format(**substitutions)
image_path = path.abspath(path.expanduser(image_path))
#some BIDS identifier combinations may not exist:
if not path.isfile(image_path):
return {}
file_data = {}
file_data["path"] = image_path
file_data["session"] = substitutions["session"]
file_data["subject"] = substitutions["subject"]
file_data["acquisition"] = substitutions["acquisition"]
if "/func/" in path_template or "/dwi/" in path_template:
image_name = path.basename(file_data["path"])
merged_image_name = "merged_"+image_name
merged_image_path = path.join("/tmp",merged_image_name)
if not path.isfile(merged_image_path):
temporal_mean = fsl.MeanImage()
temporal_mean.inputs.in_file = image_path
temporal_mean.inputs.out_file = merged_image_path
temporal_mean_res = temporal_mean.run()
image_path = temporal_mean_res.outputs.out_file
else:
image_path = merged_image_path
sim = ants.MeasureImageSimilarity()
sim.inputs.dimension = 3
sim.inputs.metric = metric
sim.inputs.fixed_image = reference
sim.inputs.moving_image = image_path
sim.inputs.metric_weight = 1.0
sim.inputs.radius_or_number_of_bins = radius_or_number_of_bins
sim.inputs.sampling_strategy = sampling_strategy
sim.inputs.sampling_percentage = sampling_percentage
if mask:
sim.inputs.fixed_image_mask = mask
#sim.inputs.moving_image_mask = 'mask.nii.gz'
sim_res = sim.run()
file_data["similarity"] = sim_res.outputs.similarity
return file_data
def get_scores(file_template, substitutions, reference,
metric="MI",
radius_or_number_of_bins = 8,
sampling_strategy = "None",
sampling_percentage=0.3,
save_as="",
mask="",
):
"""Create a `pandas.DataFrame` (optionally savable as `.csv`), containing the similarity scores and BIDS identifier fields for images from a BIDS directory.
"""
reference = path.abspath(path.expanduser(reference))
n_jobs = mp.cpu_count()-2
similarity_data = Parallel(n_jobs=n_jobs, verbose=0, backend="threading")(map(delayed(measure_sim),
[file_template]*len(substitutions),
substitutions,
[reference]*len(substitutions),
[metric]*len(substitutions),
[radius_or_number_of_bins]*len(substitutions),
[sampling_strategy]*len(substitutions),
[sampling_percentage]*len(substitutions),
[mask]*len(substitutions),
))
df = pd.DataFrame.from_dict(similarity_data)
	df.dropna(axis=0, how='any', inplace=True) #some rows will be empty
if save_as:
save_as = path.abspath(path.expanduser(save_as))
if save_as.lower().endswith('.csv'):
df.to_csv(save_as)
else:
raise ValueError("Please specify an output path ending in any one of "+",".join((".csv",))+".")
return df
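if __name__ == '__main__':
	# Minimal usage sketch: the template, subject/session identifiers, and
	# reference path below are hypothetical placeholders rather than files
	# shipped with SAMRI; point them at a real preprocessed BIDS directory
	# and template image before running.
	example_template = '~/ni_data/bids/sub-{subject}/ses-{session}/anat/' \
		'sub-{subject}_ses-{session}_acq-{acquisition}_T2w.nii.gz'
	example_substitutions = [
		{'subject': subject, 'session': session, 'acquisition': 'TurboRARE'}
		for subject in ('4001', '4002')
		for session in ('ofM', 'ofMaF')
		]
	scores = get_scores(example_template, example_substitutions,
		'~/ni_data/templates/dsurqec_200micron.nii',
		save_as='~/similarity_scores.csv',
		)
	print(scores)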
| gpl-3.0 |
wanderknight/trading-with-python | cookbook/getDataFromYahooFinance.py | 77 | 1391 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 18:37:23 2011
@author: jev
"""
from urllib import urlretrieve
from urllib2 import urlopen
from pandas import Index, DataFrame
from datetime import datetime
import matplotlib.pyplot as plt
sDate = (2005,1,1)
eDate = (2011,10,1)
symbol = 'SPY'
fName = symbol+'.csv'
try: # try to load saved csv file, otherwise get from the net
fid = open(fName)
lines = fid.readlines()
fid.close()
print 'Loaded from ' , fName
except Exception as e:
print e
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
print 'Downloading from ', urlStr
urlretrieve(urlStr,symbol+'.csv')
lines = urlopen(urlStr).readlines()
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
df.plot(secondary_y=['volume'])
| bsd-3-clause |
abhishekgahlot/scikit-learn | sklearn/decomposition/nmf.py | 15 | 19010 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
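# A minimal worked example (illustrative only): for x = np.array([3.0, 4.0]),
# squared_norm(x) is 25.0, so norm(x) returns 5.0, the usual Euclidean norm.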
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
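# Illustrative check: trace_dot(A, B) equals np.trace(np.dot(A, B.T)) because
# both reduce to the element-wise sum of A * B; for A = B = np.eye(3) either
# form gives 3.0, but trace_dot never materializes the full matrix product.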
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
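# Worked example of Hoyer's measure (illustrative only): for x = [1, 0, 0, 0],
# sqrt_n = 2, ||x||_1 = 1 and ||x||_2 = 1, giving (2 - 1) / (2 - 1) = 1
# (maximally sparse); for x = [1, 1, 1, 1] it gives (2 - 4 / 2) / (2 - 1) = 0
# (fully dense).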
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
        'ar': Fills the zero entries with small random values.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
Remarks
-------
This implements the algorithm described in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
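# A minimal usage sketch for the NNDSVD initializer above, assuming only NumPy:
# the returned factors are non-negative, have the requested inner dimension,
# and give a rough starting approximation of X for the projected-gradient
# updates that follow (helper name and data are illustrative only).
def _initialize_nmf_demo():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(20, 10))
    W, H = _initialize_nmf(X, n_components=4)
    assert W.shape == (20, 4) and H.shape == (4, 10)
    assert (W >= 0).all() and (H >= 0).all()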
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
    min_{H >= 0} || WH - V ||_F
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values allow finding a better step size but lead to a longer
        line search. 0.1 is a commonly used value in the optimization
        literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
Reference
---------
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
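# A minimal usage sketch for _nls_subproblem, assuming only NumPy: starting
# from a non-negative guess, the projected gradient iterations keep H >= 0 and
# do not increase || W H - V ||_F relative to the starting point (helper name
# and data are illustrative only).
def _nls_subproblem_demo():
    rng = np.random.RandomState(0)
    V = np.abs(rng.randn(30, 8))
    W = np.abs(rng.randn(30, 5))
    H0 = np.abs(rng.randn(5, 8))
    H, _, _ = _nls_subproblem(V, W, H0.copy(), tol=1e-4, max_iter=200)
    assert (H >= 0).all()
    assert norm(np.dot(W, H) - V) <= norm(np.dot(W, H0) - V)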
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Parameters
----------
n_components : int or None
        Number of components. If n_components is not set, all components
        are kept.
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
        the fit produced by the model. ``|| X - WH ||_F``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
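    # The sparseness options above impose their penalties by augmenting the
    # least-squares system: appending sqrt(eta) * I (an L2 penalty) or a row of
    # sqrt(beta) * ones (a squared column-sum penalty) to the factor, with
    # matching zero rows appended to the data, makes the plain residual of the
    # augmented system equal the penalized objective.  A minimal unconstrained
    # sketch of that identity for the L2 case (illustrative only):
    @staticmethod
    def _augmentation_identity_sketch(eta=0.5):
        rng = np.random.RandomState(0)
        A, b = rng.randn(20, 5), rng.randn(20)
        A_aug = np.vstack([A, np.sqrt(eta) * np.eye(5)])
        b_aug = np.concatenate([b, np.zeros(5)])
        x_aug = np.linalg.lstsq(A_aug, b_aug)[0]
        x_ridge = np.linalg.solve(np.dot(A.T, A) + eta * np.eye(5),
                                  np.dot(A.T, b))
        assert np.allclose(x_aug, x_ridge)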
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
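# A brief usage sketch of the estimator API above: fit on one non-negative
# matrix and project new rows onto the learned components.  The data here is
# random and purely illustrative.
def _nmf_usage_demo():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(50, 10))
    X_new = np.abs(rng.randn(5, 10))
    model = NMF(n_components=3, random_state=0)
    W = model.fit_transform(X)        # (n_samples, n_components) activations
    W_new = model.transform(X_new)    # activations for previously unseen rows
    assert W.shape == (50, 3) and W_new.shape == (5, 3)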
| bsd-3-clause |
dsm054/pandas | pandas/tests/io/test_stata.py | 2 | 64716 | # -*- coding: utf-8 -*-
# pylint: disable=E1101
import datetime as dt
import io
import gzip
import os
import struct
import warnings
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
from pandas.compat import iterkeys
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import (InvalidColumnName, PossiblePrecisionLoss,
StataMissingValue, StataReader, read_stata)
@pytest.fixture
def dirpath(datapath):
return datapath("io", "data")
@pytest.fixture
def parsed_114(dirpath):
dta14_114 = os.path.join(dirpath, 'stata5_114.dta')
parsed_114 = read_stata(dta14_114, convert_dates=True)
parsed_114.index.name = 'index'
return parsed_114
class TestStata(object):
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath("io", "data")
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
self.dta22_118 = os.path.join(self.dirpath, 'stata14_118.dta')
self.dta23 = os.path.join(self.dirpath, 'stata15.dta')
self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta')
self.dta25_118 = os.path.join(self.dirpath, 'stata16_118.dta')
self.stata_dates = os.path.join(self.dirpath, 'stata13_dates.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
@pytest.mark.parametrize('version', [114, 117])
def test_read_empty_dta(self, version):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path, write_index=False, version=version)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
with StataReader(self.dta1_114) as rdr:
with tm.assert_produces_warning(UserWarning):
parsed_114_data = rdr.data()
with StataReader(self.dta1_114) as rdr:
parsed_114_read = rdr.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
@pytest.mark.parametrize(
'file', ['dta1_114', 'dta1_117'])
def test_read_dta1(self, file):
file = getattr(self, file)
parsed = self.read_dta(file)
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed, expected)
def test_read_dta2(self):
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
assert len(w) == 3
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected,
check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_115, expected,
check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_117, expected,
check_datetimelike_compat=True)
@pytest.mark.parametrize(
'file', ['dta3_113', 'dta3_114', 'dta3_115', 'dta3_117'])
def test_read_dta3(self, file):
file = getattr(self, file)
parsed = self.read_dta(file)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize(
'file', ['dta4_113', 'dta4_114', 'dta4_115', 'dta4_117'])
def test_read_dta4(self, file):
file = getattr(self, file)
parsed = self.read_dta(file)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category')
for col in expected], axis=1)
# stata doesn't save .category metadata
tm.assert_frame_equal(parsed, expected, check_categorical=False)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_dta18(self):
parsed_118 = self.read_dta(self.dta22_118)
parsed_118["Bytes"] = parsed_118["Bytes"].astype('O')
expected = DataFrame.from_records(
[['Cat', 'Bogota', u'Bogotá', 1, 1.0, u'option b Ünicode', 1.0],
['Dog', 'Boston', u'Uzunköprü', np.nan, np.nan, np.nan, np.nan],
['Plane', 'Rome', u'Tromsø', 0, 0.0, 'option a', 0.0],
['Potato', 'Tokyo', u'Elâzığ', -4, 4.0, 4, 4],
['', '', '', 0, 0.3332999, 'option a', 1 / 3.]
],
columns=['Things', 'Cities', 'Unicode_Cities_Strl',
'Ints', 'Floats', 'Bytes', 'Longs'])
expected["Floats"] = expected["Floats"].astype(np.float32)
for col in parsed_118.columns:
tm.assert_almost_equal(parsed_118[col], expected[col])
with StataReader(self.dta22_118) as rdr:
vl = rdr.variable_labels()
vl_expected = {u'Unicode_Cities_Strl':
u'Here are some strls with Ünicode chars',
u'Longs': u'long data',
u'Things': u'Here are some things',
u'Bytes': u'byte data',
u'Ints': u'int data',
u'Cities': u'Here are some cities',
u'Floats': u'float data'}
tm.assert_dict_equal(vl, vl_expected)
assert rdr.data_label == u'This is a Ünicode data label'
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original, check_index_type=False)
@pytest.mark.parametrize('version', [114, 117])
def test_read_write_dta10(self, version):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer',
'floating', 'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'}, version=version)
written_and_read_again = self.read_dta(path)
# original.index is np.int32, read index is np.int64
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original, check_index_type=False)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5, 4), columns=list('abcd'))
df.loc[2, 'a':'c'] = np.nan
df_copy = df.copy()
with tm.ensure_clean() as path:
df.to_stata(path, write_index=False)
tm.assert_frame_equal(df, df_copy)
@pytest.mark.parametrize('version', [114, 117])
def test_encoding(self, version):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
with tm.assert_produces_warning(FutureWarning):
encoded = read_stata(self.dta_encoding, encoding='latin-1')
result = encoded.kreis1849[0]
expected = raw.kreis1849[0]
assert result == expected
assert isinstance(result, compat.string_types)
with tm.ensure_clean() as path:
with tm.assert_produces_warning(FutureWarning):
encoded.to_stata(path, write_index=False, version=version,
encoding='latin-1')
reread_encoded = read_stata(path)
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number',
'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number',
'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with tm.assert_produces_warning(pd.io.stata.InvalidColumnName):
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), formatted)
@pytest.mark.parametrize('version', [114, 117])
def test_read_write_dta12(self, version):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', InvalidColumnName)
original.to_stata(path, None, version=version)
# should get a warning for that format.
assert len(w) == 1
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2 ** 9, dtype=np.int16)
s2 = Series(2 ** 17, dtype=np.int32)
s3 = Series(2 ** 33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
@pytest.mark.parametrize('version', [114, 117])
@pytest.mark.parametrize(
'file', ['dta14_113', 'dta14_114', 'dta14_115', 'dta14_117'])
def test_read_write_reread_dta14(self, file, parsed_114, version):
file = getattr(self, file)
parsed = self.read_dta(file)
parsed.index.name = 'index'
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col]._convert(datetime=True, numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(
expected['date_td'], errors='coerce')
tm.assert_frame_equal(parsed_114, parsed)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'}, version=version)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), parsed_114)
@pytest.mark.parametrize(
'file', ['dta15_113', 'dta15_114', 'dta15_115', 'dta15_117'])
def test_read_write_reread_dta15(self, file):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(
datetime.strptime, args=('%Y-%m-%d',))
file = getattr(self, file)
parsed = self.read_dta(file)
tm.assert_frame_equal(expected, parsed)
@pytest.mark.parametrize('version', [114, 117])
def test_timestamp_and_label(self, version):
original = DataFrame([(1,)], columns=['variable'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp,
data_label=data_label,
version=version)
with StataReader(path) as reader:
assert reader.time_stamp == '29 Feb 2000 14:21'
assert reader.data_label == data_label
@pytest.mark.parametrize('version', [114, 117])
def test_invalid_timestamp(self, version):
original = DataFrame([(1,)], columns=['variable'])
time_stamp = '01 Jan 2000, 00:00:00'
with tm.ensure_clean() as path:
with pytest.raises(ValueError):
original.to_stata(path, time_stamp=time_stamp,
version=version)
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
@pytest.mark.parametrize('version', [114, 117])
def test_nan_to_missing_value(self, version):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
pytest.raises(
KeyError, lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with tm.assert_produces_warning(PossiblePrecisionLoss):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path, {0: 'tc'})
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_105(self):
# Data obtained from:
# http://go.worldbank.org/ZXY29PVJ21
dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta')
df = pd.read_stata(dpath)
df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
df0 = pd.DataFrame(df0)
df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
df0['clustnum'] = df0["clustnum"].astype(np.int16)
df0['pri_schl'] = df0["pri_schl"].astype(np.int8)
df0['psch_num'] = df0["psch_num"].astype(np.int8)
df0['psch_dis'] = df0["psch_dis"].astype(np.float32)
tm.assert_frame_equal(df.head(3), df0)
def test_value_labels_old_format(self):
# GH 19417
#
# Test that value_labels() returns an empty dict if the file format
# predates supporting value labels.
dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta')
reader = StataReader(dpath)
assert reader.value_labels() == {}
reader.close()
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = {c: c for c in columns}
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
@pytest.mark.parametrize('version', [114, 117])
@pytest.mark.parametrize('byteorder', ['>', '<'])
def test_bool_uint(self, byteorder, version):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path, byteorder=byteorder, version=version)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
with StataReader(self.dta16_115) as rdr:
sr_115 = rdr.variable_labels()
with StataReader(self.dta16_117) as rdr:
sr_117 = rdr.variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k, v in compat.iteritems(sr_115):
assert k in sr_117
assert v == sr_117[k]
assert k in keys
assert v in labels
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len,
'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
with StataReader(path) as sr:
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
assert int(variable[1:]) == int(fmt[1:-1])
assert int(variable[1:]) == typ
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len,
'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with pytest.raises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b', 'h', 'l')
df = DataFrame([[0.0]], columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
with StataReader(path) as rdr:
valid_range = rdr.VALID_RANGE
expected_values = ['.' + chr(97 + i) for i in range(26)]
expected_values.insert(0, '.')
for t in types:
offset = valid_range[t][1]
for i in range(0, 27):
val = StataMissingValue(offset + 1 + i)
assert val.string == expected_values[i]
# Test extremes for floats
val = StataMissingValue(struct.unpack('<f', b'\x00\x00\x00\x7f')[0])
assert val.string == '.'
val = StataMissingValue(struct.unpack('<f', b'\x00\xd0\x00\x7f')[0])
assert val.string == '.z'
# Test extremes for floats
val = StataMissingValue(struct.unpack(
'<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
assert val.string == '.'
val = StataMissingValue(struct.unpack(
'<d', b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
assert val.string == '.z'
@pytest.mark.parametrize(
'file', ['dta17_113', 'dta17_115', 'dta17_117'])
def test_missing_value_conversion(self, file):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
keys.sort()
data = []
for i in range(27):
row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data, columns=columns)
parsed = read_stata(getattr(self, file), convert_missing=True)
tm.assert_frame_equal(parsed, expected)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(
datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([pd.NaT] * 7)
columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
'date_th', 'date_ty']
        # Fixes for weekly, quarterly, half and year
expected[2][2] = datetime(9999, 12, 24)
expected[2][3] = datetime(9999, 12, 1)
expected[2][4] = datetime(9999, 10, 1)
expected[2][5] = datetime(9999, 7, 1)
expected[4][2] = datetime(2262, 4, 16)
expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
expected[5][2] = expected[5][3] = expected[
5][4] = datetime(1677, 10, 1)
expected[5][5] = expected[5][6] = datetime(1678, 1, 1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115,
check_datetimelike_compat=True)
tm.assert_frame_equal(expected, parsed_117,
check_datetimelike_compat=True)
date_conversion = {c: c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = 'index'
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected,
check_datetimelike_compat=True)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
no_conversion = read_stata(self.dta15_117,
convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(self.dta15_117,
convert_dates=True,
preserve_dtypes=False)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
columns = ['byte_', 'int_', 'long_']
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, dropped)
# See PR 10757
columns = ['int_', 'long_', 'byte_']
expected = expected[columns]
reordered = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, reordered)
with pytest.raises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
with pytest.raises(ValueError):
columns = ['byte_', 'int_', 'long_', 'not_found']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
@pytest.mark.parametrize('version', [114, 117])
@pytest.mark.filterwarnings(
"ignore:\\nStata value:pandas.io.stata.ValueLabelTypeMismatch"
)
def test_categorical_writing(self, version):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled', 'unlabeled'])
expected = original.copy()
# these are all categoricals
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
expected['incompletely_labeled'] = expected[
'incompletely_labeled'].apply(str)
expected['unlabeled'] = expected['unlabeled'].apply(str)
expected = pd.concat([expected[col].astype('category')
for col in expected], axis=1)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index('index')
tm.assert_frame_equal(res, expected, check_categorical=False)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = pd.DataFrame.from_records(
[['a' * 10000],
['b' * 10000],
['c' * 10000],
['d' * 10000]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
with tm.ensure_clean() as path:
pytest.raises(ValueError, original.to_stata, path)
original = pd.DataFrame.from_records(
[['a'],
['b'],
['c'],
['d'],
[1]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
with tm.assert_produces_warning(pd.io.stata.ValueLabelTypeMismatch):
original.to_stata(path)
# should get a warning for mixed content
@pytest.mark.parametrize('version', [114, 117])
def test_categorical_with_stata_missing_values(self, version):
values = [['a' + str(i)] for i in range(120)]
values.append([np.nan])
original = pd.DataFrame.from_records(values, columns=['many_labels'])
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index('index')
tm.assert_frame_equal(res, original, check_categorical=False)
@pytest.mark.parametrize(
'file', ['dta19_115', 'dta19_117'])
def test_categorical_order(self, file):
# Directly construct using expected codes
# Format is is_cat, col_name, labels (in order), underlying data
expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
(True, 'reverse', ['a', 'b', 'c',
'd', 'e'], np.arange(5)[::-1]),
(True, 'noorder', ['a', 'b', 'c', 'd',
'e'], np.array([2, 1, 4, 0, 3])),
(True, 'floating', [
'a', 'b', 'c', 'd', 'e'], np.arange(0, 5)),
(True, 'float_missing', [
'a', 'd', 'e'], np.array([0, 1, 2, -1, -1])),
(False, 'nolabel', [
1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'],
np.arange(5))]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
expected = DataFrame.from_dict(OrderedDict(cols))
        # Read with and without categoricals, ensure order is identical
file = getattr(self, file)
parsed = read_stata(file)
tm.assert_frame_equal(expected, parsed, check_categorical=False)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col]):
tm.assert_series_equal(expected[col].cat.codes,
parsed[col].cat.codes)
tm.assert_index_equal(expected[col].cat.categories,
parsed[col].cat.categories)
@pytest.mark.parametrize(
'file', ['dta20_115', 'dta20_117'])
def test_categorical_sorting(self, file):
parsed = read_stata(getattr(self, file))
# Sort based on codes, not strings
parsed = parsed.sort_values("srh", na_position='first')
# Don't sort index
parsed.index = np.arange(parsed.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
cat = pd.Categorical.from_codes(codes=codes, categories=categories)
expected = pd.Series(cat, name='srh')
tm.assert_series_equal(expected, parsed["srh"],
check_categorical=False)
@pytest.mark.parametrize(
'file', ['dta19_115', 'dta19_117'])
def test_categorical_ordering(self, file):
file = getattr(self, file)
parsed = read_stata(file)
parsed_unordered = read_stata(file,
order_categoricals=False)
for col in parsed:
if not is_categorical_dtype(parsed[col]):
continue
assert parsed[col].cat.ordered
assert not parsed_unordered[col].cat.ordered
@pytest.mark.parametrize(
'file', ['dta1_117', 'dta2_117', 'dta3_117',
'dta4_117', 'dta14_117', 'dta15_117',
'dta16_117', 'dta17_117', 'dta18_117',
'dta19_117', 'dta20_117'])
@pytest.mark.parametrize(
'chunksize', [1, 2])
@pytest.mark.parametrize(
'convert_categoricals', [False, True])
@pytest.mark.parametrize(
'convert_dates', [False, True])
def test_read_chunks_117(self, file, chunksize,
convert_categoricals, convert_dates):
fname = getattr(self, file)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
itr = read_stata(
fname, iterator=True,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos + chunksize, :]
tm.assert_frame_equal(
from_frame, chunk, check_dtype=False,
check_datetimelike_compat=True,
check_categorical=False)
pos += chunksize
itr.close()
def test_iterator(self):
fname = self.dta3_117
parsed = read_stata(fname)
with read_stata(fname, iterator=True) as itr:
chunk = itr.read(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = list(itr)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
with read_stata(fname, iterator=True) as itr:
chunk = itr.get_chunk(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = itr.get_chunk()
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
# GH12153
with read_stata(fname, chunksize=4) as itr:
from_chunks = pd.concat(itr)
tm.assert_frame_equal(parsed, from_chunks)
@pytest.mark.parametrize(
'file', ['dta2_115', 'dta3_115', 'dta4_115',
'dta14_115', 'dta15_115', 'dta16_115',
'dta17_115', 'dta18_115', 'dta19_115',
'dta20_115'])
@pytest.mark.parametrize(
'chunksize', [1, 2])
@pytest.mark.parametrize(
'convert_categoricals', [False, True])
@pytest.mark.parametrize(
'convert_dates', [False, True])
def test_read_chunks_115(self, file, chunksize,
convert_categoricals, convert_dates):
fname = getattr(self, file)
# Read the whole file
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
# Compare to what we get when reading by chunk
itr = read_stata(
fname, iterator=True,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos + chunksize, :]
tm.assert_frame_equal(
from_frame, chunk, check_dtype=False,
check_datetimelike_compat=True,
check_categorical=False)
pos += chunksize
itr.close()
def test_read_chunks_columns(self):
fname = self.dta3_117
columns = ['quarter', 'cpi', 'm1']
chunksize = 2
parsed = read_stata(fname, columns=columns)
with read_stata(fname, iterator=True) as itr:
pos = 0
for j in range(5):
chunk = itr.read(chunksize, columns=columns)
if chunk is None:
break
from_frame = parsed.iloc[pos:pos + chunksize, :]
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
pos += chunksize
@pytest.mark.parametrize('version', [114, 117])
def test_write_variable_labels(self, version):
# GH 13631, add support for writing variable labels
original = pd.DataFrame({'a': [1, 2, 3, 4],
'b': [1.0, 3.0, 27.0, 81.0],
'c': ['Atlanta', 'Birmingham',
'Cincinnati', 'Detroit']})
original.index.name = 'index'
variable_labels = {'a': 'City Rank', 'b': 'City Exponent', 'c': 'City'}
with tm.ensure_clean() as path:
original.to_stata(path,
variable_labels=variable_labels,
version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
expected_labels = {'index': '',
'a': 'City Rank',
'b': 'City Exponent',
'c': 'City'}
assert read_labels == expected_labels
variable_labels['index'] = 'The Index'
with tm.ensure_clean() as path:
original.to_stata(path,
variable_labels=variable_labels,
version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
assert read_labels == variable_labels
@pytest.mark.parametrize('version', [114, 117])
def test_invalid_variable_labels(self, version):
original = pd.DataFrame({'a': [1, 2, 3, 4],
'b': [1.0, 3.0, 27.0, 81.0],
'c': ['Atlanta', 'Birmingham',
'Cincinnati', 'Detroit']})
original.index.name = 'index'
variable_labels = {'a': 'very long' * 10,
'b': 'City Exponent',
'c': 'City'}
with tm.ensure_clean() as path:
with pytest.raises(ValueError):
original.to_stata(path,
variable_labels=variable_labels,
version=version)
variable_labels['a'] = u'invalid character Œ'
with tm.ensure_clean() as path:
with pytest.raises(ValueError):
original.to_stata(path,
variable_labels=variable_labels,
version=version)
def test_write_variable_label_errors(self):
original = pd.DataFrame({'a': [1, 2, 3, 4],
'b': [1.0, 3.0, 27.0, 81.0],
'c': ['Atlanta', 'Birmingham',
'Cincinnati', 'Detroit']})
values = [u'\u03A1', u'\u0391',
u'\u039D', u'\u0394',
u'\u0391', u'\u03A3']
variable_labels_utf8 = {'a': 'City Rank',
'b': 'City Exponent',
'c': u''.join(values)}
with pytest.raises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels_utf8)
variable_labels_long = {'a': 'City Rank',
'b': 'City Exponent',
'c': 'A very, very, very long variable label '
'that is too long for Stata which means '
'that it has more than 80 characters'}
with pytest.raises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels_long)
def test_default_date_conversion(self):
# GH 12259
dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000)]
original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
'strs': ['apple', 'banana', 'cherry'],
'dates': dates})
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
reread = read_stata(path, convert_dates=True)
tm.assert_frame_equal(original, reread)
original.to_stata(path,
write_index=False,
convert_dates={'dates': 'tc'})
direct = read_stata(path, convert_dates=True)
tm.assert_frame_equal(reread, direct)
dates_idx = original.columns.tolist().index('dates')
original.to_stata(path,
write_index=False,
convert_dates={dates_idx: 'tc'})
direct = read_stata(path, convert_dates=True)
tm.assert_frame_equal(reread, direct)
def test_unsupported_type(self):
original = pd.DataFrame({'a': [1 + 2j, 2 + 4j]})
with pytest.raises(NotImplementedError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_unsupported_datetype(self):
dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000)]
original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
'strs': ['apple', 'banana', 'cherry'],
'dates': dates})
with pytest.raises(NotImplementedError):
with tm.ensure_clean() as path:
original.to_stata(path, convert_dates={'dates': 'tC'})
dates = pd.date_range('1-1-1990', periods=3, tz='Asia/Hong_Kong')
original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
'strs': ['apple', 'banana', 'cherry'],
'dates': dates})
with pytest.raises(NotImplementedError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_repeated_column_labels(self):
# GH 13923
with pytest.raises(ValueError) as cm:
read_stata(self.dta23, convert_categoricals=True)
        assert 'wolof' in str(cm.value)
def test_stata_111(self):
# 111 is an old version but still used by current versions of
# SAS when exporting to Stata format. We do not know of any
# on-line documentation for this version.
df = read_stata(self.dta24_111)
original = pd.DataFrame({'y': [1, 1, 1, 1, 1, 0, 0, np.NaN, 0, 0],
'x': [1, 2, 1, 3, np.NaN, 4, 3, 5, 1, 6],
'w': [2, np.NaN, 5, 2, 4, 4, 3, 1, 2, 3],
'z': ['a', 'b', 'c', 'd', 'e', '', 'g', 'h',
'i', 'j']})
original = original[['y', 'x', 'w', 'z']]
tm.assert_frame_equal(original, df)
def test_out_of_range_double(self):
# GH 14618
df = DataFrame({'ColumnOk': [0.0,
np.finfo(np.double).eps,
4.49423283715579e+307],
'ColumnTooBig': [0.0,
np.finfo(np.double).eps,
np.finfo(np.double).max]})
with pytest.raises(ValueError) as cm:
with tm.ensure_clean() as path:
df.to_stata(path)
        assert 'ColumnTooBig' in str(cm.value)
df.loc[2, 'ColumnTooBig'] = np.inf
with pytest.raises(ValueError) as cm:
with tm.ensure_clean() as path:
df.to_stata(path)
        assert 'ColumnTooBig' in str(cm.value)
        assert 'infinity' in str(cm.value)
def test_out_of_range_float(self):
original = DataFrame({'ColumnOk': [0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max / 10.0],
'ColumnTooBig': [0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max]})
original.index.name = 'index'
for col in original:
original[col] = original[col].astype(np.float32)
with tm.ensure_clean() as path:
original.to_stata(path)
reread = read_stata(path)
original['ColumnTooBig'] = original['ColumnTooBig'].astype(
np.float64)
tm.assert_frame_equal(original,
reread.set_index('index'))
original.loc[2, 'ColumnTooBig'] = np.inf
with pytest.raises(ValueError) as cm:
with tm.ensure_clean() as path:
original.to_stata(path)
        assert 'ColumnTooBig' in str(cm.value)
        assert 'infinity' in str(cm.value)
def test_path_pathlib(self):
df = tm.makeDataFrame()
df.index.name = 'index'
reader = lambda x: read_stata(x).set_index('index')
result = tm.round_trip_pathlib(df.to_stata, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
df = tm.makeDataFrame()
df.index.name = 'index'
reader = lambda x: read_stata(x).set_index('index')
result = tm.round_trip_localpath(df.to_stata, reader)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize(
'write_index', [True, False])
def test_value_labels_iterator(self, write_index):
# GH 16923
d = {'A': ['B', 'E', 'C', 'A', 'E']}
df = pd.DataFrame(data=d)
df['A'] = df['A'].astype('category')
with tm.ensure_clean() as path:
df.to_stata(path, write_index=write_index)
with pd.read_stata(path, iterator=True) as dta_iter:
value_labels = dta_iter.value_labels()
assert value_labels == {'A': {0: 'A', 1: 'B', 2: 'C', 3: 'E'}}
def test_set_index(self):
# GH 17328
df = tm.makeDataFrame()
df.index.name = 'index'
with tm.ensure_clean() as path:
df.to_stata(path)
reread = pd.read_stata(path, index_col='index')
tm.assert_frame_equal(df, reread)
@pytest.mark.parametrize(
'column', ['ms', 'day', 'week', 'month', 'qtr', 'half', 'yr'])
def test_date_parsing_ignores_format_details(self, column):
# GH 17797
#
# Test that display formats are ignored when determining if a numeric
# column is a date value.
#
# All date types are stored as numbers and the format associated with the
# column denotes both the type of the date and the display format.
#
# STATA supports 9 date types, each of which has distinct units. We test 7
# of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that
# accounts for leap seconds and %tb relies on STATA's business calendar.
df = read_stata(self.stata_dates)
unformatted = df.loc[0, column]
formatted = df.loc[0, column + "_fmt"]
assert unformatted == formatted
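# Illustrative standalone sketch (not part of the original test class): how a
# ``convert_dates`` mapping selects the Stata date unit when writing. The
# format codes ('tc' for milliseconds, 'td' for days) follow Stata's
# documentation; the helper name and data below are made up.
def _sketch_convert_dates_roundtrip():
    import os
    import tempfile
    import pandas as pd

    frame = pd.DataFrame({'dates': pd.to_datetime(['2000-01-01', '2000-06-15'])})
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, 'dates.dta')
        # Store the column with daily resolution (%td); 'tc' would keep ms.
        frame.to_stata(path, convert_dates={'dates': 'td'}, write_index=False)
        roundtripped = pd.read_stata(path)
    # Midnight timestamps survive a daily-resolution round trip unchanged.
    assert (roundtripped['dates'] == frame['dates']).all()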
def test_writer_117(self):
original = DataFrame(data=[['string', 'object', 1, 1, 1, 1.1, 1.1,
np.datetime64('2003-12-25'),
'a', 'a' * 2045, 'a' * 5000, 'a'],
['string-1', 'object-1', 1, 1, 1, 1.1, 1.1,
np.datetime64('2003-12-26'),
'b', 'b' * 2045, '', '']
],
columns=['string', 'object', 'int8', 'int16',
'int32', 'float32', 'float64',
'datetime',
's1', 's2045', 'srtl', 'forced_strl'])
original['object'] = Series(original['object'], dtype=object)
original['int8'] = Series(original['int8'], dtype=np.int8)
original['int16'] = Series(original['int16'], dtype=np.int16)
original['int32'] = original['int32'].astype(np.int32)
original['float32'] = Series(original['float32'], dtype=np.float32)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
copy = original.copy()
with tm.ensure_clean() as path:
original.to_stata(path,
convert_dates={'datetime': 'tc'},
convert_strl=['forced_strl'],
version=117)
written_and_read_again = self.read_dta(path)
# original.index is np.int32, read index is np.int64
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original, check_index_type=False)
tm.assert_frame_equal(original, copy)
def test_convert_strl_name_swap(self):
original = DataFrame([['a' * 3000, 'A', 'apple'],
['b' * 1000, 'B', 'banana']],
columns=['long1' * 10, 'long', 1])
original.index.name = 'index'
with tm.assert_produces_warning(pd.io.stata.InvalidColumnName):
with tm.ensure_clean() as path:
original.to_stata(path, convert_strl=['long', 1], version=117)
reread = self.read_dta(path)
reread = reread.set_index('index')
reread.columns = original.columns
tm.assert_frame_equal(reread, original,
check_index_type=False)
def test_invalid_date_conversion(self):
# GH 12259
dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000)]
original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
'strs': ['apple', 'banana', 'cherry'],
'dates': dates})
with tm.ensure_clean() as path:
with pytest.raises(ValueError):
original.to_stata(path,
convert_dates={'wrong_name': 'tc'})
@pytest.mark.parametrize('version', [114, 117])
def test_nonfile_writing(self, version):
# GH 21041
bio = io.BytesIO()
df = tm.makeDataFrame()
df.index.name = 'index'
with tm.ensure_clean() as path:
df.to_stata(bio, version=version)
bio.seek(0)
with open(path, 'wb') as dta:
dta.write(bio.read())
reread = pd.read_stata(path, index_col='index')
tm.assert_frame_equal(df, reread)
def test_gzip_writing(self):
# writing version 117 requires seek and cannot be used with gzip
df = tm.makeDataFrame()
df.index.name = 'index'
with tm.ensure_clean() as path:
with gzip.GzipFile(path, 'wb') as gz:
df.to_stata(gz, version=114)
with gzip.GzipFile(path, 'rb') as gz:
reread = pd.read_stata(gz, index_col='index')
tm.assert_frame_equal(df, reread)
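# Standalone sketch (an illustration, not part of the original tests): since
# format 117 needs a seekable handle, one workaround is to write into an
# in-memory buffer first and gzip the raw bytes afterwards. Names below are
# made up.
def _sketch_gzip_version_117():
    import gzip
    import io
    import pandas as pd

    frame = pd.DataFrame({'a': [1.0, 2.0, 3.0]})
    buffer = io.BytesIO()
    frame.to_stata(buffer, version=117)  # BytesIO is seekable, so 117 works
    compressed = gzip.compress(buffer.getvalue())
    # To read back, decompress into another seekable buffer.
    reread = pd.read_stata(io.BytesIO(gzip.decompress(compressed)))
    assert list(reread['a']) == [1.0, 2.0, 3.0]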
def test_unicode_dta_118(self):
unicode_df = self.read_dta(self.dta25_118)
columns = ['utf8', 'latin1', 'ascii', 'utf8_strl', 'ascii_strl']
values = [[u'ραηδας', u'PÄNDÄS', 'p', u'ραηδας', 'p'],
[u'ƤĀńĐąŜ', u'Ö', 'a', u'ƤĀńĐąŜ', 'a'],
[u'ᴘᴀᴎᴅᴀS', u'Ü', 'n', u'ᴘᴀᴎᴅᴀS', 'n'],
[' ', ' ', 'd', ' ', 'd'],
[' ', '', 'a', ' ', 'a'],
['', '', 's', '', 's'],
['', '', ' ', '', ' ']]
expected = pd.DataFrame(values, columns=columns)
tm.assert_frame_equal(unicode_df, expected)
| bsd-3-clause |
Barmaley-exe/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 39 | 4706 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# Reference univariate feature scores, used below to check that the best
# features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
"""Check lasso stability path"""
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
"""Check randomized lasso"""
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
"""Check randomized sparse logistic regression"""
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
"""Check randomized sparse logistic regression on sparse data"""
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
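# Rough standalone sketch of the stability-selection idea these estimators
# implement: refit a Lasso on random subsamples with randomly down-weighted
# features and record how often each feature gets a nonzero coefficient.
# Parameter names are ad hoc and do not mirror the estimators' internals.
def _sketch_stability_selection(X, y, alpha=0.1, scaling=0.5,
                                n_resampling=50, random_state=0):
    import numpy as np
    from sklearn.linear_model import Lasso

    rng = np.random.RandomState(random_state)
    n_samples, n_features = X.shape
    counts = np.zeros(n_features)
    for _ in range(n_resampling):
        # Randomly down-weight half of the features and subsample the rows.
        weights = np.where(rng.rand(n_features) < 0.5, scaling, 1.0)
        rows = rng.choice(n_samples, n_samples // 2, replace=False)
        lasso = Lasso(alpha=alpha).fit(X[rows] * weights, y[rows])
        counts += lasso.coef_ != 0
    return counts / n_resampling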
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/datasets/plot_iris_dataset.py | 8 | 1902 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
pl.figure(2, figsize=(8, 6))
pl.clf()
# Plot the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = pl.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=pl.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
pl.show()
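# Optional check (a sketch, not part of the original example): how much of the
# total variance the three plotted PCA directions capture.
pca = PCA(n_components=3).fit(iris.data)
print("Explained variance ratio of the first three components: %s"
      % str(pca.explained_variance_ratio_))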
| bsd-3-clause |
petebachant/seaborn | seaborn/linearmodels.py | 14 | 57371 | """Plotting functions for linear models (broadly construed)."""
from __future__ import division
import copy
import itertools
from textwrap import dedent
import numpy as np
import pandas as pd
from scipy.spatial import distance
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
try:
import statsmodels
assert statsmodels
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .external.six import string_types
from .external.six.moves import range
from . import utils
from . import algorithms as algo
from .palettes import color_palette
from .axisgrid import FacetGrid, PairGrid, _facet_docs
from .distributions import kdeplot
class _LinearPlotter(object):
"""Base class for plotting relational data in tidy format.
To get anything useful done you'll have to inherit from this, but setup
code that can be abstracted out should be put here.
"""
def establish_variables(self, data, **kws):
"""Extract variables from data or use directly."""
self.data = data
# Validate the inputs
any_strings = any([isinstance(v, string_types) for v in kws.values()])
if any_strings and data is None:
raise ValueError("Must pass `data` if using named variables.")
# Set the variables
for var, val in kws.items():
if isinstance(val, string_types):
setattr(self, var, data[val])
else:
setattr(self, var, val)
def dropna(self, *vars):
"""Remove observations with missing data."""
vals = [getattr(self, var) for var in vars]
vals = [v for v in vals if v is not None]
not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)
for var in vars:
val = getattr(self, var)
if val is not None:
setattr(self, var, val[not_na])
def plot(self, ax):
raise NotImplementedError
class _RegressionPlotter(_LinearPlotter):
"""Plotter for numeric independent variables with regression model.
This does the computations and drawing for the `regplot` function, and
is thus also used indirectly by `lmplot`.
"""
def __init__(self, x, y, data=None, x_estimator=None, x_bins=None,
x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
units=None, order=1, logistic=False, lowess=False,
robust=False, logx=False, x_partial=None, y_partial=None,
truncate=False, dropna=True, x_jitter=None, y_jitter=None,
color=None, label=None):
# Set member attributes
self.x_estimator = x_estimator
self.ci = ci
self.x_ci = ci if x_ci == "ci" else x_ci
self.n_boot = n_boot
self.scatter = scatter
self.fit_reg = fit_reg
self.order = order
self.logistic = logistic
self.lowess = lowess
self.robust = robust
self.logx = logx
self.truncate = truncate
self.x_jitter = x_jitter
self.y_jitter = y_jitter
self.color = color
self.label = label
# Validate the regression options:
if sum((order > 1, logistic, robust, lowess, logx)) > 1:
raise ValueError("Mutually exclusive regression options.")
# Extract the data vals from the arguments or passed dataframe
self.establish_variables(data, x=x, y=y, units=units,
x_partial=x_partial, y_partial=y_partial)
# Drop null observations
if dropna:
self.dropna("x", "y", "units", "x_partial", "y_partial")
# Regress nuisance variables out of the data
if self.x_partial is not None:
self.x = self.regress_out(self.x, self.x_partial)
if self.y_partial is not None:
self.y = self.regress_out(self.y, self.y_partial)
# Possibly bin the predictor variable, which implies a point estimate
if x_bins is not None:
self.x_estimator = np.mean if x_estimator is None else x_estimator
x_discrete, x_bins = self.bin_predictor(x_bins)
self.x_discrete = x_discrete
else:
self.x_discrete = self.x
# Save the range of the x variable for the grid later
self.x_range = self.x.min(), self.x.max()
@property
def scatter_data(self):
"""Data where each observation is a point."""
x_j = self.x_jitter
if x_j is None:
x = self.x
else:
x = self.x + np.random.uniform(-x_j, x_j, len(self.x))
y_j = self.y_jitter
if y_j is None:
y = self.y
else:
y = self.y + np.random.uniform(-y_j, y_j, len(self.y))
return x, y
@property
def estimate_data(self):
"""Data with a point estimate and CI for each discrete x value."""
x, y = self.x_discrete, self.y
vals = sorted(np.unique(x))
points, cis = [], []
for val in vals:
# Get the point estimate of the y variable
_y = y[x == val]
est = self.x_estimator(_y)
points.append(est)
# Compute the confidence interval for this estimate
if self.x_ci is None:
cis.append(None)
else:
units = None
if self.units is not None:
units = self.units[x == val]
boots = algo.bootstrap(_y, func=self.x_estimator,
n_boot=self.n_boot, units=units)
_ci = utils.ci(boots, self.x_ci)
cis.append(_ci)
return vals, points, cis
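# Standalone sketch of the percentile-bootstrap interval computed above for
# each discrete x value (simplified: no ``units`` handling, and the estimator
# and CI width are passed explicitly; names are ad hoc).
def _sketch_bootstrap_ci(values, estimator=None, ci=95, n_boot=1000, seed=0):
    import numpy as np
    estimator = np.mean if estimator is None else estimator
    rng = np.random.RandomState(seed)
    values = np.asarray(values)
    boots = np.array([estimator(rng.choice(values, len(values), replace=True))
                      for _ in range(n_boot)])
    low, high = np.percentile(boots, [50 - ci / 2., 50 + ci / 2.])
    return low, high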
def fit_regression(self, ax=None, x_range=None, grid=None):
"""Fit the regression model."""
# Create the grid for the regression
if grid is None:
if self.truncate:
x_min, x_max = self.x_range
else:
if ax is None:
x_min, x_max = x_range
else:
x_min, x_max = ax.get_xlim()
grid = np.linspace(x_min, x_max, 100)
ci = self.ci
# Fit the regression
if self.order > 1:
yhat, yhat_boots = self.fit_poly(grid, self.order)
elif self.logistic:
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Binomial
yhat, yhat_boots = self.fit_statsmodels(grid, GLM,
family=Binomial())
elif self.lowess:
ci = None
grid, yhat = self.fit_lowess()
elif self.robust:
from statsmodels.robust.robust_linear_model import RLM
yhat, yhat_boots = self.fit_statsmodels(grid, RLM)
elif self.logx:
yhat, yhat_boots = self.fit_logx(grid)
else:
yhat, yhat_boots = self.fit_fast(grid)
# Compute the confidence interval at each grid point
if ci is None:
err_bands = None
else:
err_bands = utils.ci(yhat_boots, ci, axis=0)
return grid, yhat, err_bands
def fit_fast(self, grid):
"""Low-level regression and prediction using linear algebra."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
reg_func = lambda _x, _y: np.linalg.pinv(_x).dot(_y)
yhat = grid.dot(reg_func(X, y))
if self.ci is None:
return yhat, None
beta_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units).T
yhat_boots = grid.dot(beta_boots).T
return yhat, yhat_boots
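# Minimal numpy sketch of the pseudoinverse-based fit performed above:
# prepend an intercept column, solve for the coefficients, and evaluate the
# fitted line on a grid (the data here are synthetic).
def _sketch_fast_ols():
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.uniform(0, 10, 50)
    y = 2.0 + 0.5 * x + rng.normal(0, 1, 50)
    X = np.c_[np.ones(len(x)), x]          # design matrix with intercept
    beta = np.linalg.pinv(X).dot(y)        # least-squares coefficients
    grid = np.linspace(x.min(), x.max(), 100)
    yhat = np.c_[np.ones(len(grid)), grid].dot(beta)
    return beta, grid, yhat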
def fit_poly(self, grid, order):
"""Regression using numpy polyfit for higher-order trends."""
x, y = self.x, self.y
reg_func = lambda _x, _y: np.polyval(np.polyfit(_x, _y, order), grid)
yhat = reg_func(x, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(x, y, func=reg_func,
n_boot=self.n_boot, units=self.units)
return yhat, yhat_boots
def fit_statsmodels(self, grid, model, **kwargs):
"""More general regression function using statsmodels objects."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
reg_func = lambda _x, _y: model(_y, _x, **kwargs).fit().predict(grid)
yhat = reg_func(X, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units)
return yhat, yhat_boots
def fit_lowess(self):
"""Fit a locally-weighted regression, which returns its own grid."""
from statsmodels.nonparametric.smoothers_lowess import lowess
grid, yhat = lowess(self.y, self.x).T
return grid, yhat
def fit_logx(self, grid):
"""Fit the model in log-space."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), np.log(grid)]
def reg_func(_x, _y):
_x = np.c_[_x[:, 0], np.log(_x[:, 1])]
return np.linalg.pinv(_x).dot(_y)
yhat = grid.dot(reg_func(X, y))
if self.ci is None:
return yhat, None
beta_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units).T
yhat_boots = grid.dot(beta_boots).T
return yhat, yhat_boots
def bin_predictor(self, bins):
"""Discretize a predictor by assigning value to closest bin."""
x = self.x
if np.isscalar(bins):
percentiles = np.linspace(0, 100, bins + 2)[1:-1]
bins = np.c_[utils.percentiles(x, percentiles)]
else:
bins = np.c_[np.ravel(bins)]
dist = distance.cdist(np.c_[x], bins)
x_binned = bins[np.argmin(dist, axis=1)].ravel()
return x_binned, bins.ravel()
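# Standalone sketch of the integer-bins case handled above: pick evenly spaced
# percentiles as bin centers and snap each observation to its nearest center
# (the regression itself is still fit to the raw x values).
def _sketch_bin_predictor(x, n_bins=4):
    import numpy as np
    from scipy.spatial import distance
    x = np.asarray(x, dtype=float)
    percentiles = np.linspace(0, 100, n_bins + 2)[1:-1]
    centers = np.percentile(x, percentiles)
    dist = distance.cdist(np.c_[x], np.c_[centers])
    return centers[np.argmin(dist, axis=1)], centers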
def regress_out(self, a, b):
"""Regress b from a keeping a's original mean."""
a_mean = a.mean()
a = a - a_mean
b = b - b.mean()
b = np.c_[b]
a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
return (a_prime + a_mean).reshape(a.shape)
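# Standalone sketch of the residualization above: remove the part of ``a``
# that is linearly predictable from ``b`` while restoring a's original mean so
# the plotted variable stays on a familiar scale.
def _sketch_regress_out(a, b):
    import numpy as np
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    a_mean = a.mean()
    a_centered = a - a_mean
    b_centered = np.c_[b - b.mean()]
    resid = a_centered - b_centered.dot(np.linalg.pinv(b_centered).dot(a_centered))
    return resid + a_mean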
def plot(self, ax, scatter_kws, line_kws):
"""Draw the full plot."""
# Insert the plot label into the correct set of keyword arguments
if self.scatter:
scatter_kws["label"] = self.label
else:
line_kws["label"] = self.label
# Use the current color cycle state as a default
if self.color is None:
lines, = plt.plot(self.x.mean(), self.y.mean())
color = lines.get_color()
lines.remove()
else:
color = self.color
# Let color in keyword arguments override overall plot color
scatter_kws.setdefault("color", color)
line_kws.setdefault("color", color)
# Draw the constituent plots
if self.scatter:
self.scatterplot(ax, scatter_kws)
if self.fit_reg:
self.lineplot(ax, line_kws)
# Label the axes
if hasattr(self.x, "name"):
ax.set_xlabel(self.x.name)
if hasattr(self.y, "name"):
ax.set_ylabel(self.y.name)
def scatterplot(self, ax, kws):
"""Draw the data."""
# Treat the line-based markers specially, explicitly setting larger
# linewidth than is provided by the seaborn style defaults.
# This would ideally be handled better in matplotlib (i.e., distinguish
# between edgewidth for solid glyphs and linewidth for line glyphs
# but this should do for now.
line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
if self.x_estimator is None:
if "marker" in kws and kws["marker"] in line_markers:
lw = mpl.rcParams["lines.linewidth"]
else:
lw = mpl.rcParams["lines.markeredgewidth"]
kws.setdefault("linewidths", lw)
if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:
kws.setdefault("alpha", .8)
x, y = self.scatter_data
ax.scatter(x, y, **kws)
else:
# TODO abstraction
ci_kws = {"color": kws["color"]}
ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
kws.setdefault("s", 50)
xs, ys, cis = self.estimate_data
if [ci for ci in cis if ci is not None]:
for x, ci in zip(xs, cis):
ax.plot([x, x], ci, **ci_kws)
ax.scatter(xs, ys, **kws)
def lineplot(self, ax, kws):
"""Draw the model."""
xlim = ax.get_xlim()
# Fit the regression model
grid, yhat, err_bands = self.fit_regression(ax)
# Get set default aesthetics
fill_color = kws["color"]
lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
kws.setdefault("linewidth", lw)
# Draw the regression line and confidence interval
ax.plot(grid, yhat, **kws)
if err_bands is not None:
ax.fill_between(grid, *err_bands, color=fill_color, alpha=.15)
ax.set_xlim(*xlim)
_regression_docs = dict(
model_api=dedent("""\
There are a number of mutually exclusive options for estimating the
regression model: ``order``, ``logistic``, ``lowess``, ``robust``, and
``logx``. See the parameter docs for more information on these options.\
"""),
regplot_vs_lmplot=dedent("""\
Understanding the difference between :func:`regplot` and :func:`lmplot` can
be a bit tricky. In fact, they are closely related, as :func:`lmplot` uses
:func:`regplot` internally and takes most of its parameters. However,
:func:`regplot` is an axes-level function, so it draws directly onto an
axes (either the currently active axes or the one provided by the ``ax``
parameter), while :func:`lmplot` is a figure-level function and creates its
own figure, which is managed through a :class:`FacetGrid`. This has a few
consequences, namely that :func:`regplot` can happily coexist in a figure
with other kinds of plots and will follow the global matplotlib color
cycle. In contrast, :func:`lmplot` needs to occupy an entire figure, and
the size and color cycle are controlled through function parameters,
ignoring the global defaults.\
"""),
x_estimator=dedent("""\
x_estimator : callable that maps vector -> scalar, optional
Apply this function to each unique value of ``x`` and plot the
resulting estimate. This is useful when ``x`` is a discrete variable.
If ``x_ci`` is not ``None``, this estimate will be bootstrapped and a
confidence interval will be drawn.\
"""),
x_bins=dedent("""\
x_bins : int or vector, optional
Bin the ``x`` variable into discrete bins and then estimate the central
tendency and a confidence interval. This binning only influences how
the scatterplot is drawn; the regression is still fit to the original
data. This parameter is interpreted either as the number of
evenly-sized (not necessarily spaced) bins or the positions of the bin
centers. When this parameter is used, it implies that the default of
``x_estimator`` is ``numpy.mean``.\
"""),
x_ci=dedent("""\
x_ci : "ci", int in [0, 100] or None, optional
Size of the confidence interval used when plotting a central tendency
for discrete values of ``x``. If "ci", defer to the value of the ``ci``
parameter.\
"""),
scatter=dedent("""\
scatter : bool, optional
If ``True``, draw a scatterplot with the underlying observations (or
the ``x_estimator`` values).\
"""),
fit_reg=dedent("""\
fit_reg : bool, optional
If ``True``, estimate and plot a regression model relating the ``x``
and ``y`` variables.\
"""),
ci=dedent("""\
ci : int in [0, 100] or None, optional
Size of the confidence interval for the regression estimate. This will
be drawn using translucent bands around the regression line. The
confidence interval is estimated using a bootstrap; for large
datasets, it may be advisable to avoid that computation by setting
this parameter to None.\
"""),
n_boot=dedent("""\
n_boot : int, optional
Number of bootstrap resamples used to estimate the ``ci``. The default
value attempts to balance time and stability; you may want to increase
this value for "final" versions of plots.\
"""),
units=dedent("""\
units : variable name in ``data``, optional
If the ``x`` and ``y`` observations are nested within sampling units,
those can be specified here. This will be taken into account when
computing the confidence intervals by performing a multilevel bootstrap
that resamples both units and observations (within unit). This does not
otherwise influence how the regression is estimated or drawn.\
"""),
order=dedent("""\
order : int, optional
If ``order`` is greater than 1, use ``numpy.polyfit`` to estimate a
polynomial regression.\
"""),
logistic=dedent("""\
logistic : bool, optional
If ``True``, assume that ``y`` is a binary variable and use
``statsmodels`` to estimate a logistic regression model. Note that this
is substantially more computationally intensive than linear regression,
so you may wish to decrease the number of bootstrap resamples
(``n_boot``) or set ``ci`` to None.\
"""),
lowess=dedent("""\
lowess : bool, optional
If ``True``, use ``statsmodels`` to estimate a nonparametric lowess
model (locally weighted linear regression). Note that confidence
intervals cannot currently be drawn for this kind of model.\
"""),
robust=dedent("""\
robust : bool, optional
If ``True``, use ``statsmodels`` to estimate a robust regression. This
will de-weight outliers. Note that this is substantially more
computationally intensive than standard linear regression, so you may
wish to decrease the number of bootstrap resamples (``n_boot``) or set
``ci`` to None.\
"""),
logx=dedent("""\
logx : bool, optional
If ``True``, estimate a linear regression of the form y ~ log(x), but
plot the scatterplot and regression model in the input space. Note that
``x`` must be positive for this to work.\
"""),
xy_partial=dedent("""\
{x,y}_partial : strings in ``data`` or matrices
Confounding variables to regress out of the ``x`` or ``y`` variables
before plotting.\
"""),
truncate=dedent("""\
truncate : bool, optional
By default, the regression line is drawn to fill the x axis limits
after the scatterplot is drawn. If ``truncate`` is ``True``, it will
instead be bounded by the data limits.\
"""),
xy_jitter=dedent("""\
{x,y}_jitter : floats, optional
Add uniform random noise of this size to either the ``x`` or ``y``
variables. The noise is added to a copy of the data after fitting the
regression, and only influences the look of the scatterplot. This can
be helpful when plotting variables that take discrete values.\
"""),
scatter_line_kws=dedent("""\
{scatter,line}_kws : dictionaries
Additional keyword arguments to pass to ``plt.scatter`` and
``plt.plot``.\
"""),
)
_regression_docs.update(_facet_docs)
def lmplot(x, y, data, hue=None, col=None, row=None, palette=None,
col_wrap=None, size=5, aspect=1, markers="o", sharex=True,
sharey=True, hue_order=None, col_order=None, row_order=None,
legend=True, legend_out=True, x_estimator=None, x_bins=None,
x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
units=None, order=1, logistic=False, lowess=False, robust=False,
logx=False, x_partial=None, y_partial=None, truncate=False,
x_jitter=None, y_jitter=None, scatter_kws=None, line_kws=None):
# Reduce the dataframe to only needed columns
need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
cols = np.unique([a for a in need_cols if a is not None]).tolist()
data = data[cols]
# Initialize the grid
facets = FacetGrid(data, row, col, hue, palette=palette,
row_order=row_order, col_order=col_order,
hue_order=hue_order, size=size, aspect=aspect,
col_wrap=col_wrap, sharex=sharex, sharey=sharey,
legend_out=legend_out)
# Add the markers here as FacetGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if facets.hue_names is None:
n_markers = 1
else:
n_markers = len(facets.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError(("markers must be a singeton or a list of markers "
"for each level of the hue variable"))
facets.hue_kws = {"marker": markers}
# Hack to set the x limits properly, which needs to happen here
# because the extent of the regression estimate is determined
# by the limits of the plot
if sharex:
for ax in facets.axes.flat:
ax.scatter(data[x], np.ones(len(data)) * data[y].mean()).remove()
# Draw the regression plot on each facet
regplot_kws = dict(
x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,
order=order, logistic=logistic, lowess=lowess, robust=robust,
logx=logx, x_partial=x_partial, y_partial=y_partial, truncate=truncate,
x_jitter=x_jitter, y_jitter=y_jitter,
scatter_kws=scatter_kws, line_kws=line_kws,
)
facets.map_dataframe(regplot, x, y, **regplot_kws)
# Add a legend
if legend and (hue is not None) and (hue not in [col, row]):
facets.add_legend()
return facets
lmplot.__doc__ = dedent("""\
Plot data and regression model fits across a FacetGrid.
This function combines :func:`regplot` and :class:`FacetGrid`. It is
intended as a convenient interface to fit regression models across
conditional subsets of a dataset.
When thinking about how to assign variables to different facets, a general
rule is that it makes sense to use ``hue`` for the most important
comparison, followed by ``col`` and ``row``. However, always think about
your particular dataset and the goals of the visualization you are
creating.
{model_api}
The parameters to this function span most of the options in
:class:`FacetGrid`, although there may be occasional cases where you will
want to use that class and :func:`regplot` directly.
Parameters
----------
x, y : strings, optional
Input variables; these should be column names in ``data``.
{data}
hue, col, row : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``*_order`` parameters to control
the order of levels of this variable.
{palette}
{col_wrap}
{size}
{aspect}
markers : matplotlib marker code or list of marker codes, optional
Markers for the scatterplot. If a list, each marker in the list will be
used for each level of the ``hue`` variable.
{share_xy}
{{hue,col,row}}_order : lists, optional
Order for the levels of the faceting variables. By default, this will
be the order that the levels appear in ``data`` or, if the variables
are pandas categoricals, the category order.
legend : bool, optional
If ``True`` and there is a ``hue`` variable, add a legend.
{legend_out}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
{scatter_line_kws}
See Also
--------
regplot : Plot data and a conditional model fit.
FacetGrid : Subplot grid for plotting conditional relationships.
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
Notes
-----
{regplot_vs_lmplot}
Examples
--------
These examples focus on basic regression model plots to exhibit the
various faceting options; see the :func:`regplot` docs for demonstrations
of the other options for plotting the data and models. There are also
other examples for how to manipulate plot using the returned object on
the :class:`FacetGrid` docs.
Plot a simple linear relationship between two variables:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.lmplot(x="total_bill", y="tip", data=tips)
Condition on a third variable and plot the levels in different colors:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips)
Use different markers as well as colors so the plot will reproduce
better in black and white:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... markers=["o", "x"])
Use a different color palette:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette="Set1")
Map ``hue`` levels to colors with a dictionary:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette=dict(Yes="g", No="m"))
Plot the levels of the third variable across different columns:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="smoker", data=tips)
Change the size and aspect ratio of the facets:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="size", y="total_bill", hue="day", col="day",
... data=tips, aspect=.4, x_jitter=.1)
Wrap the levels of the column variable into multiple rows:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="day", hue="day",
... data=tips, col_wrap=2, size=3)
Condition on two variables to make a full grid:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, size=3)
Use methods on the returned :class:`FacetGrid` instance to further tweak
the plot:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, size=3)
>>> g = (g.set_axis_labels("Total bill (US Dollars)", "Tip")
... .set(xlim=(0, 60), ylim=(0, 12),
... xticks=[10, 30, 50], yticks=[2, 6, 10])
... .fig.subplots_adjust(wspace=.02))
""").format(**_regression_docs)
def regplot(x, y, data=None, x_estimator=None, x_bins=None, x_ci="ci",
scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
order=1, logistic=False, lowess=False, robust=False,
logx=False, x_partial=None, y_partial=None,
truncate=False, dropna=True, x_jitter=None, y_jitter=None,
label=None, color=None, marker="o",
scatter_kws=None, line_kws=None, ax=None):
plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,
scatter, fit_reg, ci, n_boot, units,
order, logistic, lowess, robust, logx,
x_partial, y_partial, truncate, dropna,
x_jitter, y_jitter, color, label)
if ax is None:
ax = plt.gca()
scatter_kws = {} if scatter_kws is None else copy.copy(scatter_kws)
scatter_kws["marker"] = marker
line_kws = {} if line_kws is None else copy.copy(line_kws)
plotter.plot(ax, scatter_kws, line_kws)
return ax
regplot.__doc__ = dedent("""\
Plot data and a linear regression model fit.
{model_api}
Parameters
----------
x, y: string, series, or vector array
Input variables. If strings, these should correspond with column names
in ``data``. When pandas objects are used, axes will be labeled with
the series name.
{data}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
label : string
Label to apply to either the scatterplot or regression line (if
``scatter`` is ``False``) for use in a legend.
color : matplotlib color
Color to apply to all plot elements; will be superseded by colors
passed in ``scatter_kws`` or ``line_kws``.
marker : matplotlib marker code
Marker to use for the scatterplot glyphs.
{scatter_line_kws}
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.
Returns
-------
ax : matplotlib Axes
The Axes object containing the plot.
See Also
--------
lmplot : Combine :func:`regplot` and :class:`FacetGrid` to plot multiple
linear relationships in a dataset.
jointplot : Combine :func:`regplot` and :class:`JointGrid` (when used with
``kind="reg"``).
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
residplot : Plot the residuals of a linear regression model.
interactplot : Plot a two-way interaction between continuous variables
Notes
-----
{regplot_vs_lmplot}
It's also easy to combine :func:`regplot` and :class:`JointGrid` or
:class:`PairGrid` through the :func:`jointplot` and :func:`pairplot`
functions, although these do not directly accept all of :func:`regplot`'s
parameters.
Examples
--------
Plot the relationship between two variables in a DataFrame:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> ax = sns.regplot(x="total_bill", y="tip", data=tips)
Plot with two variables defined as numpy arrays; use a different color:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(8)
>>> mean, cov = [4, 6], [(1.5, .7), (.7, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, 80).T
>>> ax = sns.regplot(x=x, y=y, color="g")
Plot with two variables defined as pandas Series; use a different marker:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> x, y = pd.Series(x, name="x_var"), pd.Series(y, name="y_var")
>>> ax = sns.regplot(x=x, y=y, marker="+")
Use a 68% confidence interval, which corresponds with the standard error
of the estimate:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x=x, y=y, ci=68)
Plot with a discrete ``x`` variable and add some jitter:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips, x_jitter=.1)
Plot with a discrete ``x`` variable showing means and confidence intervals
for unique values:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips,
... x_estimator=np.mean)
Plot with a continuous variable divided into discrete bins:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x=x, y=y, x_bins=4)
Fit a higher-order polynomial regression and truncate the model prediction:
.. plot::
:context: close-figs
>>> ans = sns.load_dataset("anscombe")
>>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "II"],
... scatter_kws={{"s": 80}},
... order=2, ci=None, truncate=True)
Fit a robust regression and don't plot a confidence interval:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "III"],
... scatter_kws={{"s": 80}},
... robust=True, ci=None)
Fit a logistic regression; jitter the y variable and use fewer bootstrap
iterations:
.. plot::
:context: close-figs
>>> tips["big_tip"] = (tips.tip / tips.total_bill) > .175
>>> ax = sns.regplot(x="total_bill", y="big_tip", data=tips,
... logistic=True, n_boot=500, y_jitter=.03)
Fit the regression model using log(x) and truncate the model prediction:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips,
... x_estimator=np.mean, logx=True, truncate=True)
""").format(**_regression_docs)
def residplot(x, y, data=None, lowess=False, x_partial=None, y_partial=None,
order=1, robust=False, dropna=True, label=None, color=None,
scatter_kws=None, line_kws=None, ax=None):
"""Plot the residuals of a linear regression.
This function will regress y on x (possibly as a robust or polynomial
regression) and then draw a scatterplot of the residuals. You can
optionally fit a lowess smoother to the residual plot, which can
help in determining if there is structure to the residuals.
Parameters
----------
x : vector or string
Data or column name in `data` for the predictor variable.
y : vector or string
Data or column name in `data` for the response variable.
data : DataFrame, optional
DataFrame to use if `x` and `y` are column names.
lowess : boolean, optional
Fit a lowess smoother to the residual scatterplot.
{x, y}_partial : matrix or string(s), optional
Matrix with same first dimension as `x`, or column name(s) in `data`.
These variables are treated as confounding and are removed from
the `x` or `y` variables before plotting.
order : int, optional
Order of the polynomial to fit when calculating the residuals.
robust : boolean, optional
Fit a robust linear regression when calculating the residuals.
dropna : boolean, optional
If True, ignore observations with missing data when fitting and
plotting.
label : string, optional
Label that will be used in any plot legends.
color : matplotlib color, optional
Color to use for all elements of the plot.
{scatter, line}_kws : dictionaries, optional
Additional keyword arguments passed to scatter() and plot() for drawing
the components of the plot.
ax : matplotlib axis, optional
Plot into this axis, otherwise grab the current axis or make a new
one if not existing.
Returns
-------
ax: matplotlib axes
Axes with the regression plot.
See Also
--------
regplot : Plot a simple linear regression model.
jointplot (with kind="resid"): Draw a residplot with univariate
marginal distributions.
"""
plotter = _RegressionPlotter(x, y, data, ci=None,
order=order, robust=robust,
x_partial=x_partial, y_partial=y_partial,
dropna=dropna, color=color, label=label)
if ax is None:
ax = plt.gca()
# Calculate the residual from a linear regression
_, yhat, _ = plotter.fit_regression(grid=plotter.x)
plotter.y = plotter.y - yhat
# Set the regression option on the plotter
if lowess:
plotter.lowess = True
else:
plotter.fit_reg = False
# Plot a horizontal line at 0
ax.axhline(0, ls=":", c=".2")
# Draw the scatterplot
scatter_kws = {} if scatter_kws is None else scatter_kws
line_kws = {} if line_kws is None else line_kws
plotter.plot(ax, scatter_kws, line_kws)
return ax
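# Usage sketch for ``residplot`` (synthetic data; the lowess option requires
# statsmodels): residuals from a linear fit to a quadratic relationship show
# clear curvature, which the smoother makes easy to see.
def _sketch_residplot_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.uniform(-2, 2, 100)
    y = x ** 2 + rng.normal(0, .3, 100)
    return residplot(x=x, y=y, lowess=True)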
def coefplot(formula, data, groupby=None, intercept=False, ci=95,
palette="husl"):
"""Plot the coefficients from a linear model.
Parameters
----------
formula : string
patsy formula for ols model
data : dataframe
data for the plot; formula terms must appear in columns
groupby : grouping object, optional
object to group data with to fit conditional models
intercept : bool, optional
if False, strips the intercept term before plotting
ci : float, optional
size of confidence intervals
palette : seaborn color palette, optional
palette for the horizontal plots
"""
if not _has_statsmodels:
raise ImportError("The `coefplot` function requires statsmodels")
import statsmodels.formula.api as sf
alpha = 1 - ci / 100
if groupby is None:
coefs = sf.ols(formula, data).fit().params
cis = sf.ols(formula, data).fit().conf_int(alpha)
else:
grouped = data.groupby(groupby)
coefs = grouped.apply(lambda d: sf.ols(formula, d).fit().params).T
cis = grouped.apply(lambda d: sf.ols(formula, d).fit().conf_int(alpha))
# Possibly ignore the intercept
if not intercept:
coefs = coefs.ix[1:]
n_terms = len(coefs)
# Plot separately depending on groupby
w, h = mpl.rcParams["figure.figsize"]
hsize = lambda n: n * (h / 2)
wsize = lambda n: n * (w / (4 * (n / 5)))
if groupby is None:
colors = itertools.cycle(color_palette(palette, n_terms))
f, ax = plt.subplots(1, 1, figsize=(wsize(n_terms), hsize(1)))
for i, term in enumerate(coefs.index):
color = next(colors)
low, high = cis.ix[term]
ax.plot([i, i], [low, high], c=color,
solid_capstyle="round", lw=2.5)
ax.plot(i, coefs.ix[term], "o", c=color, ms=8)
ax.set_xlim(-.5, n_terms - .5)
ax.axhline(0, ls="--", c="dimgray")
ax.set_xticks(range(n_terms))
ax.set_xticklabels(coefs.index)
else:
n_groups = len(coefs.columns)
f, axes = plt.subplots(n_terms, 1, sharex=True,
figsize=(wsize(n_groups), hsize(n_terms)))
if n_terms == 1:
axes = [axes]
colors = itertools.cycle(color_palette(palette, n_groups))
for ax, term in zip(axes, coefs.index):
for i, group in enumerate(coefs.columns):
color = next(colors)
low, high = cis.ix[(group, term)]
ax.plot([i, i], [low, high], c=color,
solid_capstyle="round", lw=2.5)
ax.plot(i, coefs.loc[term, group], "o", c=color, ms=8)
ax.set_xlim(-.5, n_groups - .5)
ax.axhline(0, ls="--", c="dimgray")
ax.set_title(term)
ax.set_xlabel(groupby)
ax.set_xticks(range(n_groups))
ax.set_xticklabels(coefs.columns)
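# Usage sketch for ``coefplot`` (requires statsmodels/patsy and network access
# for ``load_dataset``; the formula and grouping variable are illustrative and
# assume the pandas version this module was written against).
def _sketch_coefplot_usage():
    import seaborn as sns
    tips = sns.load_dataset("tips")
    # One panel of coefficients per level of "sex", intercept stripped.
    coefplot("tip ~ total_bill + size", tips, groupby="sex", ci=95)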
def interactplot(x1, x2, y, data=None, filled=False, cmap="RdBu_r",
colorbar=True, levels=30, logistic=False,
contour_kws=None, scatter_kws=None, ax=None, **kwargs):
"""Visualize a continuous two-way interaction with a contour plot.
Parameters
----------
x1, x2, y : strings or array-like
Either the two independent variables and the dependent variable,
or keys to extract them from `data`
data : DataFrame
Pandas DataFrame with the data in the columns.
filled : bool
Whether to plot with filled or unfilled contours
cmap : matplotlib colormap
Colormap to represent yhat in the contour plot.
colorbar : bool
Whether to draw the colorbar for interpreting the color values.
levels : int or sequence
Number or position of contour plot levels.
logistic : bool
Fit a logistic regression model instead of linear regression.
contour_kws : dictionary
Keyword arguments for contour[f]().
scatter_kws : dictionary
Keyword arguments for plot().
ax : matplotlib axis
Axis to draw plot in.
Returns
-------
ax : Matplotlib axis
Axis with the contour plot.
"""
if not _has_statsmodels:
raise ImportError("The `interactplot` function requires statsmodels")
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families import Binomial
# Handle the form of the data
if data is not None:
x1 = data[x1]
x2 = data[x2]
y = data[y]
if hasattr(x1, "name"):
xlabel = x1.name
else:
xlabel = None
if hasattr(x2, "name"):
ylabel = x2.name
else:
ylabel = None
if hasattr(y, "name"):
clabel = y.name
else:
clabel = None
x1 = np.asarray(x1)
x2 = np.asarray(x2)
y = np.asarray(y)
# Initialize the scatter keyword dictionary
if scatter_kws is None:
scatter_kws = {}
if not ("color" in scatter_kws or "c" in scatter_kws):
scatter_kws["color"] = "#222222"
if "alpha" not in scatter_kws:
scatter_kws["alpha"] = 0.75
# Initialize the contour keyword dictionary
if contour_kws is None:
contour_kws = {}
# Initialize the axis
if ax is None:
ax = plt.gca()
# Plot once to let matplotlib sort out the axis limits
ax.plot(x1, x2, "o", **scatter_kws)
# Find the plot limits
x1min, x1max = ax.get_xlim()
x2min, x2max = ax.get_ylim()
# Make the grid for the contour plot
x1_points = np.linspace(x1min, x1max, 100)
x2_points = np.linspace(x2min, x2max, 100)
xx1, xx2 = np.meshgrid(x1_points, x2_points)
# Fit the model with an interaction
X = np.c_[np.ones(x1.size), x1, x2, x1 * x2]
if logistic:
lm = GLM(y, X, family=Binomial()).fit()
else:
lm = OLS(y, X).fit()
# Evaluate the model on the grid
eval = np.vectorize(lambda x1_, x2_: lm.predict([1, x1_, x2_, x1_ * x2_]))
yhat = eval(xx1, xx2)
# Default color limits put the midpoint at mean(y)
y_bar = y.mean()
c_min = min(np.percentile(y, 2), yhat.min())
c_max = max(np.percentile(y, 98), yhat.max())
delta = max(c_max - y_bar, y_bar - c_min)
c_min, c_max = y_bar - delta, y_bar + delta
contour_kws.setdefault("vmin", c_min)
contour_kws.setdefault("vmax", c_max)
# Draw the contour plot
func_name = "contourf" if filled else "contour"
contour = getattr(ax, func_name)
c = contour(xx1, xx2, yhat, levels, cmap=cmap, **contour_kws)
# Draw the scatter again so it's visible
ax.plot(x1, x2, "o", **scatter_kws)
# Draw a colorbar, maybe
if colorbar:
bar = plt.colorbar(c)
# Label the axes
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if clabel is not None and colorbar:
clabel = "P(%s)" % clabel if logistic else clabel
bar.set_label(clabel, labelpad=15, rotation=270)
return ax
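# Usage sketch for ``interactplot`` (synthetic data with a genuine x1 * x2
# interaction; requires statsmodels).
def _sketch_interactplot_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    x1 = rng.normal(size=200)
    x2 = rng.normal(size=200)
    y = 1 + x1 + x2 + 1.5 * x1 * x2 + rng.normal(size=200)
    # Filled contours of yhat from the fitted y ~ 1 + x1 + x2 + x1:x2 model.
    return interactplot(x1, x2, y, filled=True, levels=20)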
def corrplot(data, names=None, annot=True, sig_stars=True, sig_tail="both",
sig_corr=True, cmap=None, cmap_range=None, cbar=True,
diag_names=True, method=None, ax=None, **kwargs):
"""Plot a correlation matrix with colormap and r values.
NOTE: This function is deprecated in favor of :func:`heatmap` and will
be removed in a forthcoming release.
Parameters
----------
data : Dataframe or nobs x nvars array
Rectangular input data with variables in the columns.
names : sequence of strings
Names to associate with variables if `data` is not a DataFrame.
annot : bool
Whether to annotate the upper triangle with correlation coefficients.
sig_stars : bool
If True, get significance with permutation test and denote with stars.
sig_tail : both | upper | lower
Direction for significance test. Also controls the default colorbar.
sig_corr : bool
If True, use FWE-corrected p values for the sig stars.
cmap : colormap
Colormap name as string or colormap object.
cmap_range : None, "full", (low, high)
Either truncate colormap at (-max(abs(r)), max(abs(r))), use the
full range (-1, 1), or specify (min, max) values for the colormap.
cbar : bool
If true, plot the colorbar legend.
method: None (pearson) | kendall | spearman
Correlation method to compute pairwise correlations. Methods other
than the default pearson correlation will not have a significance
computed.
ax : matplotlib axis
Axis to draw plot in.
kwargs : other keyword arguments
Passed to ax.matshow()
Returns
-------
ax : matplotlib axis
Axis object with plot.
"""
warnings.warn(("The `corrplot` function has been deprecated in favor "
"of `heatmap` and will be removed in a forthcoming "
"release. Please update your code."))
if not isinstance(data, pd.DataFrame):
if names is None:
names = ["var_%d" % i for i in range(data.shape[1])]
data = pd.DataFrame(data, columns=names, dtype=np.float)
# Calculate the correlation matrix of the dataframe
if method is None:
corrmat = data.corr()
else:
corrmat = data.corr(method=method)
# Pandas will drop non-numeric columns; let's keep track of that operation
names = corrmat.columns
data = data[names]
# Get p values with a permutation test
if annot and sig_stars and method is None:
p_mat = algo.randomize_corrmat(data.values.T, sig_tail, sig_corr)
else:
p_mat = None
# Sort out the color range
if cmap_range is None:
triu = np.triu_indices(len(corrmat), 1)
vmax = min(1, np.max(np.abs(corrmat.values[triu])) * 1.15)
vmin = -vmax
if sig_tail == "both":
cmap_range = vmin, vmax
elif sig_tail == "upper":
cmap_range = 0, vmax
elif sig_tail == "lower":
cmap_range = vmin, 0
elif cmap_range == "full":
cmap_range = (-1, 1)
# Find a colormapping, somewhat intelligently
if cmap is None:
if min(cmap_range) >= 0:
cmap = "OrRd"
elif max(cmap_range) <= 0:
cmap = "PuBu_r"
else:
cmap = "coolwarm"
if cmap == "jet":
# Paternalism
raise ValueError("Never use the 'jet' colormap!")
# Plot using the more general symmatplot function
ax = symmatplot(corrmat, p_mat, names, cmap, cmap_range,
cbar, annot, diag_names, ax, **kwargs)
return ax
def symmatplot(mat, p_mat=None, names=None, cmap="Greys", cmap_range=None,
cbar=True, annot=True, diag_names=True, ax=None, **kwargs):
"""Plot a symmetric matrix with colormap and statistic values.
NOTE: This function is deprecated in favor of :func:`heatmap` and will
be removed in a forthcoming release.
"""
warnings.warn(("The `symmatplot` function has been deprecated in favor "
"of `heatmap` and will be removed in a forthcoming "
"release. Please update your code."))
if ax is None:
ax = plt.gca()
nvars = len(mat)
if isinstance(mat, pd.DataFrame):
plotmat = mat.values.copy()
mat = mat.values
else:
plotmat = mat.copy()
plotmat[np.triu_indices(nvars)] = np.nan
if cmap_range is None:
vmax = np.nanmax(plotmat) * 1.15
vmin = np.nanmin(plotmat) * 1.15
elif len(cmap_range) == 2:
vmin, vmax = cmap_range
else:
raise ValueError("cmap_range argument not understood")
mat_img = ax.matshow(plotmat, cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)
if cbar:
plt.colorbar(mat_img, shrink=.75)
if p_mat is None:
p_mat = np.ones((nvars, nvars))
if annot:
for i, j in zip(*np.triu_indices(nvars, 1)):
val = mat[i, j]
stars = utils.sig_stars(p_mat[i, j])
ax.text(j, i, "\n%.2g\n%s" % (val, stars),
fontdict=dict(ha="center", va="center"))
else:
fill = np.ones_like(plotmat)
fill[np.tril_indices_from(fill, -1)] = np.nan
ax.matshow(fill, cmap="Greys", vmin=0, vmax=0, zorder=2)
if names is None:
names = ["var%d" % i for i in range(nvars)]
if diag_names:
for i, name in enumerate(names):
ax.text(i, i, name, fontdict=dict(ha="center", va="center",
weight="bold", rotation=45))
ax.set_xticklabels(())
ax.set_yticklabels(())
else:
ax.xaxis.set_ticks_position("bottom")
xnames = names if annot else names[:-1]
ax.set_xticklabels(xnames, rotation=90)
ynames = names if annot else names[1:]
ax.set_yticklabels(ynames)
minor_ticks = np.linspace(-.5, nvars - 1.5, nvars)
ax.set_xticks(minor_ticks, True)
ax.set_yticks(minor_ticks, True)
major_ticks = np.linspace(0, nvars - 1, nvars)
xticks = major_ticks if annot else major_ticks[:-1]
ax.set_xticks(xticks)
yticks = major_ticks if annot else major_ticks[1:]
ax.set_yticks(yticks)
ax.grid(False, which="major")
ax.grid(True, which="minor", linestyle="-")
return ax
def pairplot(data, hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="hist", markers=None,
size=2.5, aspect=1, dropna=True,
plot_kws=None, diag_kws=None, grid_kws=None):
"""Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each
variable in ``data`` will be shared in the y-axis across a single row and
in the x-axis across a single column. The diagonal Axes are treated
differently, drawing a plot to show the univariate distribution of the data
for the variable in that column.
It is also possible to show a subset of variables or plot different
variables on the rows and columns.
This is a high-level interface for :class:`PairGrid` that is intended to
make it easy to draw a few common styles. You should use :class:`PairGrid`
directly if you need more flexibility.
Parameters
----------
data : DataFrame
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : string (variable name), optional
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
vars : list of variable names, optional
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names, optional
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
kind : {'scatter', 'reg'}, optional
Kind of plot for the non-identity relationships.
diag_kind : {'hist', 'kde'}, optional
Kind of plot for the diagonal subplots.
markers : single matplotlib marker code or list, optional
Either the marker to use for all datapoints or a list of markers with
a length the same as the number of levels in the hue variable so that
differently colored points will also have different scatterplot
markers.
size : scalar, optional
Height (in inches) of each facet.
aspect : scalar, optional
Aspect * size gives the width (in inches) of each facet.
dropna : boolean, optional
Drop missing values from the data before plotting.
{plot, diag, grid}_kws : dicts, optional
Dictionaries of keyword arguments.
Returns
-------
grid : PairGrid
Returns the underlying ``PairGrid`` instance for further tweaking.
See Also
--------
PairGrid : Subplot grid for more flexible plotting of pairwise
relationships.
Examples
--------
Draw scatterplots for joint relationships and histograms for univariate
distributions:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(style="ticks", color_codes=True)
>>> iris = sns.load_dataset("iris")
>>> g = sns.pairplot(iris)
Show different levels of a categorical variable by the color of plot
elements:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, hue="species")
Use a different color palette:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, hue="species", palette="husl")
Use different markers for each level of the hue variable:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, hue="species", markers=["o", "s", "D"])
Plot a subset of variables:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, vars=["sepal_width", "sepal_length"])
Draw larger plots:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, size=3,
... vars=["sepal_width", "sepal_length"])
Plot different variables in the rows and columns:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris,
... x_vars=["sepal_width", "sepal_length"],
... y_vars=["petal_width", "petal_length"])
Use kernel density estimates for univariate plots:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, diag_kind="kde")
Fit linear regression models to the scatter plots:
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, kind="reg")
Pass keyword arguments down to the underlying functions (it may be easier
to use :class:`PairGrid` directly):
.. plot::
:context: close-figs
>>> g = sns.pairplot(iris, diag_kind="kde", markers="+",
... plot_kws=dict(s=50, edgecolor="b", linewidth=1),
... diag_kws=dict(shade=True))
"""
if plot_kws is None:
plot_kws = {}
if diag_kws is None:
diag_kws = {}
if grid_kws is None:
grid_kws = {}
# Set up the PairGrid
diag_sharey = diag_kind == "hist"
grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
hue_order=hue_order, palette=palette,
diag_sharey=diag_sharey,
size=size, aspect=aspect, dropna=dropna, **grid_kws)
# Add the markers here as PairGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if markers is not None:
if grid.hue_names is None:
n_markers = 1
else:
n_markers = len(grid.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError(("markers must be a singeton or a list of markers"
" for each level of the hue variable"))
grid.hue_kws = {"marker": markers}
# Maybe plot on the diagonal
if grid.square_grid:
if diag_kind == "hist":
grid.map_diag(plt.hist, **diag_kws)
elif diag_kind == "kde":
diag_kws["legend"] = False
grid.map_diag(kdeplot, **diag_kws)
# Maybe plot on the off-diagonals
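    # With a square grid and a filled diagonal, only the off-diagonal facets
    # still need the bivariate plot; otherwise every facet gets it.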
if grid.square_grid and diag_kind is not None:
plotter = grid.map_offdiag
else:
plotter = grid.map
if kind == "scatter":
plot_kws.setdefault("edgecolor", "white")
plotter(plt.scatter, **plot_kws)
elif kind == "reg":
plotter(regplot, **plot_kws)
# Add a legend
if hue is not None:
grid.add_legend()
return grid
| bsd-3-clause |
liyu1990/sklearn | examples/mixture/plot_gmm_classifier.py | 22 | 4015 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
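    # Draw one ellipse per mixture component, oriented along the eigenvectors of
    # the first two covariance dimensions; the eigenvalue scaling is only there
    # to make the ellipses visible at the scale of the data.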
for n, color in enumerate(colors):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
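# init_params='wc' lets fit() re-initialise only the weights and covariances;
# the means are seeded by hand below from the labelled training data.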
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
    h = plt.subplot(2, n_classifiers // 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
shansixiong/geosearch | build/lib/geosearch/geosearch.py | 2 | 2159 | import re
import pandas as pd
import math
def preprocess(text):
    '''Keep only the capitalised words from the text.'''
caps = re.findall(r"[A-Z][a-z]+", text)
caps = [word for word in caps if len(word) > 2]
text = " ".join(caps)
text = text.strip()
return text
def get_english(ls):
    '''Keep only English words.'''
    words = [x for x in ls if len(re.findall("[A-Za-z ]*", x)) == 2]
    return words
def get_total_regions(alpha=0.8, infile="database.json"):
df = pd.DataFrame(pd.read_json(infile))
regions = df["region"]
regions = [x[:math.ceil(alpha * len(x))] for x in regions if x is not None]
ls = []
for x in regions:
ls += x
return "|".join(get_english(ls)) + "|"
def search_core(text, string, alpha=0.8):
    '''Match one- and two-word combinations of capitalised words in the text against the pipe-delimited lookup string.'''
text = preprocess(text)
res = []
ls = text.split(" ")
i = 0
while True:
if i >= len(ls):
break
if i <= len(ls) - 2:
two_combined = " ".join([ls[i], ls[i + 1]])
st = two_combined + "|"
if st in string:
res.append(two_combined)
i += 2
continue
st = ls[i] + "|"
if st in string:
res.append(ls[i])
i += 1
return res
def search_locations(text, alpha=0.8):
total_string = get_total_regions(alpha=alpha)
return search_core(text, total_string, alpha)
def search_countries(text):
with open("countries.txt", 'r') as f:
countries_ls = f.read()
countries_ls = "|".join(countries_ls.split("\n"))
return search_core(text, countries_ls, alpha=1)
def search_nationalities(text):
with open("nationalities.txt", 'r') as f:
nationalities = f.read()
nationalities = "|".join(nationalities.split("\n"))
text = preprocess(text).split(" ")
return [word for word in text if word in nationalities]
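# Illustrative usage (assumes database.json, countries.txt and nationalities.txt
# are available in the working directory):
#     result = geoSearch("She flew from Paris to Canada with a German friend.")
#     result.locations, result.countries, result.nationalities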
class geoSearch(object):
def __init__(self, text, alpha=0.8):
self.locations = search_locations(text, alpha)
self.countries = search_countries(text)
self.nationalities = search_nationalities(text) | mit |
k-eks/Burrow | Burrow.py | 1 | 17344 |
from __future__ import print_function # python 2.7 compatibility
from __future__ import division # python 2.7 compatibility
import sys
sys.path.append("/cluster/home/hoferg/python/lib64/python2.7/site-packages")
sys.path.append("/cluster/home/hoferg/python/lib64/python3.3/site-packages")
import cmd, io, os.path, os
import fancy_output as out
import analyze_data as ad
import meerkat_tools as mt
import h5py
import numpy as np
import math
import fabio
import cbf_tools
from PIL import Image
from matplotlib import pyplot
from matplotlib import colors as colorrange
import matplotlib.colors
NO_ERROR = "Parsing was successful!"
ERROR_NO_ARGUMENTS = "No arguments are given!"
ERROR_ODD_ARGUMENTS = "Wrong number of arguments are given!"
class Burrow(cmd.Cmd):
"""Processing of data generated by meerkat"""
LIN_SCALE = "lin"
LOG_SCALE = "log"
#onblock "constructor" and "destructor"
def preloop(self):
"""Setup of all data and settings for further use."""
self.dset = []
self.dsetName = []
self.currentData = None
self.meta = None
self.activeDset = None
self.currentImage = None
        self.currentFrameSet = None
pyplot.ion() #turning interactive mode on
self.contrast_min = 1
self.contrast_max = 20
self.cmaps = ['Greys', 'gist_rainbow']
self.cmap_selection = 0
self.plotscale = self.LIN_SCALE
print("Type \"help\" to get a list of commands.")
def postloop(self):
"""Destructor, closes the hdf file."""
        if self.dset is not None:
for i in range(len(self.dset)):
self.dset[i].close()
#offblock
@property
def dataset(self):
"""Gets the active dataset."""
return self.dset[self.dsetName.index(self.activeDset)]
#onblock command line commands
#onblock exit comands
def do_exit(self, line):
"""exit the program"""
return True
def help_exit(self):
"""exit help page entry."""
print("This commad exits the program.")
def do_EOF(self, line):
"""This is the representation of the Ctrl+D shortcut."""
return True
def help_EOF(self):
"""EOF help page entry."""
print("This is the representation of the Ctrl+D shortcut.")
print("Hit Ctrl+D to exit the program.")
#offblock
def do_openFile(self, argument):
"""Opens a given file."""
errorcode, arguments = self.getArg(argument)
filename = "reconstruction.h5" # a default filename is assumed
filealias = "default" # a default name is assumed
if errorcode != ERROR_ODD_ARGUMENTS:
if "-n" in arguments:
filename = arguments[arguments.index("-n") + 1]
else:
out.warn("No file name is given! Trying default name \"" + filename + "\".")
if "-a" in arguments:
filealias = arguments[arguments.index("-a") + 1]
if os.path.isfile(filename):
#TODO:requires replacement with arkadiy's routine
file = h5py.File(filename, 'r')
out.okay("File successfully opened as " + filealias + "!")
self.activeDset = filealias
pyplot.close("all") # to prevent display issues
if filealias in self.dsetName:
self.dset[self.dsetName.index(filealias)] = file
else:
self.dset.append(file)
self.dsetName.append(filealias)
self.meta = mt.MeerkatMetaData(self.dataset)
out.warn("Active data set is now " + filealias)
else:
out.error("File \"" + filename + "\" does not exist!")
def complete_openFile(self, text, line, begidx, endidx):
"""Auto completion for files in openFile."""
allFiles = os.popen("ls").read().splitlines()
files = []
for f in allFiles:
if f.startswith(text): files.append(f)
return files
def help_openFile(self):
"""openFile help page entry."""
print("Opens a h5 file, either in meerkat or direct format.")
out.error("Only meerkat data format implemented at the moment!")
print("Arguments:")
print("\t-n <file name>\tspecifies the file name, if not given \"reconstruction.h5\" is assumed.")
print("\t-a <file alias>\tspecifies the alias for the data under which it can be called , if not given \"default\" is assumed.")
#offblock
def do_showFiles(self, argument):
"""Output of all active dsets"""
print("Active file: " + str(self.activeDset))
print("Currently open files:")
for s in self.dsetName:
print(s)
def help_showFiles(self):
"""showFiles help page entry"""
print("Prints all of the currently active files.")
def do_plothkl(self, argument):
"""plots a section of meerkat"""
"""Has some hard coded undistortion functions in it."""
out.warn("Only poor error checking implemented!")
out.warn("Only suitable for trigonal and hexagonal crystals!")
errorcode, arguments = self.getArg(argument)
if errorcode != ERROR_NO_ARGUMENTS and errorcode != ERROR_ODD_ARGUMENTS:
index = 0 # default value
if "-s" in arguments:
section = arguments[arguments.index("-s") + 1]
if "-i" in arguments:
index = arguments[arguments.index("-i") + 1]
if self.meta.format == mt.MeerkatMetaData.dtype_NORMAL:
trans = ad.Transformations(self.dataset, section)
self.currentData, x = ad.crossection_data(self.dataset, float(index), trans)
else:
slicer = mt.get_slicing_indices(section, int(index), self.meta.shape)
# be careful: the slicing in i,:,: does a weird x-y swap
self.currentData = (self.dataset['data'][slicer[0]:slicer[1],slicer[2]:slicer[3],slicer[4]:slicer[5]]).squeeze()
self.currentData = np.tile(self.currentData, (1,1))
# the following block is the hard coded undistortion of my trigonal crystals
if section == "hkx":
self.currentData = ad.hextransform(self.currentData)
elif section == "xkl" or section == "hxl":
# counteracting the weird slicing
a = math.radians(90)
T = np.array([[math.cos(a), math.sin(a)],[-math.sin(a), math.cos(a)]])
self.currentData = ad.imtransform_centered(self.currentData, T)
self.replot()
elif errorcode == ERROR_NO_ARGUMENTS:
out.error("No arguments supplied!")
def help_plothkl(self):
"""plothkl help page entry."""
print("Plots a section of the reciprocal space.")
print("Arguments:")
print("\t-s <section>\tselect a section, either hkx, hxl, xhl, uvx, uxw or xvw.")
print("\t-i <index>\tfills the place holder of x with a Miller index, if none is given, 0 is assumed")
def do_layout(self, argument):
"""Changes the layout of pyplot."""
errorcode, arguments = self.getArg(argument)
        if self.activeDset is not None:
if errorcode != ERROR_NO_ARGUMENTS and errorcode != ERROR_ODD_ARGUMENTS:
if "-min" in arguments:
self.contrast_min = float(arguments[arguments.index("-min") + 1])
if "-max" in arguments:
self.contrast_max = float(arguments[arguments.index("-max") + 1])
if "-c" in arguments:
self.cmap_selection = int(arguments[arguments.index("-c") + 1])
if self.cmap_selection >= len(self.cmaps) or self.cmap_selection < 0:
out.error("Unknown color map index.")
self.cmap_selection = 0
out.warn("Color map set to default.")
if "-scale" in arguments:
if (arguments[arguments.index("-scale") + 1] == self.LIN_SCALE) or (arguments[arguments.index("-scale") + 1] == self.LOG_SCALE):
self.plotscale = arguments[arguments.index("-scale") + 1]
else:
out.error("Unknown scale!")
self.replot()
elif errorcode != ERROR_NO_ARGUMENTS:
out.error("Input required!")
else:
out.error("No data selected!")
def help_layout(self):
"""layout help page entry."""
print("Changes the layout of the plot.")
print("\t-min <number> sets the minimum value of contrast")
print("\t-max <number> sets the maximun value of contrast")
print("\t-c <index> changes the color map, type \"help colormap\" to get a list of available color maps")
print("\t-scale <lin|log> changes the scale to either a linear or logarithmic scale")
def help_colormaps(self):
"""Displays all color maps"""
print("Available color maps:")
for i, color in enumerate(self.cmaps):
print(i, color)
def do_setActive(self, argument):
"""Activates a dataset."""
errorcode, arguments = self.getArg(argument)
alias = "default" #default value
if errorcode != ERROR_NO_ARGUMENTS and errorcode != ERROR_ODD_ARGUMENTS:
if "-a" in arguments:
alias = arguments[arguments.index("-a") + 1]
else:
out.warn("No arguments supplied, trying default.")
if alias in self.dsetName:
self.activeDset = alias
self.meta = mt.MeerkatMetaData(self.dataset)
pyplot.close("all") #in order to prevent display issues
out.okay("Activated " + alias + "!")
else:
out.error(alias + " not found!")
elif errorcode == ERROR_NO_ARGUMENTS:
out.error("No arguments supplied!")
def help_setActive(self):
"""setActive help page entry"""
print("Changes the active dataset")
print("\t-a <alias> alias of the dataset which should be activated")
def do_saveData(self, argument):
"""Saves the last displayed image as a csv file."""
errorcode, arguments = self.getArg(argument)
if errorcode != ERROR_NO_ARGUMENTS and errorcode != ERROR_ODD_ARGUMENTS:
if "-o" in arguments:
outfile = arguments[arguments.index("-o") + 1]
data = np.asarray(self.currentData)
np.savetxt(outfile, data, delimiter=";")
out.okay("Successfully saved as " + outfile + "!")
else:
out.error("No output name given!")
def help_saveData(self):
print("Saves the last displayed image as a csv file.")
print("\t-o <filename> name of the output file")
def do_saveImage(self, argument):
"""Saves the last displayed image as a csv file."""
errorcode, arguments = self.getArg(argument)
        if self.currentImage is not None:
if errorcode != ERROR_NO_ARGUMENTS and errorcode != ERROR_ODD_ARGUMENTS:
if "-o" in arguments:
outfile = arguments[arguments.index("-o") + 1]
self.currentImage.save(outfile)
out.okay("Successfully saved as " + outfile + "!")
else:
out.error("No output name given!")
else:
out.error("No image in buffer to save!")
def help_saveImage(self):
print("Saves the last displayed image as a png file.")
print("\t-o <filename> name of the output file")
def do_unplot(self, argument):
"""Closes all plot windows."""
# errorcode, arguments = self.getArg(argument)
pyplot.close("all")
def help_unplot(self):
"""help text for the unplot command."""
print("Close all visible plots.")
def do_info(self, argument):
"""Prints the meta data."""
        if self.meta is not None:
errorcode, arguments = self.getArg(argument)
            if errorcode != ERROR_ODD_ARGUMENTS:
options = "nafsit"
if "-p" in arguments:
options = arguments[arguments.index("-p") + 1]
if "n" in options: print("Active file name: %s" % os.path.split(self.dataset.filename)[1])
if "a" in options: print("Active file alias: %s" % self.activeDset)
if "f" in options: print("File format: %s" % self.meta.format)
if "s" in options: print("Pixel dimensions: x = %s, y = %s, z = %s" % tuple(self.meta.shape))
if self.meta.format == mt.MeerkatMetaData.dtype_NORMAL: # others do not have the fields
if "i" in options: print("Pixel dimensions: h = %s, k = %s, l = %s" % tuple(self.meta.hklRange))
if "t" in options: print("Pixel dimensions: h/x = %s, k/y = %s, l/z = %s" % tuple(self.meta.steps))
else:
out.error("Open a data file first!")
def help_info(self):
"""info help page entry"""
print("Shows the meta data of the currently selected data set.")
print("If no arguments are given, all metadata is printed.")
print("\t-p <selection> print only a minor part of the metadata.")
print("\tselection is a continous string containing at least one of the following options:")
print("\t\tn ... file name")
print("\t\ta ... file alias")
print("\t\tf ... file format")
print("\t\ts ... pixel dimensions in all three directions (shape of the array)")
print("\t\ti ... hkl indices in all three directions")
print("\t\tt ... step size of the reconstruction")
def do_readFrameSet(self, argument):
"""Reads in a frame set."""
errorcode, arguments = self.getArg(argument)
if errorcode != ERROR_NO_ARGUMENTS and errorcode != ERROR_ODD_ARGUMENTS:
pass
#onblock internal functions
def getArg(self, line):
"""Splits the arguments and checks if the correct number of arguments are given."""
errorcode = NO_ERROR
arguments = str(line).split()
if len(arguments) == 0: #no arguments supplied
arguments = ""
errorcode = ERROR_NO_ARGUMENTS
elif len(arguments) % 2 != 0: #invalid number of arguments
arguments = ""
errorcode = ERROR_ODD_ARGUMENTS
out.error("Invalid number of arguments!")
return errorcode, arguments
def replot(self):
"""pyplot distinguishes between 1D and 2D data, this function should call the right method."""
self.plot()
def plot(self):
"""Uses pyplot to draw 2D data."""
pyplot.clf()
height = self.meta.shape[0] #TODO:needs adjustment for different cuts
width = self.meta.shape[1] #TODO:needs adjustment for different cuts
if (height + width > 1000): #here are some issues with the display size
dpi = width / 5
else:
dpi = width
pyplot.figure(figsize=(height/99.9,width/99.9), dpi=dpi) #creates a display mash-up, corrected below
# The above line is a critical when it comes to image size
pyplot.axes([0,0,1,1])
pyplot.axis("off")
if self.plotscale == self.LIN_SCALE:
pyplot.imshow(self.currentData, interpolation='nearest', clim=[self.contrast_min, self.contrast_max], cmap=self.cmaps[self.cmap_selection])
elif self.plotscale == self.LOG_SCALE:
self.currentData[self.currentData < 0] = 0 # to pervent crashes
pyplot.imshow(self.currentData, interpolation='nearest', norm=matplotlib.colors.LogNorm(vmin=self.contrast_min, vmax=self.contrast_max), cmap=self.cmaps[self.cmap_selection])
self.currentImage = self.plot2img(pyplot.figure)
# the following two lines remove the display mash up produced above
pyplot.close(len(pyplot.get_fignums()) - 1)
pyplot.close(len(pyplot.get_fignums()) - 1)
pyplot.show()
#onblock convertion of images und plots
def plot2img(self, figure):
"""Converts a pyplot figure into a PIL image by the use of the buffer."""
buf = io.BytesIO()
pyplot.savefig(buf, format='png')
buf.seek(0)
return Image.open(buf)
#offblock
#end of internal functions
#offblock
if __name__ == '__main__':
"""Main loop creation, can run in terminal mode or script mode."""
if len(sys.argv) == 2: # in this case, it is assumed that the user provides a file with a list of commands
input = open(sys.argv[1], 'rt')
try:
# setting up silent script mode
interpreter = Burrow(stdin=input)
interpreter.use_rawinput = False # required for file input to read new lines etc correctly
interpreter.prompt = "" # silent mode
interpreter.cmdloop()
finally:
input.close()
elif len(sys.argv) == 1: # plain old commandline
interpreter = Burrow()
interpreter.cmdloop()
else:
out.error("Wrong input, <none> or <script filename> expected!") | gpl-2.0 |
JT5D/scikit-learn | sklearn/hmm.py | 3 | 47342 | # Hidden Markov Models
#
# Author: Ron Weiss <[email protected]>
# and Shiqiao Du <[email protected]>
# API changes: Jaques Grobler <[email protected]>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. If nobody volunteers to write documentation and
make it more stable, this module will be removed in version 0.11.
"""
import string
import numpy as np
from .utils import check_random_state, deprecated
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
    WARNING: Modifies the array in place.
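    Examples
    --------
    A rough illustration (``normalize`` first adds a tiny ``EPS``, so the result
    is only approximately these values)::
        A = np.array([[1., 3.], [2., 2.]])
        normalize(A, axis=1)   # -> approx. [[0.25, 0.75], [0.5, 0.5]]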
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
        emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emmission parameters. Defaults to all
parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
@deprecated("HMM.eval was renamed to HMM.score_samples in 0.14 and will be"
" removed in 0.16.")
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM.
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
_, posteriors = self.score_samples(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : array_like, length `n` List of samples
hidden_states : array_like, length `n` List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
"""
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
self._init(obs, self.init_params)
logprob = []
for i in range(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
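        # Forward algorithm in log space, delegated to the Cython helper; returns
        # the sequence log-likelihood together with the full forward lattice.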
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations > 1:
lneta = np.zeros((n_observations - 1, n_components, n_components))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lnP, lneta)
stats["trans"] += np.exp(logsumexp(lneta, 0))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if self.startprob_prior is None:
self.startprob_prior = 1.0
if self.transmat_prior is None:
self.transmat_prior = 1.0
if 's' in params:
self.startprob_ = normalize(
np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
if 't' in params:
transmat_ = normalize(
np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
axis=1)
self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
Parameters
----------
n_components : int
Number of states.
``_covariance_type`` : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
means : array, shape (`n_components`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
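    A minimal end-to-end sketch (illustrative only; outputs omitted)::
        import numpy as np
        X = [np.random.randn(100, 3)]            # one observation sequence
        model = GaussianHMM(n_components=2).fit(X)
        states = model.predict(X[0])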
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_components=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params)
self._covariance_type = covariance_type
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_prior = means_prior
self.means_weight = means_weight
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_components, self.n_features)):
raise ValueError('means must have shape '
'(n_components, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_components
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_components)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if (hasattr(self, 'n_features')
and self.n_features != obs[0].shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (obs[0].shape[1],
self.n_features))
self.n_features = obs[0].shape[1]
if 'm' in params:
self._means_ = cluster.KMeans(
n_clusters=self.n_components).fit(obs[0]).cluster_centers_
if 'c' in params:
cv = np.cov(obs[0].T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_components)
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['obs'] = np.zeros((self.n_components, self.n_features))
stats['obs**2'] = np.zeros((self.n_components, self.n_features))
stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in range(self.n_components):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
prior = self.means_prior
weight = self.means_weight
if prior is None:
weight = 0
prior = 0
self._means_ = (weight * prior + stats['obs']) / (weight + denom)
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
means_prior = self.means_prior
means_weight = self.means_weight
if means_prior is None:
means_weight = 0
means_prior = 0
meandiff = self._means_ - means_prior
if self._covariance_type in ('spherical', 'diag'):
cv_num = (means_weight * (meandiff) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / cv_den
if self._covariance_type == 'spherical':
self._covars_ = np.tile(
self._covars_.mean(1)[:, np.newaxis],
(1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_components, self.n_features,
self.n_features))
for c in range(self.n_components):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (means_weight * np.outer(meandiff[c],
meandiff[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
(cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum) /
(cvweight + stats['post'][:, None, None]))
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. the covariance parameter on one or
        more components becoming too small). You can fix this by getting
more training data, or increasing covars_prior.
"""
return super(GaussianHMM, self).fit(obs)
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
Attributes
----------
n_components : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
emissionprob : array, shape ('n_components`, 'n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
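    Training data is a list of integer symbol sequences; a minimal sketch::
        model = MultinomialHMM(n_components=2)
        model = model.fit([[0, 1, 2, 1, 0, 2, 1]])   # one symbol sequence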
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_components, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_components, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
emissionprob = normalize(self.random_state.rand(self.n_components,
self.n_symbols), 1)
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_components, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
symbols = np.asarray(obs).flatten()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
            # input contains a negative integer
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
            # input is discontinuous
return False
return True
def fit(self, obs, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
"""
err_msg = ("Input must be both positive integer array and "
"every element must be continuous, but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return _BaseHMM.fit(self, obs, **kwargs)
class GMMHMM(_BaseHMM):
"""Hidden Markov Model with Gaussin mixture emissions
Attributes
----------
init_params : string, optional
Controls which parameters are initialized prior to training. Can
contain any combination of 's' for startprob, 't' for transmat, 'm'
for means, 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat, 'm' for
means, and 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_components`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
Examples
--------
>>> from sklearn.hmm import GMMHMM
>>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
        # XXX: Hotfix for n_mix, which is incompatible with scikit-learn's
# BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in range(self.n_components):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
allobs = np.concatenate(obs, 0)
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(allobs)
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
_, lgmm_posteriors = g.score_samples(obs)
lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
if np.any(np.isnan(tmp_gmm.covars_)):
raise ValueError
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state] +
self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
| bsd-3-clause |
iulian787/spack | var/spack/repos/builtin/packages/yoda/package.py | 2 | 5522 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Yoda(AutotoolsPackage):
"""YODA - Yet more Objects for Data Analysis"""
homepage = "https://yoda.hepforge.org/"
url = "https://yoda.hepforge.org/downloads/?f=YODA-1.8.3.tar.bz2"
tags = ['hep']
version('1.8.3', sha256='d9dd0ea5e0f630cdf4893c09a40c78bd44455777c2125385ecc26fa9a2acba8a')
version('1.8.2', sha256='89558c11cf9b88b0899713e5b4bf8781fdcecc480ff155985ebbf148c6d80bdb')
version('1.8.1', sha256='51472e12065b9469f13906f0dc609e036d0c1dbd2a8e445e7d654aba73660112')
version('1.8.0', sha256='82c62bbaedb4b6b7d50cd42ce5409d453d46c1cc6724047db5efa74d34dd6dc5')
version('1.7.7', sha256='cfb64b099a79ec4d138792f0b464a8fbb04c4345143f77bbdca07acb744628ce')
version('1.7.6', sha256='864a1459c82676c991fcaed931263a415e815e3c9dc2cad2f94bda6fa4d112e5')
version('1.7.5', sha256='7b1dc7bb380d0fbadce12072f5cc21912c115e826182a3922d864e7edea131db')
version('1.7.4', sha256='3df316b89e9c0052104f8956e4f7d26c0b0b05cdace7d908be35c383361e3a71')
version('1.7.3', sha256='ebf6094733823e9cc2d1586aff06db2d8999c74a47e666baf305322f62c48058')
version('1.7.2', sha256='7f093cf947824ec118767c7c1999a50ea9343c173cf8c5062e3800ba54c2943e')
version('1.7.1', sha256='edd7971ecd272314309c800395200b07cf68547cbac3378a02d0b8c9ac03027b')
version('1.7.0', sha256='b3d6bfb0c52ed87cd240cee5e93e09102832d9ef32505d7275f4d3191a35ce3b')
version('1.6.7', sha256='2abf378573832c201bc6a9fecfff5b2006fc98c7a272540326cda8eb5bd95e16')
version('1.6.6', sha256='cf172a496d9108b93420530ea91055d07ecd514d2894d78db46b806530e91d21')
version('1.6.5', sha256='1477fe754cfe2e4e06aa363a773accf18aab960a8b899968b77834368cac14c5')
version('1.6.4', sha256='4c01f43c18b7b2e71f61dea0bb8c6fdc099c8e1a66256c510652884c4ffffbca')
version('1.6.3', sha256='1dd7e334fe54a05ff911d9e227d395abc5efd29e29d60187a036b2201f97da19')
version('1.6.2', sha256='5793cd1320694118423888801ca520f2719565fde04699ee69e1751f47cb57a8')
version('1.6.1', sha256='ec3f4cc4eb57f94fb431cc37db10eb831f025df95ffd9e516b8009199253c62b')
version('1.6.0', sha256='2920ef2588268484b650dc08438664a3539b79c65a9e80d58e3771bb699e2a6b')
version('1.5.9', sha256='1a19cc8c34c08f1797a93d355250e682eb85d62d4ab277b6714d7873b4bdde75')
version('1.5.8', sha256='011c5be5cc565f8baf02e7ebbe57a57b4d70dc6a528d5b0102700020bbf5a973')
version('1.5.7', sha256='f775df11b034154b8f5d43f12007692c3314672e60d3e554b3928fe5b0f00c29')
version('1.5.6', sha256='050e17b1b80658213281a2e4112dfecc0096f01f269cd739d601b2fd0e790a0c')
version('1.5.5', sha256='ce45df6248c6c50633953048240513dc52ca5c9144ef69ea72ada2df23bc4918')
version('1.5.4', sha256='c41853a1f3aa0794875ae09c1ba4348942eb890e798ac7cee6e3505a9b68b678')
version('1.5.3', sha256='1220ac0ae204c3ed6b22a6a35c30d9b5c1ded35a1054cff131861b4a919d4904')
version('1.5.2', sha256='ec113c53a6174b174aaea8f45802cc419184ce056123b93ab8d3f80fc1bd4986')
version('1.5.1', sha256='a8b088b3ede67d560e40f91f4f99be313f21841c46ce2f657af7692a7bbe3276')
version('1.5.0', sha256='2c2b77344854fac937a8ef07c0928c50829ff4c69bcad6e0afb92da611b7dd18')
version('1.4.0', sha256='e76a129f7c2b72b53525fe0b712606eeeab0dc145daa070ebf0728f0384eaf48')
version('1.3.1', sha256='274e196d009e3aac6dd1f2db876de9613ca1a3c21ec3364bc3662f5493bc9747')
version('1.3.0', sha256='d63197d5940b481ecb06cf4703d9c0b49388f32cad61ccae580d1b80312bd215')
version('1.2.1', sha256='e86964e91e4fbbba443d2848f55c028001de4713dcc64c40339389de053e7d8b')
version('1.2.0', sha256='143fa86cd7965d26d3897a5752307bfe08f4866c2f9a9f226a393127d19ee353')
version('1.1.0', sha256='5d2e8f3c1cddfb59fe651931c7c605fe0ed067864fa86047aed312c6a7938e01')
version('1.0.7', sha256='145c27d922c27a4e1d6d50030f4ddece5f03d6c309a5e392a5fcbb5e83e747ab')
version('1.0.6', sha256='357732448d67a593e5ff004418f2a2a263a1401ffe84e021f8a714aa183eaa21')
version('1.0.5', sha256='ba72bc3943a1b39fa63900570948199cf5ed5c7523f2c4af4740e51b098f1794')
version('1.0.4', sha256='697fe397c69689feecb2a731e19b2ff85e19343b8198c4f18a7064c4f7123950')
version('1.0.3', sha256='6a1d1d75d9d74da457726ea9463c1b0b6ba38d4b43ef54e1c33f885e70fdae4b')
variant("root", default=False, description="Enable ROOT interface")
depends_on('python', type=('build', 'run'))
depends_on('py-future', type=('build', 'run'))
depends_on('boost', when='@:1.6.0', type=('build', 'run'))
depends_on('py-cython', type='build')
depends_on('py-matplotlib', when='@1.3.0:', type=('build', 'run'))
depends_on('root', type=('build', 'run'), when='+root')
patch('yoda-1.5.5.patch', level=0, when='@1.5.5')
patch('yoda-1.5.9.patch', level=0, when='@1.5.9')
patch('yoda-1.6.1.patch', level=0, when='@1.6.1')
patch('yoda-1.6.2.patch', level=0, when='@1.6.2')
patch('yoda-1.6.3.patch', level=0, when='@1.6.3')
patch('yoda-1.6.4.patch', level=0, when='@1.6.4')
patch('yoda-1.6.5.patch', level=0, when='@1.6.5')
patch('yoda-1.6.6.patch', level=0, when='@1.6.6')
patch('yoda-1.6.7.patch', level=0, when='@1.6.7')
def configure_args(self):
args = []
if self.spec.satisfies('@:1.6.0'):
            args.append('--with-boost=' + self.spec['boost'].prefix)
if '+root' in self.spec:
            args.append('--enable-root')
return args
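    # Illustrative note (not part of the original recipe): for a spec such as
    # `yoda@1.5.9 +root` the method above would be expected to return
    # ['--with-boost=<boost prefix>', '--enable-root'], since 1.5.9 satisfies
    # '@:1.6.0'; for `yoda@1.8.3 ~root` it would return an empty list.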
| lgpl-2.1 |
benfitzpatrick/rose | etc/tutorial/cylc-forecasting-suite/lib/python/util.py | 4 | 9244 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (C) 2012-2019 British Crown (Met Office) & Contributors - GNU V3+.
# This is illustrative code developed for tutorial purposes, it is not
# intended for scientific use and is not guaranteed to be accurate or correct.
# -----------------------------------------------------------------------------
from copy import copy
import math
import jinja2
import sys
R_0 = 6371. # Radius of the Earth (km).
def frange(start, stop, step):
"""Implementation of python's xrange which works with floats."""
while start < stop:
yield start
start += step
def read_csv(filename, cast=float):
"""Reads in data from a 2D csv file.
Args:
filename (str): The path to the file to read.
cast (function): A function to call on each value to convert the data
into the desired format.
"""
data = []
with open(filename, 'r') as datafile:
line = datafile.readline()
while line:
data.append(list(map(cast, line.split(','))))
line = datafile.readline()
return data
def write_csv(filename, matrix, fmt='%.2f'):
"""Write data from a 2D array to a csv format file."""
with open(filename, 'w+') as datafile:
for row in matrix:
datafile.write(', '.join(fmt % x for x in row) + '\n')
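# Illustrative helper (not part of the original tutorial code): a small
# round-trip through write_csv and read_csv. The temporary file path is an
# arbitrary choice for demonstration purposes.
def _demo_csv_roundtrip(path='/tmp/util_demo.csv'):
    """Write a 2x3 matrix to csv and read it back (illustration only)."""
    matrix = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    write_csv(path, matrix)
    # Values come back as floats because read_csv casts with float by default.
    return read_csv(path)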
def field_to_csv(field, x_range, y_range, filename):
"""Extrapolate values from the field and write them to a csv file.
Args:
filename (str): The path of the csv file to write to.
field (function): A function of the form f(x, y) -> z.
x_range (list): List of the x coordinates of the extrapolated grid.
These are the extrapolation coordinates, the length of this list
defines the size of the grid.
        y_range (list): List of the y coordinates of the extrapolated grid.
These are the extrapolation coordinates, the length of this list
defines the size of the grid.
"""
with open(filename, 'w+') as csv_file:
for itt_y in y_range:
csv_file.write(', '.join('%.2f' % field(x, itt_y) for
x in x_range) + '\n')
def generate_matrix(dim_x, dim_y, value=0.):
"""Generates a 2D list with the desired dimensions.
Args:
dim_x (int): The x-dimension of the matrix.
dim_y (int): The y-dimension of the matrix.
value: The default value for each cell of the matrix.
"""
matrix = []
for _ in range(dim_y):
matrix.append([copy(value)] * dim_x)
return matrix
def permutations(collection_1, collection_2):
"""Yield all permutations of two collections."""
for val_1 in collection_1:
for val_2 in collection_2:
yield val_1, val_2
def great_arc_distance(coordinate_1, coordinate_2):
"""Compute the distance between two (lng, lat) coordinates in km.
Uses the Haversine formula.
Args:
coordinate_1 (tuple): A 2-tuple (lng, lat) of the first coordinate.
coordinate_2 (tuple): A 2-tuple (lng, lat) of the second coordinate.
"""
(lng_1, lat_1) = coordinate_1
(lng_2, lat_2) = coordinate_2
lng_1 = math.radians(lng_1)
lat_1 = math.radians(lat_1)
lng_2 = math.radians(lng_2)
lat_2 = math.radians(lat_2)
return (
2 * R_0 * math.asin(
math.sqrt(
(math.sin((lat_2 - lat_1) / 2.) ** 2) + (
math.cos(lat_1) *
math.cos(lat_2) *
(math.sin((lng_2 - lng_1) / 2.) ** 2)
)
)
)
)
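# Sanity-check sketch for great_arc_distance (an illustrative addition, not
# part of the original tutorial): the distance from the equator to the pole
# along a meridian is a quarter of a great circle, i.e. pi * R_0 / 2, which
# is roughly 10007.5 km for R_0 = 6371 km.
def _check_great_arc_distance():
    """Return the equator-to-pole distance in km (expected ~10007.5)."""
    return great_arc_distance((0.0, 0.0), (0.0, 90.0))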
def interpolate_grid(points, dim_x, dim_y, d_x, d_y, spline_order=0):
"""Interpolate 2D data onto a grid.
Args:
points (list): The points to interpolate as a list of 3-tuples
(x, y, z).
dim_x (int): The size of the grid in the x-dimension.
dim_y (int): The size of the grid in the y-dimension.
d_x (float): The grid spacing in the x-dimension.
d_y (float): The grid spacing in the y-dimension.
spline_order (int): The order of the beta-spline to use for
            interpolation (0 = nearest).
    Returns:
list - 2D matrix of dimensions dim_x, dim_y containing the interpolated
data.
"""
def spline_0(pos_x, pos_y, z_val):
"""Zeroth order beta spline (i.e. nearest point)."""
return [(int(round(pos_x)), int(round(pos_y)), z_val)] # [(x, y, z)]
def spline_1(pos_x, pos_y, z_val):
"""First order beta spline (weight spread about four nearest ponts)."""
x_0 = int(math.floor(pos_x))
y_0 = int(math.floor(pos_y))
x_1 = x_0 + 1
y_1 = y_0 + 1
return [
# (x, y, z), ...
(x_0, y_0, (x_0 + d_x - pos_x) * (y_0 + d_y - pos_y) * z_val),
(x_1, y_0, (pos_x - x_0) * (y_0 + d_y - pos_y) * z_val),
(x_0, y_1, (x_0 + d_x - pos_x) * (pos_y - y_0) * z_val),
(x_1, y_1, (pos_x - x_0) * (pos_y - y_0) * z_val)
]
if spline_order == 0:
spline = spline_0
elif spline_order == 1:
spline = spline_1
else:
raise ValueError('Invalid spline order "%d" must be in (0, 1).' %
spline_order)
grid = generate_matrix(dim_x, dim_y, 0.)
for x_val, y_val, z_val in points:
x_coord = x_val / d_x
y_coord = y_val / d_y
for grid_x, grid_y, grid_z in spline(x_coord, y_coord, z_val):
try:
grid[grid_y][grid_x] += grid_z
except IndexError:
# Grid point out of bounds => skip.
pass
return grid
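# Minimal usage sketch for interpolate_grid (an illustrative addition): a
# single weighted sample is dropped onto a 4x4 grid with unit spacing using
# the nearest-point (order 0) spline.
def _demo_interpolate_grid():
    points = [(1.2, 2.7, 5.0)]  # one (x, y, z) sample
    grid = interpolate_grid(points, dim_x=4, dim_y=4, d_x=1.0, d_y=1.0,
                            spline_order=0)
    # With order 0 all of the weight lands on the nearest cell, grid[3][1].
    return grid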
def plot_vector_grid(filename, x_grid, y_grid):
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
print('Plotting disabled', file=sys.stderr)
return
fig = plt.figure()
x_coords = []
y_coords = []
z_coords = []
for itt_x in range(len(x_grid[0])):
for itt_y in range(len(x_grid)):
x_coords.append(itt_x)
y_coords.append(itt_y)
z_coords.append((
x_grid[itt_y][itt_x],
y_grid[itt_y][itt_x]
))
plt.quiver(x_coords,
y_coords,
[x[0] for x in z_coords],
[y[1] for y in z_coords])
fig.savefig(filename)
def get_grid_coordinates(lng, lat, domain, resolution):
"""Return the grid coordinates for a lat, long coordinate pair."""
# NOTE: Grid coordinates run from *top* left to bottom right.
length_y = int(abs(domain['lat2'] - domain['lat1']) // resolution)
return (
int((abs(lng - domain['lng1'])) // resolution),
length_y - int((abs(lat - domain['lat1'])) // resolution))
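# Illustrative example for get_grid_coordinates (not part of the original
# tutorial). It assumes the domain dictionary uses (lng1, lat1) for the
# south-west corner and (lng2, lat2) for the north-east corner, which is what
# the arithmetic above implies.
def _demo_get_grid_coordinates():
    domain = {'lng1': 0.0, 'lat1': 50.0, 'lng2': 10.0, 'lat2': 60.0}
    north_west = get_grid_coordinates(0.0, 60.0, domain, 1.0)   # -> (0, 0)
    south_east = get_grid_coordinates(10.0, 50.0, domain, 1.0)  # -> (10, 10)
    return north_west, south_east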
class SurfaceFitter(object):
"""A 2D interpolation for random points.
A standin for scipy.interpolate.interp2d
Args:
x_points (list): A list of the x coordinates of the points to
interpolate.
y_points (list): A list of the y coordinates of the points.
z_points (list): A list of the z coordinates of the points.
kind (str): String representing the order of the interpolation to
perform (either linear, quadratic or cubic).
Returns:
function: fcn(x, y) -> z
"""
def __init__(self, x_points, y_points, z_points, kind='linear'):
self.points = list(zip(x_points, y_points, z_points))
if kind == 'linear':
self.power = 1.
elif kind == 'quadratic':
self.power = 2.
elif kind == 'cubic':
self.power = 3.
else:
raise ValueError('"%s" is not a valid interpolation method' % kind)
def __call__(self, grid_x, grid_y):
sum_value = 0.0
sum_weight = 0.0
z_val = None
for x_point, y_point, z_point in self.points:
d_x = grid_x - x_point
d_y = grid_y - y_point
if d_x == 0 and d_y == 0:
# This point is exactly at the grid location we are
# interpolating for, return this value.
z_val = z_point
break
else:
weight = 1. / ((math.sqrt(d_x ** 2 + d_y ** 2)) ** self.power)
sum_weight += weight
sum_value += weight * z_point
if z_val is None:
z_val = sum_value / sum_weight
return z_val
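# Usage sketch for SurfaceFitter (an illustrative addition): inverse-distance
# weighted interpolation of three scattered samples.
def _demo_surface_fitter():
    fitter = SurfaceFitter([0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 2.0, 3.0],
                           kind='linear')
    exact = fitter(1.0, 0.0)    # exactly on a sample point -> 2.0
    blended = fitter(0.5, 0.5)  # elsewhere: distance-weighted average
    return exact, blended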
def parse_domain(domain):
bbox = list(map(float, domain.split(',')))
return {
'lng1': bbox[0],
'lat1': bbox[1],
'lng2': bbox[2],
'lat2': bbox[3]
}
def generate_html_map(filename, template_file, data, domain, resolution):
with open(template_file, 'r') as template:
with open(filename, 'w+') as html_file:
html_file.write(jinja2.Template(template.read()).render(
resolution=resolution,
lng1=domain['lng1'],
lng2=domain['lng2'],
lat1=domain['lat1'],
lat2=domain['lat2'],
data=data))
| gpl-3.0 |
kinverarity1/gpgLabs | EM/FEM3Loop/FEM3loop.py | 1 | 4284 | import numpy as np
import matplotlib.pyplot as plt
import scipy.io
def mind(x,y,z,dincl,ddecl,x0,y0,z0,aincl,adecl):
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
z = np.array(z, dtype=float)
x0 = np.array(x0, dtype=float)
y0 = np.array(y0, dtype=float)
z0 = np.array(z0, dtype=float)
dincl = np.array(dincl, dtype=float)
ddecl = np.array(ddecl, dtype=float)
aincl = np.array(aincl, dtype=float)
adecl = np.array(adecl, dtype=float)
di=np.pi*dincl/180.0
dd=np.pi*ddecl/180.0
cx=np.cos(di)*np.cos(dd)
cy=np.cos(di)*np.sin(dd)
cz=np.sin(di)
ai=np.pi*aincl/180.0
ad=np.pi*adecl/180.0
ax=np.cos(ai)*np.cos(ad)
ay=np.cos(ai)*np.sin(ad)
az=np.sin(ai)
# begin the calculation
a=x-x0
b=y-y0
h=z-z0
rt=np.sqrt(a**2.+b**2.+h**2.)**5.
txy=3.*a*b/rt
txz=3.*a*h/rt
tyz=3.*b*h/rt
txx=(2.*a**2.-b**2.-h**2.)/rt
tyy=(2.*b**2.-a**2.-h**2.)/rt
tzz=-(txx+tyy)
bx= (txx*cx+txy*cy+txz*cz)
by= (txy*cx+tyy*cy+tyz*cz)
bz= (txz*cx+tyz*cy+tzz*cz)
return bx*ax+by*ay+bz*az
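# Illustrative check (added for clarity, not part of the original lab code):
# the coupling computed by mind() between two vertical dipoles separated
# horizontally falls off as 1/r**3, so doubling the separation should reduce
# it by a factor of 8.
def _demo_mind_falloff():
    near = mind(0., 0., 0., 90., 0., 2., 0., 0., 90., 0.)
    far = mind(0., 0., 0., 90., 0., 4., 0., 0., 90., 0.)
    return near / far  # expected to be ~8.0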
def fem3loop(L,R,xc,yc,zc,dincl,ddecl,S,ht,f,xmin,xmax,dx):
L = np.array(L, dtype=float)
R = np.array(R, dtype=float)
xc = np.array(xc, dtype=float)
yc = np.array(yc, dtype=float)
zc = np.array(zc, dtype=float)
dincl = np.array(dincl, dtype=float)
ddecl = np.array(ddecl, dtype=float)
S = np.array(S, dtype=float)
ht = np.array(ht, dtype=float)
f = np.array(f, dtype=float)
xmin = np.array(xmin, dtype=float)
xmax = np.array(xmax, dtype=float)
dx = np.array(dx, dtype=float)
ymin = xmin
ymax = xmax
dely = dx
# generate the grid
xp=np.arange(xmin,xmax,dx)
yp=np.arange(ymin,ymax,dely)
[y,x]=np.meshgrid(yp,xp)
z=0.*x-ht
# set up the response arrays
real_response=0.0*x
imag_response=0.0*x
# frequency characteristics
alpha=2.*np.pi*f*L/R
f_factor=(alpha**2.+1j*alpha)/(1+alpha**2.)
amin=0.01
amax=100.
da=4./40.
alf=np.arange(-2.,2.,da)
alf=10.**alf
fre=alf**2./(1.+alf**2.)
fim=alf/(1.+alf**2.)
# simulate anomalies
yt=y-S/2.
yr=y+S/2.
dm=-S/2.
dp= S/2.
M13=mind(0.,dm,0.,90.,0., 0., dp, 0., 90.,0.)
M12=L*mind(x,yt,z,90.,0.,xc,yc,zc,dincl,ddecl)
M23=L*mind(xc,yc,zc,dincl,ddecl,x,yr,z,90.,0.)
c_response=-M12*M23*f_factor/(M13*L)
# scaled to simulate a net volumetric effect
real_response=np.real(c_response)*1000.
imag_response=np.imag(c_response)*1000.
fig, ax = plt.subplots(2,2, figsize = (10,6))
plt.subplot(2,2,1)
plt.semilogx(alf,fre,'.-b')
plt.semilogx(alf,fim,'.--g')
plt.plot([alpha, alpha],[0., 1.],'-k')
plt.legend(['Real','Imag'],loc=2)
plt.xlabel('$\\alpha = \\omega L /R$')
plt.ylabel('Frequency Response')
plt.title('Plot 1: EM responses of loop')
plt.subplot(2,2,2)
    kx = int(np.ceil(xp.size / 2.))
plt.plot(y[kx,:],real_response[kx,:],'.-b')
plt.plot(y[kx,:],imag_response[kx,:],'.--g')
# plt.legend(['Real','Imag'],loc=2)
plt.xlabel('Easting')
plt.ylabel('H$_s$/H$_p$')
plt.title('Plot 2: EW cross section along Northing = %1.1f' %(x[kx,0]))
vminR = real_response.min()
vmaxR = real_response.max()
plt.subplot(2,2,3)
plt.plot(np.r_[xp.min(),xp.max()], np.zeros(2), 'k--', lw=1)
plt.imshow(real_response,extent=[xp.min(),xp.max(),yp.min(),yp.max()], vmin = vminR, vmax = vmaxR)
plt.xlabel('Easting (m)')
plt.ylabel('Northing (m)')
plt.title('Plot 3: Real Component')
clb = plt.colorbar()
clb.set_label('H$_s$/H$_p$')
plt.tight_layout()
vminI = imag_response.min()
vmaxI = imag_response.max()
plt.subplot(2,2,4)
plt.plot(np.r_[xp.min(),xp.max()], np.zeros(2), 'k--', lw=1)
plt.imshow(imag_response,extent=[xp.min(),xp.max(),yp.min(),yp.max()], vmin = vminI, vmax = vmaxI)
plt.xlabel('Easting (m)')
plt.ylabel('Northing (m)')
plt.title('Plot 4: Imag Component')
clb = plt.colorbar()
clb.set_label('H$_s$/H$_p$')
plt.tight_layout()
plt.show()
def interactfem3loop(L,R,xc,yc,zc,dincl,ddecl,f,dx,default=True):
if default == True:
dx = 0.25
xc = 0.
yc = 0.
zc = 1.
dincl = 0.
ddecl = 90.
L = 0.1
R = 2000.
S = 4.
ht = 1.
xmin = -40.*dx
xmax = 40.*dx
return fem3loop(L,R,-yc,xc,zc,dincl,ddecl,S,ht,f,xmin,xmax,dx)
if __name__ == '__main__':
L = 0.1
R = 2000
xc = 0.
yc = 0.
zc = 2.
dincl = 0.
ddecl = 90.
S = 4.
ht = 0.
f = 10000.
xmin = -10.
xmax = 10.
dx = 0.25
fem3loop(L,R,xc,yc,zc,dincl,ddecl,S,ht,f,xmin,xmax,dx)
| mit |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/tools/rplot.py | 9 | 29164 | import random
import warnings
from copy import deepcopy
from pandas.core.common import _values_from_object
import numpy as np
from pandas.compat import range, zip
#
# TODO:
# * Make sure legends work properly
#
warnings.warn("\n"
"The rplot trellis plotting interface is deprecated and will be "
"removed in a future version. We refer to external packages "
"like seaborn for similar but more refined functionality. \n\n"
"See our docs http://pandas.pydata.org/pandas-docs/stable/visualization.html#rplot "
"for some example how to convert your existing code to these "
"packages.", FutureWarning, stacklevel=2)
class Scale:
"""
Base class for mapping between graphical and data attributes.
"""
pass
class ScaleGradient(Scale):
"""
A mapping between a data attribute value and a
point in colour space between two specified colours.
"""
def __init__(self, column, colour1, colour2):
"""Initialize ScaleGradient instance.
Parameters:
-----------
column: string, pandas DataFrame column name
colour1: tuple, 3 element tuple with float values representing an RGB colour
colour2: tuple, 3 element tuple with float values representing an RGB colour
"""
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.categorical = False
def __call__(self, data, index):
"""Return a colour corresponding to data attribute value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
        A three element tuple representing an RGB colour somewhere between colour1 and colour2
"""
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
x_scaled = (x - a) / (b - a)
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
class ScaleGradient2(Scale):
"""
Create a mapping between a data attribute value and a
point in colour space in a line of three specified colours.
"""
def __init__(self, column, colour1, colour2, colour3):
"""Initialize ScaleGradient2 instance.
Parameters:
-----------
column: string, pandas DataFrame column name
colour1: tuple, 3 element tuple with float values representing an RGB colour
colour2: tuple, 3 element tuple with float values representing an RGB colour
colour3: tuple, 3 element tuple with float values representing an RGB colour
"""
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.colour3 = colour3
self.categorical = False
def __call__(self, data, index):
"""Return a colour corresponding to data attribute value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
        A three element tuple representing an RGB colour somewhere along the line
of colour1, colour2 and colour3
"""
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
r3, g3, b3 = self.colour3
x_scaled = (x - a) / (b - a)
if x_scaled < 0.5:
x_scaled *= 2.0
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
else:
x_scaled = (x_scaled - 0.5) * 2.0
return (r2 + (r3 - r2) * x_scaled,
g2 + (g3 - g2) * x_scaled,
b2 + (b3 - b2) * x_scaled)
class ScaleSize(Scale):
"""
Provide a mapping between a DataFrame column and matplotlib
scatter plot shape size.
"""
def __init__(self, column, min_size=5.0, max_size=100.0, transform=lambda x: x):
"""Initialize ScaleSize instance.
Parameters:
-----------
column: string, a column name
min_size: float, minimum point size
max_size: float, maximum point size
transform: a one argument function of form float -> float (e.g. lambda x: log(x))
"""
self.column = column
self.min_size = min_size
self.max_size = max_size
self.transform = transform
self.categorical = False
def __call__(self, data, index):
"""Return matplotlib scatter plot marker shape size.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
"""
x = data[self.column].iget(index)
a = float(min(data[self.column]))
b = float(max(data[self.column]))
return self.transform(self.min_size + ((x - a) / (b - a)) *
(self.max_size - self.min_size))
class ScaleShape(Scale):
"""
Provides a mapping between matplotlib marker shapes
and attribute values.
"""
def __init__(self, column):
"""Initialize ScaleShape instance.
Parameters:
-----------
column: string, pandas DataFrame column name
"""
self.column = column
self.shapes = ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x']
self.legends = set([])
self.categorical = True
def __call__(self, data, index):
"""Returns a matplotlib marker identifier.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
a matplotlib marker identifier
"""
values = sorted(list(set(data[self.column])))
if len(values) > len(self.shapes):
raise ValueError("Too many different values of the categorical attribute for ScaleShape")
x = data[self.column].iget(index)
return self.shapes[values.index(x)]
class ScaleRandomColour(Scale):
"""
    Maps each value of a DataFrame attribute to a random (but reproducible) colour.
"""
def __init__(self, column):
"""Initialize ScaleRandomColour instance.
Parameters:
-----------
column: string, pandas DataFrame column name
"""
self.column = column
self.categorical = True
def __call__(self, data, index):
"""Return a tuple of three floats, representing
an RGB colour.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
"""
random.seed(data[self.column].iget(index))
return [random.random() for _ in range(3)]
class ScaleConstant(Scale):
"""
Constant returning scale. Usually used automatically.
"""
def __init__(self, value):
"""Initialize ScaleConstant instance.
Parameters:
-----------
value: any Python value to be returned when called
"""
self.value = value
self.categorical = False
def __call__(self, data, index):
"""Return the constant value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
A constant value specified during initialisation
"""
return self.value
def default_aes(x=None, y=None):
"""Create the default aesthetics dictionary.
Parameters:
-----------
x: string, DataFrame column name
y: string, DataFrame column name
Returns:
--------
a dictionary with aesthetics bindings
"""
return {
'x' : x,
'y' : y,
'size' : ScaleConstant(40.0),
'colour' : ScaleConstant('grey'),
'shape' : ScaleConstant('o'),
'alpha' : ScaleConstant(1.0),
}
def make_aes(x=None, y=None, size=None, colour=None, shape=None, alpha=None):
"""Create an empty aesthetics dictionary.
Parameters:
-----------
x: string, DataFrame column name
y: string, DataFrame column name
size: function, binding for size attribute of Geoms
colour: function, binding for colour attribute of Geoms
shape: function, binding for shape attribute of Geoms
alpha: function, binding for alpha attribute of Geoms
Returns:
--------
a dictionary with aesthetics bindings
"""
if not hasattr(size, '__call__') and size is not None:
size = ScaleConstant(size)
if not hasattr(colour, '__call__') and colour is not None:
colour = ScaleConstant(colour)
if not hasattr(shape, '__call__') and shape is not None:
shape = ScaleConstant(shape)
if not hasattr(alpha, '__call__') and alpha is not None:
alpha = ScaleConstant(alpha)
if any([isinstance(size, scale) for scale in [ScaleConstant, ScaleSize]]) or size is None:
pass
else:
raise ValueError('size mapping should be done through ScaleConstant or ScaleSize')
if any([isinstance(colour, scale) for scale in [ScaleConstant, ScaleGradient, ScaleGradient2, ScaleRandomColour]]) or colour is None:
pass
else:
raise ValueError('colour mapping should be done through ScaleConstant, ScaleRandomColour, ScaleGradient or ScaleGradient2')
if any([isinstance(shape, scale) for scale in [ScaleConstant, ScaleShape]]) or shape is None:
pass
else:
raise ValueError('shape mapping should be done through ScaleConstant or ScaleShape')
if any([isinstance(alpha, scale) for scale in [ScaleConstant]]) or alpha is None:
pass
else:
raise ValueError('alpha mapping should be done through ScaleConstant')
return {
'x' : x,
'y' : y,
'size' : size,
'colour' : colour,
'shape' : shape,
'alpha' : alpha,
}
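# Minimal usage sketch for make_aes (an illustrative addition; the column
# names are hypothetical): colour follows a two-point gradient over one
# column and marker size scales with another.
def _demo_make_aes():
    return make_aes(x='SepalLength', y='SepalWidth',
                    colour=ScaleGradient('PetalLength', (0.2, 0.3, 0.4),
                                         (0.8, 0.5, 0.1)),
                    size=ScaleSize('PetalWidth'))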
class Layer:
"""
Layer object representing a single plot layer.
"""
def __init__(self, data=None, **kwds):
"""Initialize layer object.
Parameters:
-----------
data: pandas DataFrame instance
aes: aesthetics dictionary with bindings
"""
self.data = data
self.aes = make_aes(**kwds)
self.legend = {}
def work(self, fig=None, ax=None):
"""Do the drawing (usually) work.
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis object
Returns:
--------
a tuple with the same figure and axis instances
"""
return fig, ax
class GeomPoint(Layer):
def work(self, fig=None, ax=None):
"""Render the layer on a matplotlib axis.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
for index in range(len(self.data)):
row = self.data.iloc[index]
x = row[self.aes['x']]
y = row[self.aes['y']]
size_scaler = self.aes['size']
colour_scaler = self.aes['colour']
shape_scaler = self.aes['shape']
alpha = self.aes['alpha']
size_value = size_scaler(self.data, index)
colour_value = colour_scaler(self.data, index)
marker_value = shape_scaler(self.data, index)
alpha_value = alpha(self.data, index)
patch = ax.scatter(x, y,
s=size_value,
c=colour_value,
marker=marker_value,
alpha=alpha_value)
label = []
if colour_scaler.categorical:
label += [colour_scaler.column, row[colour_scaler.column]]
if shape_scaler.categorical:
label += [shape_scaler.column, row[shape_scaler.column]]
self.legend[tuple(label)] = patch
ax.set_xlabel(self.aes['x'])
ax.set_ylabel(self.aes['y'])
return fig, ax
class GeomPolyFit(Layer):
"""
Draw a polynomial fit of specified degree.
"""
def __init__(self, degree, lw=2.0, colour='grey'):
"""Initialize GeomPolyFit object.
Parameters:
-----------
degree: an integer, polynomial degree
lw: line width
colour: matplotlib colour
"""
self.degree = degree
self.lw = lw
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw the polynomial fit on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from numpy.polynomial.polynomial import polyfit
from numpy.polynomial.polynomial import polyval
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
min_x = min(x)
max_x = max(x)
c = polyfit(x, y, self.degree)
x_ = np.linspace(min_x, max_x, len(x))
y_ = polyval(x_, c)
ax.plot(x_, y_, lw=self.lw, c=self.colour)
return fig, ax
class GeomScatter(Layer):
"""
    An efficient scatter plot; use this instead of GeomPoint for speed.
"""
def __init__(self, marker='o', colour='lightblue', alpha=1.0):
"""Initialize GeomScatter instance.
Parameters:
-----------
marker: matplotlib marker string
colour: matplotlib colour
alpha: matplotlib alpha
"""
self.marker = marker
self.colour = colour
self.alpha = alpha
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw a scatter plot on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
ax.scatter(x, y, marker=self.marker, c=self.colour, alpha=self.alpha)
return fig, ax
class GeomHistogram(Layer):
"""
    An efficient histogram; use this instead of GeomBar for speed.
"""
def __init__(self, bins=10, colour='lightblue'):
"""Initialize GeomHistogram instance.
Parameters:
-----------
bins: integer, number of histogram bins
colour: matplotlib colour
"""
self.bins = bins
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw a histogram on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
ax.hist(_values_from_object(x), self.bins, facecolor=self.colour)
ax.set_xlabel(self.aes['x'])
return fig, ax
class GeomDensity(Layer):
"""
A kernel density estimation plot.
"""
def work(self, fig=None, ax=None):
"""Draw a one dimensional kernel density plot.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from scipy.stats import gaussian_kde
x = self.data[self.aes['x']]
gkde = gaussian_kde(x)
ind = np.linspace(x.min(), x.max(), 200)
ax.plot(ind, gkde.evaluate(ind))
return fig, ax
class GeomDensity2D(Layer):
def work(self, fig=None, ax=None):
"""Draw a two dimensional kernel density plot.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
rvs = np.array([x, y])
x_min = x.min()
x_max = x.max()
y_min = y.min()
y_max = y.max()
X, Y = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
import scipy.stats as stats
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.contour(Z, extent=[x_min, x_max, y_min, y_max])
return fig, ax
class TrellisGrid(Layer):
def __init__(self, by):
"""Initialize TreelisGrid instance.
Parameters:
-----------
by: column names to group by
"""
if len(by) != 2:
raise ValueError("You must give a list of length 2 to group by")
elif by[0] == '.' and by[1] == '.':
raise ValueError("At least one of grouping attributes must be not a dot")
self.by = by
def trellis(self, layers):
"""Create a trellis structure for a list of layers.
        Each layer will be cloned with different data into a two dimensional grid.
Parameters:
-----------
layers: a list of Layer objects
Returns:
--------
        trellised_layers: Clones of each layer in the list arranged in a trellised lattice
"""
trellised_layers = []
for layer in layers:
data = layer.data
if self.by[0] == '.':
grouped = data.groupby(self.by[1])
elif self.by[1] == '.':
grouped = data.groupby(self.by[0])
else:
grouped = data.groupby(self.by)
groups = list(grouped.groups.keys())
if self.by[0] == '.' or self.by[1] == '.':
shingle1 = set([g for g in groups])
else:
shingle1 = set([g[0] for g in groups])
shingle2 = set([g[1] for g in groups])
if self.by[0] == '.':
self.rows = 1
self.cols = len(shingle1)
elif self.by[1] == '.':
self.rows = len(shingle1)
self.cols = 1
else:
self.rows = len(shingle1)
self.cols = len(shingle2)
trellised = [[None for _ in range(self.cols)] for _ in range(self.rows)]
self.group_grid = [[None for _ in range(self.cols)] for _ in range(self.rows)]
row = 0
col = 0
for group, data in grouped:
new_layer = deepcopy(layer)
new_layer.data = data
trellised[row][col] = new_layer
self.group_grid[row][col] = group
col += 1
if col >= self.cols:
col = 0
row += 1
trellised_layers.append(trellised)
return trellised_layers
def dictionary_union(dict1, dict2):
"""Take two dictionaries, return dictionary union.
Parameters:
-----------
dict1: Python dictionary
dict2: Python dictionary
Returns:
--------
A union of the dictionaries. It assumes that values
with the same keys are identical.
"""
keys1 = list(dict1.keys())
keys2 = list(dict2.keys())
result = {}
for key1 in keys1:
result[key1] = dict1[key1]
for key2 in keys2:
result[key2] = dict2[key2]
return result
def merge_aes(layer1, layer2):
"""Merges the aesthetics dictionaries for the two layers.
    See the sequence_layers function; which of the two layers comes first
    and which comes second is important.
Parameters:
-----------
layer1: Layer object
layer2: Layer object
"""
for key in layer2.aes.keys():
if layer2.aes[key] is None:
layer2.aes[key] = layer1.aes[key]
def sequence_layers(layers):
"""Go through the list of layers and fill in the missing bits of information.
The basic rules are this:
* If the current layer has data set to None, take the data from previous layer.
* For each aesthetic mapping, if that mapping is set to None, take it from previous layer.
Parameters:
-----------
layers: a list of Layer objects
"""
for layer1, layer2 in zip(layers[:-1], layers[1:]):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layers
def sequence_grids(layer_grids):
"""Go through the list of layer girds and perform the same thing as sequence_layers.
Parameters:
-----------
layer_grids: a list of two dimensional layer grids
"""
for grid1, grid2 in zip(layer_grids[:-1], layer_grids[1:]):
for row1, row2 in zip(grid1, grid2):
for layer1, layer2 in zip(row1, row2):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layer_grids
def work_grid(grid, fig):
"""Take a two dimensional grid, add subplots to a figure for each cell and do layer work.
Parameters:
-----------
grid: a two dimensional grid of layers
fig: matplotlib figure to draw on
Returns:
--------
axes: a two dimensional list of matplotlib axes
"""
nrows = len(grid)
ncols = len(grid[0])
axes = [[None for _ in range(ncols)] for _ in range(nrows)]
for row in range(nrows):
for col in range(ncols):
axes[row][col] = fig.add_subplot(nrows, ncols, ncols * row + col + 1)
grid[row][col].work(ax=axes[row][col])
return axes
def adjust_subplots(fig, axes, trellis, layers):
"""Adjust the subtplots on matplotlib figure with the
fact that we have a trellis plot in mind.
Parameters:
-----------
fig: matplotlib figure
axes: a two dimensional grid of matplotlib axes
trellis: TrellisGrid object
layers: last grid of layers in the plot
"""
# Flatten the axes grid
axes = [ax for row in axes for ax in row]
min_x = min([ax.get_xlim()[0] for ax in axes])
max_x = max([ax.get_xlim()[1] for ax in axes])
min_y = min([ax.get_ylim()[0] for ax in axes])
max_y = max([ax.get_ylim()[1] for ax in axes])
[ax.set_xlim(min_x, max_x) for ax in axes]
[ax.set_ylim(min_y, max_y) for ax in axes]
for index, axis in enumerate(axes):
if index % trellis.cols == 0:
pass
else:
axis.get_yaxis().set_ticks([])
axis.set_ylabel('')
        if index // trellis.cols == trellis.rows - 1:
pass
else:
axis.get_xaxis().set_ticks([])
axis.set_xlabel('')
if trellis.by[0] == '.':
label1 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
elif trellis.by[1] == '.':
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
else:
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols][0])
label2 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols][1])
if label2 is not None:
axis.table(cellText=[[label1], [label2]],
loc='top', cellLoc='center',
cellColours=[['lightgrey'], ['lightgrey']])
else:
axis.table(cellText=[[label1]], loc='top', cellLoc='center', cellColours=[['lightgrey']])
# Flatten the layer grid
layers = [layer for row in layers for layer in row]
legend = {}
for layer in layers:
legend = dictionary_union(legend, layer.legend)
patches = []
labels = []
if len(list(legend.keys())) == 0:
key_function = lambda tup: tup
elif len(list(legend.keys())[0]) == 2:
key_function = lambda tup: (tup[1])
else:
key_function = lambda tup: (tup[1], tup[3])
for key in sorted(list(legend.keys()), key=key_function):
value = legend[key]
patches.append(value)
if len(key) == 2:
col, val = key
labels.append("%s" % str(val))
elif len(key) == 4:
col1, val1, col2, val2 = key
labels.append("%s, %s" % (str(val1), str(val2)))
else:
raise ValueError("Maximum 2 categorical attributes to display a lengend of")
if len(legend):
fig.legend(patches, labels, loc='upper right')
fig.subplots_adjust(wspace=0.05, hspace=0.2)
class RPlot:
"""
The main plot object. Add layers to an instance of this object to create a plot.
"""
def __init__(self, data, x=None, y=None):
"""Initialize RPlot instance.
Parameters:
-----------
data: pandas DataFrame instance
x: string, DataFrame column name
y: string, DataFrame column name
"""
self.layers = [Layer(data, **default_aes(x=x, y=y))]
        self.trellised = False
def add(self, layer):
"""Add a layer to RPlot instance.
Parameters:
-----------
layer: Layer instance
"""
if not isinstance(layer, Layer):
raise TypeError("The operand on the right side of + must be a Layer instance")
self.layers.append(layer)
def render(self, fig=None):
"""Render all the layers on a matplotlib figure.
Parameters:
-----------
fig: matplotlib figure
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.gcf()
# Look for the last TrellisGrid instance in the layer list
last_trellis = None
for layer in self.layers:
if isinstance(layer, TrellisGrid):
last_trellis = layer
if last_trellis is None:
# We have a simple, non-trellised plot
new_layers = sequence_layers(self.layers)
for layer in new_layers:
layer.work(fig=fig)
legend = {}
for layer in new_layers:
legend = dictionary_union(legend, layer.legend)
patches = []
labels = []
if len(list(legend.keys())) == 0:
key_function = lambda tup: tup
elif len(list(legend.keys())[0]) == 2:
key_function = lambda tup: (tup[1])
else:
key_function = lambda tup: (tup[1], tup[3])
for key in sorted(list(legend.keys()), key=key_function):
value = legend[key]
patches.append(value)
if len(key) == 2:
col, val = key
labels.append("%s" % str(val))
elif len(key) == 4:
col1, val1, col2, val2 = key
labels.append("%s, %s" % (str(val1), str(val2)))
else:
raise ValueError("Maximum 2 categorical attributes to display a lengend of")
if len(legend):
fig.legend(patches, labels, loc='upper right')
else:
# We have a trellised plot.
# First let's remove all other TrellisGrid instances from the layer list,
# including this one.
new_layers = []
for layer in self.layers:
if not isinstance(layer, TrellisGrid):
new_layers.append(layer)
new_layers = sequence_layers(new_layers)
# Now replace the old layers by their trellised versions
new_layers = last_trellis.trellis(new_layers)
# Prepare the subplots and draw on them
new_layers = sequence_grids(new_layers)
axes_grids = [work_grid(grid, fig) for grid in new_layers]
axes_grid = axes_grids[-1]
adjust_subplots(fig, axes_grid, last_trellis, new_layers[-1])
# And we're done
return fig
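# End-to-end usage sketch (an illustrative addition; it assumes a pandas
# DataFrame `df` with numeric columns 'x' and 'y' and a categorical column
# 'group'):
def _demo_rplot(df):
    import matplotlib.pyplot as plt
    plot = RPlot(df, x='x', y='y')
    plot.add(TrellisGrid(['group', '.']))
    plot.add(GeomPoint(colour=ScaleRandomColour('group')))
    fig = plt.figure()
    plot.render(fig)
    return fig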
| apache-2.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/metrics/tests/test_score_objects.py | 1 | 15967 | import numbers
import os
import pickle
import shutil
import tempfile
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.cluster import KMeans
from sklearn.datasets import load_diabetes
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.dummy import DummyRegressor
from sklearn.externals import joblib
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
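# Illustrative sketch (not one of the original tests): a scorer created with
# make_scorer can be passed anywhere a `scoring` argument is accepted, e.g.
# cross_val_score.
def _demo_custom_scorer():
    ftwo_scorer = make_scorer(fbeta_score, beta=2)
    X, y = make_classification(random_state=0)
    return cross_val_score(LogisticRegression(), X, y, scoring=ftwo_scorer)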
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
| mit |
boada/astLib | astLib/astImages.py | 1 | 50443 | """module for simple .fits image tasks (rotation, clipping out sections, making .pngs etc.)
(c) 2007-2014 Matt Hilton
U{http://astlib.sourceforge.net}
Some routines in this module will fail if, e.g., asked to clip a section from a
.fits image at a position not found within the image (as determined using the
WCS). Where this occurs, the function will return None. An error message will
be printed to the console when this happens if astImages.REPORT_ERRORS=True
(the default). Testing if an astImages function returns None can be used to
handle errors in scripts.
"""
import os
#import sys
import math
from astLib import astWCS
import numpy
REPORT_ERRORS = True
# So far as I can tell in astropy 0.4 the API is the same as pyfits for what we
# need...
try:
from astropy.io import fits as pyfits
except:
try:
import pyfits
except:
raise Exception("couldn't import either pyfits or astropy.io.fits")
try:
from scipy import ndimage
from scipy import interpolate
except ImportError:
print("WARNING: astImages: failed to import scipy.ndimage - some "
"functions will not work.")
try:
import matplotlib
from matplotlib import pylab
matplotlib.interactive(False)
except ImportError:
print("WARNING: astImages: failed to import matplotlib - some functions "
"will not work.")
#-----------------------------------------------------------------------------
def clipImageSectionWCS(imageData,
imageWCS,
RADeg,
decDeg,
clipSizeDeg,
returnWCS=True):
"""Clips a square or rectangular section from an image array at the given
celestial coordinates. An updated WCS for the clipped section is optionally
returned, as well as the x, y pixel coordinates in the original image
corresponding to the clipped section.
Note that the clip size is specified in degrees on the sky. For projections
that have varying real pixel scale across the map (e.g. CEA), use
L{clipUsingRADecCoords} instead.
@type imageData: numpy array
@param imageData: image data array
@type imageWCS: astWCS.WCS
@param imageWCS: astWCS.WCS object
@type RADeg: float
@param RADeg: coordinate in decimal degrees
@type decDeg: float
@param decDeg: coordinate in decimal degrees
@type clipSizeDeg: float or list in format [widthDeg, heightDeg]
@param clipSizeDeg: if float, size of square clipped section in decimal
degrees; if list,
size of clipped section in degrees in x, y axes of image respectively
@type returnWCS: bool
@param returnWCS: if True, return an updated WCS for the clipped section
@rtype: dictionary
@return: clipped image section (numpy array), updated astWCS WCS object for
clipped image section, and coordinates of clipped section in imageData in
format {'data', 'wcs', 'clippedSection'}.
"""
imHeight = imageData.shape[0]
imWidth = imageData.shape[1]
xImScale = imageWCS.getXPixelSizeDeg()
yImScale = imageWCS.getYPixelSizeDeg()
if type(clipSizeDeg) == float:
xHalfClipSizeDeg = clipSizeDeg / 2.0
yHalfClipSizeDeg = xHalfClipSizeDeg
elif type(clipSizeDeg) == list or type(clipSizeDeg) == tuple:
xHalfClipSizeDeg = clipSizeDeg[0] / 2.0
yHalfClipSizeDeg = clipSizeDeg[1] / 2.0
else:
raise Exception("did not understand clipSizeDeg: should be float, or "
"[widthDeg, heightDeg]")
xHalfSizePix = xHalfClipSizeDeg / xImScale
yHalfSizePix = yHalfClipSizeDeg / yImScale
cPixCoords = imageWCS.wcs2pix(RADeg, decDeg)
cTopLeft = [cPixCoords[0] + xHalfSizePix, cPixCoords[1] + yHalfSizePix]
cBottomRight = [cPixCoords[0] - xHalfSizePix, cPixCoords[1] - yHalfSizePix]
X = [int(round(cTopLeft[0])), int(round(cBottomRight[0]))]
Y = [int(round(cTopLeft[1])), int(round(cBottomRight[1]))]
X.sort()
Y.sort()
if X[0] < 0:
X[0] = 0
if X[1] > imWidth:
X[1] = imWidth
if Y[0] < 0:
Y[0] = 0
if Y[1] > imHeight:
Y[1] = imHeight
clippedData = imageData[Y[0]:Y[1], X[0]:X[1]]
# Update WCS
if returnWCS:
try:
oldCRPIX1 = imageWCS.header['CRPIX1']
oldCRPIX2 = imageWCS.header['CRPIX2']
clippedWCS = imageWCS.copy()
clippedWCS.header['NAXIS1'] = clippedData.shape[1]
clippedWCS.header['NAXIS2'] = clippedData.shape[0]
try:
clippedWCS.header['CRPIX1'] = oldCRPIX1 - X[0]
except TypeError:
clippedWCS.header['CRPIX1'] = float(oldCRPIX1) - X[0]
try:
clippedWCS.header['CRPIX2'] = oldCRPIX2 - Y[0]
except TypeError:
clippedWCS.header['CRPIX2'] = float(oldCRPIX2) - Y[0]
clippedWCS.updateFromHeader()
except KeyError:
if REPORT_ERRORS:
print("WARNING: astImages.clipImageSectionWCS() : no CRPIX1, "
"CRPIX2 keywords found - not updating clipped image "
"WCS.")
clippedData = imageData[Y[0]:Y[1], X[0]:X[1]]
clippedWCS = imageWCS.copy()
else:
clippedWCS = None
return {'data': clippedData,
'wcs': clippedWCS,
'clippedSection': [X[0], X[1], Y[0], Y[1]]}
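# Example usage (illustrative; the file name, the RA/dec of 150.0, 2.2 deg and the
# 0.1 deg clip size are assumed values, not taken from this module):
#
#   img = pyfits.open("image.fits")
#   wcs = astWCS.WCS("image.fits")
#   clip = clipImageSectionWCS(img[0].data, wcs, 150.0, 2.2, 0.1)
#   clippedData, clippedWCS = clip['data'], clip['wcs']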
#-----------------------------------------------------------------------------
def clipImageSectionPix(imageData, XCoord, YCoord, clipSizePix):
"""Clips a square or rectangular section from an image array at the given
pixel coordinates.
@type imageData: numpy array
@param imageData: image data array
@type XCoord: float
@param XCoord: coordinate in pixels
@type YCoord: float
@param YCoord: coordinate in pixels
@type clipSizePix: float or list in format [widthPix, heightPix]
@param clipSizePix: if float, size of square clipped section in pixels; if
list, size of clipped section in pixels in x, y axes of output image
respectively
@rtype: numpy array
@return: clipped image section
"""
imHeight = imageData.shape[0]
imWidth = imageData.shape[1]
if type(clipSizePix) == float or type(clipSizePix) == int:
xHalfClipSizePix = int(round(clipSizePix / 2.0))
yHalfClipSizePix = xHalfClipSizePix
elif type(clipSizePix) == list or type(clipSizePix) == tuple:
xHalfClipSizePix = int(round(clipSizePix[0] / 2.0))
yHalfClipSizePix = int(round(clipSizePix[1] / 2.0))
else:
raise Exception("did not understand clipSizePix: should be float, or "
"[widthPix, heightPix]")
cTopLeft = [XCoord + xHalfClipSizePix, YCoord + yHalfClipSizePix]
cBottomRight = [XCoord - xHalfClipSizePix, YCoord - yHalfClipSizePix]
X = [int(round(cTopLeft[0])), int(round(cBottomRight[0]))]
Y = [int(round(cTopLeft[1])), int(round(cBottomRight[1]))]
X.sort()
Y.sort()
if X[0] < 0:
X[0] = 0
if X[1] > imWidth:
X[1] = imWidth
if Y[0] < 0:
Y[0] = 0
if Y[1] > imHeight:
Y[1] = imHeight
return imageData[Y[0]:Y[1], X[0]:X[1]]
#-----------------------------------------------------------------------------
def clipRotatedImageSectionWCS(imageData,
imageWCS,
RADeg,
decDeg,
clipSizeDeg,
returnWCS=True):
"""Clips a square or rectangular section from an image array at the given
celestial coordinates. The resulting clip is rotated and/or flipped such
that North is at the top, and East appears at the left. An updated WCS for
the clipped section is also returned. Note that the alignment of the
rotated WCS is currently not perfect - however, it is probably good enough
in most cases for use with L{ImagePlot} for plotting purposes.
Note that the clip size is specified in degrees on the sky. For projections
that have varying real pixel scale across the map (e.g. CEA), use
L{clipUsingRADecCoords} instead.
@type imageData: numpy array
@param imageData: image data array
@type imageWCS: astWCS.WCS
@param imageWCS: astWCS.WCS object
@type RADeg: float
@param RADeg: coordinate in decimal degrees
@type decDeg: float
@param decDeg: coordinate in decimal degrees
@type clipSizeDeg: float
@param clipSizeDeg: if float, size of square clipped section in decimal
degrees; if list, size of clipped section in degrees in RA, dec. axes of
output rotated image respectively
@type returnWCS: bool
@param returnWCS: if True, return an updated WCS for the clipped section
@rtype: dictionary
@return: clipped image section (numpy array), updated astWCS WCS object for
clipped image section, in format {'data', 'wcs'}.
@note: Returns 'None' if the requested position is not found within the
    image. If the image WCS does not have keywords of the form CD1_1 etc., the
output WCS will not be rotated.
"""
halfImageSize = imageWCS.getHalfSizeDeg()
imageCentre = imageWCS.getCentreWCSCoords()
#imScale = imageWCS.getPixelSizeDeg()
if type(clipSizeDeg) == float:
xHalfClipSizeDeg = clipSizeDeg / 2.0
yHalfClipSizeDeg = xHalfClipSizeDeg
elif type(clipSizeDeg) == list or type(clipSizeDeg) == tuple:
xHalfClipSizeDeg = clipSizeDeg[0] / 2.0
yHalfClipSizeDeg = clipSizeDeg[1] / 2.0
else:
raise Exception("did not understand clipSizeDeg: should be float, or "
"[widthDeg, heightDeg]")
diagonalHalfSizeDeg = math.sqrt((xHalfClipSizeDeg * xHalfClipSizeDeg) +
(yHalfClipSizeDeg * yHalfClipSizeDeg))
#diagonalHalfSizePix = diagonalHalfSizeDeg / imScale
if RADeg > imageCentre[0] - halfImageSize[0] and RADeg < imageCentre[0] + \
halfImageSize[0] and decDeg > imageCentre[1] - halfImageSize[1] and \
decDeg < imageCentre[1] + halfImageSize[1]:
imageDiagonalClip = clipImageSectionWCS(
imageData, imageWCS, RADeg, decDeg, diagonalHalfSizeDeg * 2.0)
diagonalClip = imageDiagonalClip['data']
diagonalWCS = imageDiagonalClip['wcs']
rotDeg = diagonalWCS.getRotationDeg()
imageRotated = ndimage.rotate(diagonalClip, rotDeg)
if diagonalWCS.isFlipped() == 1:
imageRotated = pylab.fliplr(imageRotated)
# Handle WCS rotation
rotatedWCS = diagonalWCS.copy()
rotRadians = math.radians(rotDeg)
if returnWCS:
try:
CD11 = rotatedWCS.header['CD1_1']
CD21 = rotatedWCS.header['CD2_1']
CD12 = rotatedWCS.header['CD1_2']
CD22 = rotatedWCS.header['CD2_2']
if rotatedWCS.isFlipped() == 1:
CD11 = CD11 * -1
CD12 = CD12 * -1
CDMatrix = numpy.array([[CD11, CD12], [CD21, CD22]],
dtype=numpy.float64)
rotRadians = rotRadians
rot11 = math.cos(rotRadians)
rot12 = math.sin(rotRadians)
rot21 = -math.sin(rotRadians)
rot22 = math.cos(rotRadians)
rotMatrix = numpy.array([[rot11, rot12], [rot21, rot22]],
dtype=numpy.float64)
newCDMatrix = numpy.dot(rotMatrix, CDMatrix)
P1 = diagonalWCS.header['CRPIX1']
P2 = diagonalWCS.header['CRPIX2']
V1 = diagonalWCS.header['CRVAL1']
V2 = diagonalWCS.header['CRVAL2']
PMatrix = numpy.zeros((2, ), dtype=numpy.float64)
PMatrix[0] = P1
PMatrix[1] = P2
# BELOW IS HOW TO WORK OUT THE NEW REF PIXEL
CMatrix = numpy.array(
[imageRotated.shape[1] / 2.0, imageRotated.shape[0] / 2.0])
centreCoords = diagonalWCS.getCentreWCSCoords()
alphaRad = math.radians(centreCoords[0])
deltaRad = math.radians(centreCoords[1])
thetaRad = math.asin(math.sin(deltaRad) *
math.sin(math.radians(V2)) +
math.cos(deltaRad) *
math.cos(math.radians(V2)) *
math.cos(alphaRad - math.radians(V1)))
phiRad = math.atan2(-math.cos(deltaRad) *
math.sin(alphaRad - math.radians(V1)),
math.sin(deltaRad) *
math.cos(math.radians(V2)) -
math.cos(deltaRad) *
math.sin(math.radians(V2)) *
math.cos(alphaRad - math.radians(V1))) + \
math.pi
RTheta = (180.0 / math.pi) * (1.0 / math.tan(thetaRad))
xy = numpy.zeros((2, ), dtype=numpy.float64)
xy[0] = RTheta * math.sin(phiRad)
xy[1] = -RTheta * math.cos(phiRad)
newPMatrix = CMatrix - numpy.dot(
numpy.linalg.inv(newCDMatrix), xy)
# But there's a small offset to CRPIX due to the rotatedImage
# being rounded to an integer
# number of pixels (not sure this helps much)
#d=numpy.dot(rotMatrix, [diagonalClip.shape[1],
#diagonalClip.shape[0]])
#offset=abs(d)-numpy.array(imageRotated.shape)
rotatedWCS.header['NAXIS1'] = imageRotated.shape[1]
rotatedWCS.header['NAXIS2'] = imageRotated.shape[0]
rotatedWCS.header['CRPIX1'] = newPMatrix[0]
rotatedWCS.header['CRPIX2'] = newPMatrix[1]
rotatedWCS.header['CRVAL1'] = V1
rotatedWCS.header['CRVAL2'] = V2
rotatedWCS.header['CD1_1'] = newCDMatrix[0][0]
rotatedWCS.header['CD2_1'] = newCDMatrix[1][0]
rotatedWCS.header['CD1_2'] = newCDMatrix[0][1]
rotatedWCS.header['CD2_2'] = newCDMatrix[1][1]
rotatedWCS.updateFromHeader()
except KeyError:
if REPORT_ERRORS:
print("WARNING: astImages.clipRotatedImageSectionWCS() : "
"no CDi_j keywords found - not rotating WCS.")
imageRotated = diagonalClip
rotatedWCS = diagonalWCS
imageRotatedClip = clipImageSectionWCS(imageRotated, rotatedWCS, RADeg,
decDeg, clipSizeDeg)
if returnWCS:
return {'data': imageRotatedClip['data'],
'wcs': imageRotatedClip['wcs']}
else:
return {'data': imageRotatedClip['data'], 'wcs': None}
else:
if REPORT_ERRORS:
print("""ERROR: astImages.clipRotatedImageSectionWCS() :
RADeg, decDeg are not within imageData.""")
return None
#-----------------------------------------------------------------------------
def clipUsingRADecCoords(imageData,
imageWCS,
RAMin,
RAMax,
decMin,
decMax,
returnWCS=True):
"""Clips a section from an image array at the pixel coordinates
corresponding to the given celestial coordinates.
@type imageData: numpy array
@param imageData: image data array
@type imageWCS: astWCS.WCS
@param imageWCS: astWCS.WCS object
@type RAMin: float
@param RAMin: minimum RA coordinate in decimal degrees
@type RAMax: float
@param RAMax: maximum RA coordinate in decimal degrees
@type decMin: float
@param decMin: minimum dec coordinate in decimal degrees
@type decMax: float
@param decMax: maximum dec coordinate in decimal degrees
@type returnWCS: bool
@param returnWCS: if True, return an updated WCS for the clipped section
@rtype: dictionary
@return: clipped image section (numpy array), updated astWCS WCS object for
clipped image section, and corresponding pixel coordinates in imageData in
format {'data', 'wcs', 'clippedSection'}.
@note: Returns 'None' if the requested position is not found within the
image.
"""
imHeight = imageData.shape[0]
imWidth = imageData.shape[1]
xMin, yMin = imageWCS.wcs2pix(RAMin, decMin)
xMax, yMax = imageWCS.wcs2pix(RAMax, decMax)
xMin = int(round(xMin))
xMax = int(round(xMax))
yMin = int(round(yMin))
yMax = int(round(yMax))
X = [xMin, xMax]
X.sort()
Y = [yMin, yMax]
Y.sort()
if X[0] < 0:
X[0] = 0
if X[1] > imWidth:
X[1] = imWidth
if Y[0] < 0:
Y[0] = 0
if Y[1] > imHeight:
Y[1] = imHeight
clippedData = imageData[Y[0]:Y[1], X[0]:X[1]]
# Update WCS
if returnWCS:
try:
oldCRPIX1 = imageWCS.header['CRPIX1']
oldCRPIX2 = imageWCS.header['CRPIX2']
clippedWCS = imageWCS.copy()
clippedWCS.header['NAXIS1'] = clippedData.shape[1]
clippedWCS.header['NAXIS2'] = clippedData.shape[0]
clippedWCS.header['CRPIX1'] = oldCRPIX1 - X[0]
clippedWCS.header['CRPIX2'] = oldCRPIX2 - Y[0]
clippedWCS.updateFromHeader()
except KeyError:
if REPORT_ERRORS:
print("WARNING: astImages.clipUsingRADecCoords() : no CRPIX1, "
"CRPIX2 keywords found - not updating clipped image"
"WCS.")
clippedData = imageData[Y[0]:Y[1], X[0]:X[1]]
clippedWCS = imageWCS.copy()
else:
clippedWCS = None
return {'data': clippedData,
'wcs': clippedWCS,
'clippedSection': [X[0], X[1], Y[0], Y[1]]}
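# Example usage (illustrative; the RA, dec ranges are assumed values, and img, wcs
# are an already-loaded image array and its astWCS.WCS object):
#
#   clip = clipUsingRADecCoords(img[0].data, wcs, 149.9, 150.1, 2.1, 2.3)
#   x0, x1, y0, y1 = clip['clippedSection']   # pixel range used in the original image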
#-----------------------------------------------------------------------------
def scaleImage(imageData, imageWCS, scaleFactor):
"""Scales image array and WCS by the given scale factor.
@type imageData: numpy array
@param imageData: image data array
@type imageWCS: astWCS.WCS
@param imageWCS: astWCS.WCS object
@type scaleFactor: float or list or tuple
@param scaleFactor: factor to resize image by - if tuple or list, in format
[x scale factor, y scale factor]
@rtype: dictionary
@return: image data (numpy array), updated astWCS WCS object for image, in
format {'data', 'wcs'}.
"""
if type(scaleFactor) == int or type(scaleFactor) == float:
scaleFactor = [float(scaleFactor), float(scaleFactor)]
scaledData = ndimage.zoom(imageData, scaleFactor)
# Take care of offset due to rounding in scaling image to integer pixel
# dimensions
properDimensions = numpy.array(imageData.shape) * scaleFactor
offset = properDimensions - numpy.array(scaledData.shape)
# Rescale WCS
try:
oldCRPIX1 = imageWCS.header['CRPIX1']
oldCRPIX2 = imageWCS.header['CRPIX2']
CD11 = imageWCS.header['CD1_1']
CD21 = imageWCS.header['CD2_1']
CD12 = imageWCS.header['CD1_2']
CD22 = imageWCS.header['CD2_2']
except KeyError:
# Try the older FITS header format
try:
oldCRPIX1 = imageWCS.header['CRPIX1']
oldCRPIX2 = imageWCS.header['CRPIX2']
CD11 = imageWCS.header['CDELT1']
CD21 = 0
CD12 = 0
CD22 = imageWCS.header['CDELT2']
except KeyError:
if REPORT_ERRORS:
print("WARNING: astImages.rescaleImage() : no CDij or CDELT "
"keywords found - not updating WCS.")
scaledWCS = imageWCS.copy()
return {'data': scaledData, 'wcs': scaledWCS}
CDMatrix = numpy.array([[CD11, CD12], [CD21, CD22]], dtype=numpy.float64)
scaleFactorMatrix = numpy.array(
[[1.0 / scaleFactor[0], 0], [0, 1.0 / scaleFactor[1]]])
scaledCDMatrix = numpy.dot(scaleFactorMatrix, CDMatrix)
scaledWCS = imageWCS.copy()
scaledWCS.header['NAXIS1'] = scaledData.shape[1]
scaledWCS.header['NAXIS2'] = scaledData.shape[0]
scaledWCS.header['CRPIX1'] = oldCRPIX1 * scaleFactor[0] + offset[1]
scaledWCS.header['CRPIX2'] = oldCRPIX2 * scaleFactor[1] + offset[0]
scaledWCS.header['CD1_1'] = scaledCDMatrix[0][0]
scaledWCS.header['CD2_1'] = scaledCDMatrix[1][0]
scaledWCS.header['CD1_2'] = scaledCDMatrix[0][1]
scaledWCS.header['CD2_2'] = scaledCDMatrix[1][1]
scaledWCS.updateFromHeader()
return {'data': scaledData, 'wcs': scaledWCS}
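# Example usage (illustrative; the factor of 2.0 is an assumed value, and img, wcs
# are an already-loaded image array and its astWCS.WCS object):
#
#   zoomed = scaleImage(img[0].data, wcs, 2.0)   # double the pixel dimensions
#   saveFITS("zoomed.fits", zoomed['data'], zoomed['wcs'])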
#---------------------------------------------------------------------------
def intensityCutImage(imageData, cutLevels):
"""Creates a matplotlib.pylab plot of an image array with the specified
cuts in intensity applied. This routine is used by L{saveBitmap} and
L{saveContourOverlayBitmap}, which both produce output as .png, .jpg, etc.
images.
@type imageData: numpy array
@param imageData: image data array
@type cutLevels: list
@param cutLevels: sets the image scaling - available options:
- pixel values: cutLevels=[low value, high value].
- histogram equalisation: cutLevels=["histEq", number of bins ( e.g.
1024)]
- relative: cutLevels=["relative", cut per cent level (e.g. 99.5)]
- smart: cutLevels=["smart", cut per cent level (e.g. 99.5)]
["smart", 99.5] seems to provide good scaling over a range of different
images.
@rtype: dictionary
@return: image section (numpy.array), matplotlib image normalisation
(matplotlib.colors.Normalize), in the format {'image', 'norm'}.
@note: If cutLevels[0] == "histEq", then only {'image'} is returned.
"""
# Optional histogram equalisation
if cutLevels[0] == "histEq":
imageData = histEq(imageData, cutLevels[1])
anorm = pylab.normalize(imageData.min(), imageData.max())
elif cutLevels[0] == "relative":
# this turns image data into 1D array then sorts
sorted = numpy.sort(numpy.ravel(imageData))
maxValue = sorted.max()
minValue = sorted.min()
        # discard the specified percentage of pixels at the top and bottom
        topCutIndex = (len(sorted) - 1) - \
            int(math.floor(float((100.0 - cutLevels[1]) / 100.0) *
                           (len(sorted) - 1)))
        bottomCutIndex = int(math.ceil(float((100.0 - cutLevels[1]) / 100.0) *
                                       (len(sorted) - 1)))
topCut = sorted[topCutIndex]
bottomCut = sorted[bottomCutIndex]
anorm = pylab.normalize(bottomCut, topCut)
elif cutLevels[0] == "smart":
        # this turns the image data into a 1D array then sorts
sorted = numpy.sort(numpy.ravel(imageData))
maxValue = sorted.max()
minValue = sorted.min()
numBins = 10000 # 0.01 per cent accuracy
binWidth = (maxValue - minValue) / float(numBins)
histogram = ndimage.histogram(sorted, minValue, maxValue, numBins)
# Find the bin with the most pixels in it, set that as our minimum
# Then search through the bins until we get to a bin with more/or the
# same number of pixels in it than the previous one.
# We take that to be the maximum.
# This means that we avoid the traps of big, bright, saturated stars
# that cause
# problems for relative scaling
backgroundValue = histogram.max()
foundBackgroundBin = False
foundTopBin = False
lastBin = -10000
for i in range(len(histogram)):
if histogram[i] >= lastBin and foundBackgroundBin:
                # Fudge: avoid picking, as the top bin, a bin within
                # 10 per cent of the background pixel value
if (minValue + (binWidth * i)) > bottomBinValue * 1.1:
topBinValue = minValue + (binWidth * i)
foundTopBin = True
break
if histogram[i] == backgroundValue and not foundBackgroundBin:
bottomBinValue = minValue + (binWidth * i)
foundBackgroundBin = True
lastBin = histogram[i]
if not foundTopBin:
topBinValue = maxValue
#Now we apply relative scaling to this
smartClipped = numpy.clip(sorted, bottomBinValue, topBinValue)
        topCutIndex = (len(smartClipped) - 1) - \
            int(math.floor(float((100.0 - cutLevels[1]) / 100.0) *
                           (len(smartClipped) - 1)))
        bottomCutIndex = int(math.ceil(float((100.0 - cutLevels[1]) / 100.0) *
                                       (len(smartClipped) - 1)))
topCut = smartClipped[topCutIndex]
bottomCut = smartClipped[bottomCutIndex]
anorm = pylab.normalize(bottomCut, topCut)
else:
# Normalise using given cut levels
anorm = pylab.normalize(cutLevels[0], cutLevels[1])
if cutLevels[0] == "histEq":
return {'image': imageData.copy()}
else:
return {'image': imageData.copy(), 'norm': anorm}
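# Example cutLevels values (illustrative only; pick one per call; imageData is an
# already-loaded image array):
#
#   cut = intensityCutImage(imageData, [0.0, 1000.0])       # explicit pixel value cuts
#   cut = intensityCutImage(imageData, ["histEq", 1024])     # histogram equalisation
#   cut = intensityCutImage(imageData, ["relative", 99.5])   # relative scaling
#   cut = intensityCutImage(imageData, ["smart", 99.5])      # smart scaling
#   pylab.imshow(cut['image'], norm=cut.get('norm'), origin='lower', cmap=pylab.cm.gray)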
#-----------------------------------------------------------------------------
def resampleToTanProjection(imageData,
imageWCS,
outputPixDimensions=[600, 600]):
"""Resamples an image and WCS to a tangent plane projection. Purely for
plotting purposes (e.g., ensuring RA, dec. coordinate axes perpendicular).
@type imageData: numpy array
@param imageData: image data array
@type imageWCS: astWCS.WCS
@param imageWCS: astWCS.WCS object
@type outputPixDimensions: list
@param outputPixDimensions: [width, height] of output image in pixels
@rtype: dictionary
@return: image data (numpy array), updated astWCS WCS object for image, in
format {'data', 'wcs'}.
"""
RADeg, decDeg = imageWCS.getCentreWCSCoords()
#xPixelScale = imageWCS.getXPixelSizeDeg()
#yPixelScale = imageWCS.getYPixelSizeDeg()
xSizeDeg, ySizeDeg = imageWCS.getFullSizeSkyDeg()
xSizePix = int(round(outputPixDimensions[0]))
ySizePix = int(round(outputPixDimensions[1]))
xRefPix = xSizePix / 2.0
yRefPix = ySizePix / 2.0
xOutPixScale = xSizeDeg / xSizePix
#yOutPixScale = ySizeDeg / ySizePix
newHead = pyfits.Header()
newHead['NAXIS'] = 2
newHead['NAXIS1'] = xSizePix
newHead['NAXIS2'] = ySizePix
newHead['CTYPE1'] = 'RA---TAN'
newHead['CTYPE2'] = 'DEC--TAN'
newHead['CRVAL1'] = RADeg
newHead['CRVAL2'] = decDeg
newHead['CRPIX1'] = xRefPix + 1
newHead['CRPIX2'] = yRefPix + 1
newHead['CDELT1'] = -xOutPixScale
newHead['CDELT2'] = xOutPixScale # Makes more sense to use same pix scale
newHead['CUNIT1'] = 'DEG'
newHead['CUNIT2'] = 'DEG'
newWCS = astWCS.WCS(newHead, mode='pyfits')
newImage = numpy.zeros([ySizePix, xSizePix])
tanImage = resampleToWCS(newImage,
newWCS,
imageData,
imageWCS,
highAccuracy=True,
onlyOverlapping=False)
return tanImage
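# Example usage (illustrative; img, wcs are an already-loaded image array and its
# astWCS.WCS object):
#
#   tan = resampleToTanProjection(img[0].data, wcs, outputPixDimensions=[600, 600])
#   saveFITS("tanProjected.fits", tan['data'], tan['wcs'])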
#------------------------------------------------------------------------------
def resampleToWCS(im1Data,
im1WCS,
im2Data,
im2WCS,
highAccuracy=False,
onlyOverlapping=True):
"""Resamples data corresponding to second image (with data im2Data, WCS
im2WCS) onto the WCS of the first image (im1Data, im1WCS). The output,
    resampled image has the same pixel dimensions as the first image. This
routine is for assisting in plotting - performing photometry on the output
is not recommended.
Set highAccuracy == True to sample every corresponding pixel in each image;
otherwise only every nth pixel (where n is the ratio of the image scales)
will be sampled, with values in between being set using a linear
interpolation (much faster).
Set onlyOverlapping == True to speed up resampling by only resampling the
overlapping area defined by both image WCSs.
@type im1Data: numpy array
@param im1Data: image data array for first image
@type im1WCS: astWCS.WCS
@param im1WCS: astWCS.WCS object corresponding to im1Data
@type im2Data: numpy array
@param im2Data: image data array for second image (to be resampled to match
first image)
@type im2WCS: astWCS.WCS
@param im2WCS: astWCS.WCS object corresponding to im2Data
@type highAccuracy: bool
@param highAccuracy: if True, sample every corresponding pixel in each
image; otherwise, sample
every nth pixel, where n = the ratio of the image scales.
@type onlyOverlapping: bool
@param onlyOverlapping: if True, only consider the overlapping area defined
by both image WCSs (speeds things up)
@rtype: dictionary
@return: numpy image data array and associated WCS in format {'data', 'wcs'}
"""
resampledData = numpy.zeros(im1Data.shape)
# Find overlap - speed things up
# But have a border so as not to require the overlap to be perfect
# There's also no point in oversampling image 1 if it's much higher res
# than image 2
xPixRatio = (im2WCS.getXPixelSizeDeg() / im1WCS.getXPixelSizeDeg()) / 2.0
yPixRatio = (im2WCS.getYPixelSizeDeg() / im1WCS.getYPixelSizeDeg()) / 2.0
xBorder = xPixRatio * 10.0
yBorder = yPixRatio * 10.0
if not highAccuracy:
if xPixRatio > 1:
xPixStep = int(math.ceil(xPixRatio))
else:
xPixStep = 1
if yPixRatio > 1:
yPixStep = int(math.ceil(yPixRatio))
else:
yPixStep = 1
else:
xPixStep = 1
yPixStep = 1
if onlyOverlapping:
overlap = astWCS.findWCSOverlap(im1WCS, im2WCS)
xOverlap = [overlap['wcs1Pix'][0], overlap['wcs1Pix'][1]]
yOverlap = [overlap['wcs1Pix'][2], overlap['wcs1Pix'][3]]
xOverlap.sort()
yOverlap.sort()
xMin = int(math.floor(xOverlap[0] - xBorder))
xMax = int(math.ceil(xOverlap[1] + xBorder))
yMin = int(math.floor(yOverlap[0] - yBorder))
yMax = int(math.ceil(yOverlap[1] + yBorder))
xRemainder = (xMax - xMin) % xPixStep
yRemainder = (yMax - yMin) % yPixStep
if xRemainder != 0:
xMax = xMax + xRemainder
if yRemainder != 0:
yMax = yMax + yRemainder
# Check that we're still within the image boundaries, to be on the safe
# side
if xMin < 0:
xMin = 0
if xMax > im1Data.shape[1]:
xMax = im1Data.shape[1]
if yMin < 0:
yMin = 0
if yMax > im1Data.shape[0]:
yMax = im1Data.shape[0]
else:
xMin = 0
xMax = im1Data.shape[1]
yMin = 0
yMax = im1Data.shape[0]
for x in range(xMin, xMax, xPixStep):
for y in range(yMin, yMax, yPixStep):
RA, dec = im1WCS.pix2wcs(x, y)
x2, y2 = im2WCS.wcs2pix(RA, dec)
x2 = int(round(x2))
y2 = int(round(y2))
if x2 >= 0 and x2 < im2Data.shape[
1] and y2 >= 0 and y2 < im2Data.shape[0]:
resampledData[y][x] = im2Data[y2][x2]
# linear interpolation
if not highAccuracy:
for row in range(resampledData.shape[0]):
vals = resampledData[row, numpy.arange(xMin, xMax, xPixStep)]
index2data = interpolate.interp1d(
numpy.arange(0, vals.shape[0], 1), vals)
interpedVals = index2data(numpy.arange(0, vals.shape[0] - 1, 1.0 /
xPixStep))
resampledData[row, xMin:xMin + interpedVals.shape[
0]] = interpedVals
for col in range(resampledData.shape[1]):
vals = resampledData[numpy.arange(yMin, yMax, yPixStep), col]
index2data = interpolate.interp1d(
numpy.arange(0, vals.shape[0], 1), vals)
interpedVals = index2data(numpy.arange(0, vals.shape[0] - 1, 1.0 /
yPixStep))
resampledData[yMin:yMin + interpedVals.shape[0],
col] = interpedVals
# Note: should really just copy im1WCS keywords into im2WCS and return
# that
# Only a problem if we're using this for anything other than plotting
return {'data': resampledData, 'wcs': im1WCS.copy()}
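# Example usage (illustrative; im1Data/im1WCS and im2Data/im2WCS are two
# already-loaded image arrays with their astWCS.WCS objects):
#
#   matched = resampleToWCS(im1Data, im1WCS, im2Data, im2WCS, highAccuracy=False)
#   overlayData = matched['data']   # im2 resampled onto im1's pixel grid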
#---------------------------------------------------------------------------
def generateContourOverlay(backgroundImageData, backgroundImageWCS,
contourImageData, contourImageWCS, contourLevels,
contourSmoothFactor=0, highAccuracy=False):
"""Rescales an image array to be used as a contour overlay to have the same
dimensions as the background image, and generates a set of contour levels.
The image array from which the contours are to be generated will be
resampled to the same dimensions as the background image data, and can be
optionally smoothed using a Gaussian filter. The sigma of the Gaussian
filter (contourSmoothFactor) is specified in arcsec.
@type backgroundImageData: numpy array
@param backgroundImageData: background image data array
@type backgroundImageWCS: astWCS.WCS
@param backgroundImageWCS: astWCS.WCS object of the background image data
array
@type contourImageData: numpy array
@param contourImageData: image data array from which contours are to be
generated
@type contourImageWCS: astWCS.WCS
@param contourImageWCS: astWCS.WCS object corresponding to contourImageData
@type contourLevels: list
@param contourLevels: sets the contour levels - available options:
- values: contourLevels=[list of values specifying each level]
- linear spacing: contourLevels=['linear', min level value, max level
value, number of levels] - can use "min", "max" to automatically set
min, max levels from image data
- log spacing: contourLevels=['log', min level value, max level value,
number of levels] - can use "min", "max" to automatically set min, max
levels from image data
@type contourSmoothFactor: float
@param contourSmoothFactor: standard deviation (in arcsec) of Gaussian
filter for pre-smoothing of contour image data (set to 0 for no smoothing)
@type highAccuracy: bool
@param highAccuracy: if True, sample every corresponding pixel in each
image; otherwise, sample every nth pixel, where n = the ratio of the image
scales.
"""
# For compromise between speed and accuracy, scale a copy of the background
# image down to a scale that is one pixel = 1/5 of a pixel in the contour
# image
# But only do this if it has CDij keywords as we know how to scale those
if ("CD1_1" in backgroundImageWCS.header):
xScaleFactor = backgroundImageWCS.getXPixelSizeDeg() / (
contourImageWCS.getXPixelSizeDeg() / 5.0)
yScaleFactor = backgroundImageWCS.getYPixelSizeDeg() / (
contourImageWCS.getYPixelSizeDeg() / 5.0)
scaledBackground = scaleImage(backgroundImageData, backgroundImageWCS,
(xScaleFactor, yScaleFactor))
scaled = resampleToWCS(scaledBackground['data'],
scaledBackground['wcs'],
contourImageData,
contourImageWCS,
highAccuracy=highAccuracy)
scaledContourData = scaled['data']
scaledContourWCS = scaled['wcs']
scaledBackground = True
else:
scaled = resampleToWCS(backgroundImageData,
backgroundImageWCS,
contourImageData,
contourImageWCS,
highAccuracy=highAccuracy)
scaledContourData = scaled['data']
scaledContourWCS = scaled['wcs']
scaledBackground = False
if contourSmoothFactor > 0:
sigmaPix = (contourSmoothFactor /
3600.0) / scaledContourWCS.getPixelSizeDeg()
scaledContourData = ndimage.gaussian_filter(scaledContourData,
sigmaPix)
# Various ways of setting the contour levels
# If just a list is passed in, use those instead
if contourLevels[0] == "linear":
if contourLevels[1] == "min":
xMin = contourImageData.flatten().min()
else:
xMin = float(contourLevels[1])
if contourLevels[2] == "max":
xMax = contourImageData.flatten().max()
else:
xMax = float(contourLevels[2])
nLevels = contourLevels[3]
xStep = (xMax - xMin) / (nLevels - 1)
cLevels = []
for j in range(nLevels + 1):
level = xMin + j * xStep
cLevels.append(level)
elif contourLevels[0] == "log":
if contourLevels[1] == "min":
xMin = contourImageData.flatten().min()
else:
xMin = float(contourLevels[1])
if contourLevels[2] == "max":
xMax = contourImageData.flatten().max()
else:
xMax = float(contourLevels[2])
if xMin <= 0.0:
raise Exception(
"minimum contour level set to <= 0 and log scaling chosen.")
xLogMin = math.log10(xMin)
xLogMax = math.log10(xMax)
nLevels = contourLevels[3]
xLogStep = (xLogMax - xLogMin) / (nLevels - 1)
cLevels = []
for j in range(nLevels + 1):
level = math.pow(10, xLogMin + j * xLogStep)
cLevels.append(level)
else:
cLevels = contourLevels
# Now blow the contour image data back up to the size of the original image
if scaledBackground:
scaledBack = scaleImage(scaledContourData, scaledContourWCS, (
1.0 / xScaleFactor, 1.0 / yScaleFactor))['data']
else:
scaledBack = scaledContourData
return {'scaledImage': scaledBack, 'contourLevels': cLevels}
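# Example usage (illustrative; the 'linear' level spec and 5 arcsec smoothing are
# assumed values, and the bg/contour arrays and WCSs are already loaded):
#
#   overlay = generateContourOverlay(bgData, bgWCS, contourData, contourWCS,
#                                    ['linear', 'min', 'max', 5],
#                                    contourSmoothFactor=5.0)
#   pylab.contour(overlay['scaledImage'], overlay['contourLevels'], colors='white')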
#---------------------------------------------------------------------------
def saveBitmap(outputFileName, imageData, cutLevels, size, colorMapName):
"""Makes a bitmap image from an image array; the image format is specified
    by the filename extension (e.g. ".jpg"=JPEG, ".png"=PNG).
@type outputFileName: string
@param outputFileName: filename of output bitmap image
@type imageData: numpy array
@param imageData: image data array
@type cutLevels: list
@param cutLevels: sets the image scaling - available options:
- pixel values: cutLevels=[low value, high value].
- histogram equalisation: cutLevels=["histEq", number of bins ( e.g.
1024)]
- relative: cutLevels=["relative", cut per cent level (e.g. 99.5)]
- smart: cutLevels=["smart", cut per cent level (e.g. 99.5)]
["smart", 99.5] seems to provide good scaling over a range of different
images.
@type size: int
@param size: size of output image in pixels
@type colorMapName: string
@param colorMapName: name of a standard matplotlib colormap, e.g. "hot",
"cool", "gray" etc. (do "help(pylab.colormaps)" in the Python interpreter
to see available options)
"""
cut = intensityCutImage(imageData, cutLevels)
# Make plot
aspectR = float(cut['image'].shape[0]) / float(cut['image'].shape[1])
pylab.figure(figsize=(10, 10 * aspectR))
pylab.axes([0, 0, 1, 1])
try:
colorMap = pylab.cm.get_cmap(colorMapName)
except AssertionError:
raise Exception(colorMapName +
" is not a defined matplotlib colormap.")
if cutLevels[0] == "histEq":
pylab.imshow(cut['image'],
interpolation="bilinear",
origin='lower',
cmap=colorMap)
else:
pylab.imshow(cut['image'],
interpolation="bilinear",
norm=cut['norm'],
origin='lower',
cmap=colorMap)
pylab.axis("off")
pylab.savefig("out_astImages.png")
pylab.close("all")
try:
from PIL import Image
    except ImportError:
raise Exception("astImages.saveBitmap requires the Python Imaging "
"Library to be installed.")
im = Image.open("out_astImages.png")
im.thumbnail((int(size), int(size)))
im.save(outputFileName)
os.remove("out_astImages.png")
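# Example usage (illustrative file name and settings; img is an already-loaded
# FITS image):
#
#   saveBitmap("preview.png", img[0].data, ["smart", 99.5], 800, "gray")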
#-----------------------------------------------------------------------------
def saveContourOverlayBitmap(outputFileName, backgroundImageData,
backgroundImageWCS, cutLevels,
size, colorMapName, contourImageData,
contourImageWCS,
contourSmoothFactor, contourLevels, contourColor,
contourWidth):
"""Makes a bitmap image from an image array, with a set of contours
generated from a second image array overlaid. The image format is specified
by the file extension (e.g. ".jpg"=JPEG, ".png"=PNG). The image array from
which the contours are to be generated can optionally be pre-smoothed using
a Gaussian filter.
@type outputFileName: string
@param outputFileName: filename of output bitmap image
@type backgroundImageData: numpy array
@param backgroundImageData: background image data array
@type backgroundImageWCS: astWCS.WCS
@param backgroundImageWCS: astWCS.WCS object of the background image data
array
@type cutLevels: list
@param cutLevels: sets the image scaling - available options:
- pixel values: cutLevels=[low value, high value].
- histogram equalisation: cutLevels=["histEq", number of bins ( e.g.
1024)]
- relative: cutLevels=["relative", cut per cent level (e.g. 99.5)]
- smart: cutLevels=["smart", cut per cent level (e.g. 99.5)]
["smart", 99.5] seems to provide good scaling over a range of different
images.
@type size: int
@param size: size of output image in pixels
@type colorMapName: string
@param colorMapName: name of a standard matplotlib colormap, e.g. "hot",
"cool", "gray" etc. (do "help(pylab.colormaps)" in the Python interpreter
to see available options)
@type contourImageData: numpy array
@param contourImageData: image data array from which contours are to be
generated
@type contourImageWCS: astWCS.WCS
@param contourImageWCS: astWCS.WCS object corresponding to contourImageData
@type contourSmoothFactor: float
@param contourSmoothFactor: standard deviation (in pixels) of Gaussian
filter for pre-smoothing of contour image data (set to 0 for no smoothing)
@type contourLevels: list
@param contourLevels: sets the contour levels - available options:
- values: contourLevels=[list of values specifying each level]
- linear spacing: contourLevels=['linear', min level value, max level
value, number of levels] - can use "min", "max" to automatically set
        min, max levels from image data
- log spacing: contourLevels=['log', min level value, max level
        value, number of levels] - can use "min", "max" to automatically set
min, max levels from image data
@type contourColor: string
@param contourColor: color of the overlaid contours, specified by the name
of a standard matplotlib color, e.g., "black", "white", "cyan" etc. (do
"help(pylab.colors)" in the Python interpreter to see available options)
@type contourWidth: int
@param contourWidth: width of the overlaid contours
"""
cut = intensityCutImage(backgroundImageData, cutLevels)
# Make plot of just the background image
aspectR = float(cut['image'].shape[0]) / float(cut['image'].shape[1])
pylab.figure(figsize=(10, 10 * aspectR))
pylab.axes([0, 0, 1, 1])
try:
colorMap = pylab.cm.get_cmap(colorMapName)
except AssertionError:
raise Exception(colorMapName +
" is not a defined matplotlib colormap.")
if cutLevels[0] == "histEq":
pylab.imshow(cut['image'],
interpolation="bilinear",
origin='lower',
cmap=colorMap)
else:
pylab.imshow(cut['image'],
interpolation="bilinear",
norm=cut['norm'],
origin='lower',
cmap=colorMap)
pylab.axis("off")
# Add the contours
contourData = generateContourOverlay(backgroundImageData,
backgroundImageWCS, contourImageData,
contourImageWCS, contourLevels,
contourSmoothFactor)
pylab.contour(contourData['scaledImage'],
contourData['contourLevels'],
colors=contourColor,
linewidths=contourWidth)
pylab.savefig("out_astImages.png")
pylab.close("all")
try:
from PIL import Image
except ImportError:
raise Exception("astImages.saveContourOverlayBitmap requires the "
"Python Imaging Library to be installed")
im = Image.open("out_astImages.png")
im.thumbnail((int(size), int(size)))
im.save(outputFileName)
os.remove("out_astImages.png")
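# Example usage (illustrative file names and settings; the bg/contour arrays and
# WCSs are already loaded):
#
#   saveContourOverlayBitmap("overlay.png", bgData, bgWCS, ["smart", 99.5], 800,
#                            "gray", contourData, contourWCS, 5.0,
#                            ['linear', 'min', 'max', 5], "white", 1)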
#----------------------------------------------------------------------------
def saveFITS(outputFileName, imageData, imageWCS=None):
"""Writes an image array to a new .fits file.
@type outputFileName: string
@param outputFileName: filename of output FITS image
@type imageData: numpy array
@param imageData: image data array
@type imageWCS: astWCS.WCS object
@param imageWCS: image WCS object
@note: If imageWCS=None, the FITS image will be written with a rudimentary
header containing no meta data.
"""
if os.path.exists(outputFileName):
os.remove(outputFileName)
newImg = pyfits.HDUList()
if imageWCS is not None:
hdu = pyfits.PrimaryHDU(None, imageWCS.header)
else:
hdu = pyfits.PrimaryHDU(None, None)
hdu.data = imageData
newImg.append(hdu)
newImg.writeto(outputFileName)
newImg.close()
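# Example usage (illustrative; writes the clipped section from the example
# comments above):
#
#   saveFITS("clipped.fits", clip['data'], clip['wcs'])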
#----------------------------------------------------------------------------
def histEq(inputArray, numBins):
"""Performs histogram equalisation of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type numBins: int
@param numBins: number of bins in which to perform the operation (e.g. 1024)
@rtype: numpy array
@return: image data array
"""
imageData = inputArray
# histogram equalisation: we want an equal number of pixels in each
# intensity range
sortedDataIntensities = numpy.sort(numpy.ravel(imageData))
# Make cumulative histogram of data values, simple min-max used to set bin
# sizes and range
dataCumHist = numpy.zeros(numBins)
minIntensity = sortedDataIntensities.min()
maxIntensity = sortedDataIntensities.max()
histRange = maxIntensity - minIntensity
binWidth = histRange / float(numBins - 1)
for i in range(len(sortedDataIntensities)):
binNumber = int(math.ceil((sortedDataIntensities[i] - minIntensity) /
binWidth))
addArray = numpy.zeros(numBins)
onesArray = numpy.ones(numBins - binNumber)
onesRange = list(range(binNumber, numBins))
numpy.put(addArray, onesRange, onesArray)
dataCumHist = dataCumHist + addArray
# Make ideal cumulative histogram
idealValue = dataCumHist.max() / float(numBins)
idealCumHist = numpy.arange(idealValue, dataCumHist.max() + idealValue,
idealValue)
# Map the data to the ideal
for y in range(imageData.shape[0]):
for x in range(imageData.shape[1]):
# Get index corresponding to dataIntensity
intensityBin = int(math.ceil((imageData[y][x] - minIntensity) /
binWidth))
# Guard against rounding errors (happens rarely I think)
if intensityBin < 0:
intensityBin = 0
if intensityBin > len(dataCumHist) - 1:
intensityBin = len(dataCumHist) - 1
            # Get the cumulative frequency corresponding to the intensity level in the data
dataCumFreq = dataCumHist[intensityBin]
# Get the index of the corresponding ideal cumulative frequency
idealBin = numpy.searchsorted(idealCumHist, dataCumFreq)
idealIntensity = (idealBin * binWidth) + minIntensity
imageData[y][x] = idealIntensity
return imageData
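# Example usage (illustrative bin count; note histEq modifies the array in place,
# so pass a copy if the original is still needed):
#
#   equalised = histEq(img[0].data.copy(), 1024)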
#-----------------------------------------------------------------------------
def normalise(inputArray, clipMinMax):
"""Clips the inputArray in intensity and normalises the array such that
minimum and maximum values are 0, 1. Clip in intensity is specified by
clipMinMax, a list in the format [clipMin, clipMax]
Used for normalising image arrays so that they can be turned into RGB
arrays that matplotlib can plot (see L{astPlots.ImagePlot}).
@type inputArray: numpy array
@param inputArray: image data array
@type clipMinMax: list
@param clipMinMax: [minimum value of clipped array, maximum value of
clipped array]
@rtype: numpy array
@return: normalised array with minimum value 0, maximum value 1
"""
clipped = inputArray.clip(clipMinMax[0], clipMinMax[1])
slope = 1.0 / (clipMinMax[1] - clipMinMax[0])
intercept = -clipMinMax[0] * slope
clipped = clipped * slope + intercept
return clipped
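# Example usage (illustrative clip range):
#
#   norm01 = normalise(img[0].data, [0.0, 1000.0])   # clipped, then mapped onto [0, 1]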
| lgpl-2.1 |
giorgiop/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 4 | 13141 | import sys
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import SkipTest
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# import reload
version = sys.version_info
if version[0] == 3:
# Python 3+ import for reload. Builtin in Python2
if version[1] == 3:
reload = None
else:
from importlib import reload
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
    # Test whether the sum of the normalized eigenvalues equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
tested_length = min(clf_lda_svd.explained_variance_ratio_.shape[0],
clf_lda_eigen.explained_variance_ratio_.shape[0])
# NOTE: clf_lda_eigen.explained_variance_ratio_ is not of n_components
# length. Make it the same length as clf_lda_svd.explained_variance_ratio_
# before comparison.
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_[:tested_length])
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
if reload is None:
raise SkipTest("Can't reload module on Python3.3")
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
elijah513/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
pombredanne/nTLP | setup.py | 1 | 5613 | #!/usr/bin/env python
from distutils.core import setup
###########################################
# Dependency or optional-checking functions
###########################################
# (see notes below.)
def check_gr1c():
import subprocess
try:
subprocess.call(["gr1c", "-V"], stdout=subprocess.PIPE)
subprocess.call(["rg", "-V"], stdout=subprocess.PIPE)
subprocess.call(["grpatch", "-V"], stdout=subprocess.PIPE)
except OSError:
return False
return True
def check_yaml():
try:
import yaml
except ImportError:
return False
return True
def check_cvc4():
import subprocess
cmd = subprocess.Popen(['which', 'cvc4'],
stdout=subprocess.PIPE, close_fds=True)
for line in cmd.stdout:
if 'cvc4' in line:
return True
return False
def check_yices():
import subprocess
cmd = subprocess.Popen(['which', 'yices'],
stdout=subprocess.PIPE, close_fds=True)
for line in cmd.stdout:
if 'yices' in line:
return True
return False
def check_glpk():
try:
import cvxopt.glpk
except ImportError:
return False
return True
def check_gephi():
import subprocess
cmd = subprocess.Popen(['which', 'gephi'], stdout=subprocess.PIPE)
for line in cmd.stdout:
if 'gephi' in line:
return True
return False
# Handle "dry-check" argument to check for dependencies without
# installing the tulip package; checking occurs by default if
# "install" is given, unless both "install" and "nocheck" are given
# (but typical users do not need "nocheck").
# You *must* have these to run TuLiP. Each item in other_depends must
# be treated specially; thus other_depends is a dictionary with
#
# keys : names of dependency;
# values : list of callable and string, which is printed on failure
# (i.e. package not found); we interpret the return value
# True to be success, and False failure.
other_depends = {}
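# For illustration only (check_spam and the URL are made up, not real
# requirements of this package), an entry would look like:
#
# other_depends = {'spam': [check_spam,
#                           'spam not found; see http://example.org/spam']}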
# These are nice to have but not necessary. Each item is of the form
#
# keys : name of optional package;
# values : list of callable and two strings, first string printed on
# success, second printed on failure (i.e. package not
# found); we interpret the return value True to be success,
# and False failure.
optionals = {'glpk' : [check_glpk, 'GLPK found.', 'GLPK seems to be missing\nand thus apparently not used by your installation of CVXOPT.\nIf you\'re interested, see http://www.gnu.org/s/glpk/'],
'gephi' : [check_gephi, 'Gephi found.', 'Gephi seems to be missing. If you\'re interested in graph visualization, see http://gephi.org/'],
'gr1c' : [check_gr1c, 'gr1c found.', 'gr1c, rg, or grpatch not found.\nIf you\'re interested in a GR(1) synthesis tool besides JTLV, see http://scottman.net/2012/gr1c'],
'PyYAML' : [check_yaml, 'PyYAML found.', 'PyYAML not found.\nTo read/write YAML, you will need to install PyYAML; see http://pyyaml.org/'],
'yices' : [check_yices, 'Yices found.', 'Yices not found.'],
'cvc4' : [check_cvc4, 'CVC4 found.', 'The SMT solver CVC4 was not found; see http://cvc4.cs.nyu.edu/\nSome functions in the rhtlp module will be unavailable.']}
import sys
perform_setup = True
check_deps = False
if 'install' in sys.argv[1:] and 'nocheck' not in sys.argv[1:]:
check_deps = True
elif 'dry-check' in sys.argv[1:]:
perform_setup = False
check_deps = True
# Pull "dry-check" and "nocheck" from argument list, if present, to play
# nicely with Distutils setup.
try:
sys.argv.remove('dry-check')
except ValueError:
pass
try:
sys.argv.remove('nocheck')
except ValueError:
pass
if check_deps:
if not perform_setup:
print "Checking for required dependencies..."
# Python package dependencies
try:
import numpy
except:
print 'ERROR: NumPy not found.'
raise
try:
import scipy
except:
print 'ERROR: SciPy not found.'
raise
try:
import cvxopt
except:
print 'ERROR: CVXOPT not found.'
raise
try:
import matplotlib
except:
print 'ERROR: matplotlib not found.'
raise
try:
import networkx
except:
print 'ERROR: NetworkX not found.'
raise
# Other dependencies
for (dep_key, dep_val) in other_depends.items():
if not dep_val[0]():
print dep_val[1]
raise Exception('Failed dependency: '+dep_key)
# Optional stuff
for (opt_key, opt_val) in optionals.items():
print 'Probing for optional '+opt_key+'...'
if opt_val[0]():
print "\t"+opt_val[1]
else:
print "\t"+opt_val[2]
if perform_setup:
from tulip import __version__ as tulip_version
setup(name = 'tulip',
version = tulip_version,
description = 'nTLP (forked from Temporal Logic Planning toolbox)',
author = 'Caltech Control and Dynamical Systems',
author_email = '[email protected]',
url = 'http://scottman.net/2013/nTLP',
license = 'BSD',
requires = ['numpy', 'scipy', 'cvxopt', 'matplotlib'],
packages = ['tulip'],
package_dir = {'tulip' : 'tulip'},
package_data={'tulip': ['jtlv_grgame.jar', 'polytope/*.py']}
)
| bsd-3-clause |
karst87/ml | 01_openlibs/tensorflow/02_tfgirls/TensorFlow-and-DeepLearning-Tutorial-master/Season1/20/dp.py | 1 | 13339 | # 新的 refined api 不支持 Python2
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import numpy as np
class Network():
def __init__(self, train_batch_size, test_batch_size, pooling_scale,
dropout_rate, base_learning_rate, decay_rate,
optimizeMethod='adam', save_path='model/default.ckpt'):
'''
@num_hidden: number of nodes in the hidden layer
@batch_size: because we want to save memory, we process the data in batches; this is the number of samples per batch.
'''
self.optimizeMethod = optimizeMethod
self.dropout_rate=dropout_rate
self.base_learning_rate=base_learning_rate
self.decay_rate=decay_rate
self.train_batch_size = train_batch_size
self.test_batch_size = test_batch_size
# Hyper Parameters
self.conv_config = [] # list of dict
self.fc_config = [] # list of dict
self.conv_weights = []
self.conv_biases = []
self.fc_weights = []
self.fc_biases = []
self.pooling_scale = pooling_scale
self.pooling_stride = pooling_scale
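# The pooling stride equals the pooling window size, i.e. non-overlapping max pooling.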
# Graph Related
self.tf_train_samples = None
self.tf_train_labels = None
self.tf_test_samples = None
self.tf_test_labels = None
# Statistics (summaries)
self.writer = None
self.merged = None
self.train_summaries = []
self.test_summaries = []
# save: persist the trained model
self.saver = None
self.save_path = save_path
def add_conv(self, *, patch_size, in_depth, out_depth, activation='relu', pooling=False, name):
'''
This function does not define operations in the graph; it only stores the layer config in self.conv_config
'''
self.conv_config.append({
'patch_size': patch_size,
'in_depth': in_depth,
'out_depth': out_depth,
'activation': activation,
'pooling': pooling,
'name': name
})
with tf.name_scope(name):
weights = tf.Variable(
tf.truncated_normal([patch_size, patch_size, in_depth, out_depth], stddev=0.1), name=name+'_weights')
biases = tf.Variable(tf.constant(0.1, shape=[out_depth]), name=name+'_biases')
self.conv_weights.append(weights)
self.conv_biases.append(biases)
def add_fc(self, *, in_num_nodes, out_num_nodes, activation='relu', name):
'''
Add a fully-connected layer config to self.fc_config
'''
self.fc_config.append({
'in_num_nodes': in_num_nodes,
'out_num_nodes': out_num_nodes,
'activation': activation,
'name': name
})
with tf.name_scope(name):
weights = tf.Variable(tf.truncated_normal([in_num_nodes, out_num_nodes], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[out_num_nodes]))
self.fc_weights.append(weights)
self.fc_biases.append(biases)
self.train_summaries.append(tf.histogram_summary(str(len(self.fc_weights))+'_weights', weights))
self.train_summaries.append(tf.histogram_summary(str(len(self.fc_biases))+'_biases', biases))
def apply_regularization(self, _lambda):
# L2 regularization for the fully connected parameters
regularization = 0.0
for weights, biases in zip(self.fc_weights, self.fc_biases):
regularization += tf.nn.l2_loss(weights) + tf.nn.l2_loss(biases)
# 1e5
return _lambda * regularization
# should make the definition as an exposed API, instead of implemented in the function
def define_inputs(self, *, train_samples_shape, train_labels_shape, test_samples_shape):
# Here we only define the various variables (placeholders) in the graph
with tf.name_scope('inputs'):
self.tf_train_samples = tf.placeholder(tf.float32, shape=train_samples_shape, name='tf_train_samples')
self.tf_train_labels = tf.placeholder(tf.float32, shape=train_labels_shape, name='tf_train_labels')
self.tf_test_samples = tf.placeholder(tf.float32, shape=test_samples_shape, name='tf_test_samples')
def define_model(self):
'''
Define our computation graph
'''
def model(data_flow, train=True):
'''
@data: original inputs
@return: logits
'''
# Define Convolutional Layers
for i, (weights, biases, config) in enumerate(zip(self.conv_weights, self.conv_biases, self.conv_config)):
with tf.name_scope(config['name'] + '_model'):
with tf.name_scope('convolution'):
# default 1,1,1,1 stride and SAME padding
data_flow = tf.nn.conv2d(data_flow, filter=weights, strides=[1, 1, 1, 1], padding='SAME')
data_flow = data_flow + biases
if not train:
self.visualize_filter_map(data_flow, how_many=config['out_depth'], display_size=32//(i//2+1), name=config['name']+'_conv')
if config['activation'] == 'relu':
data_flow = tf.nn.relu(data_flow)
if not train:
self.visualize_filter_map(data_flow, how_many=config['out_depth'], display_size=32//(i//2+1), name=config['name']+'_relu')
else:
raise Exception('Activation Func can only be Relu right now. You passed', config['activation'])
if config['pooling']:
data_flow = tf.nn.max_pool(
data_flow,
ksize=[1, self.pooling_scale, self.pooling_scale, 1],
strides=[1, self.pooling_stride, self.pooling_stride, 1],
padding='SAME')
if not train:
self.visualize_filter_map(data_flow, how_many=config['out_depth'], display_size=32//(i//2+1)//2, name=config['name']+'_pooling')
# Define Fully Connected Layers
for i, (weights, biases, config) in enumerate(zip(self.fc_weights, self.fc_biases, self.fc_config)):
if i == 0:
shape = data_flow.get_shape().as_list()
data_flow = tf.reshape(data_flow, [shape[0], shape[1] * shape[2] * shape[3]])
with tf.name_scope(config['name'] + '_model'):
### Dropout
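# Dropout is applied only to the input of the last fully-connected layer, and only at training time.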
if train and i == len(self.fc_weights) - 1:
data_flow = tf.nn.dropout(data_flow, self.dropout_rate, seed=4926)
###
data_flow = tf.matmul(data_flow, weights) + biases
if config['activation'] == 'relu':
data_flow = tf.nn.relu(data_flow)
elif config['activation'] is None:
pass
else:
raise Exception('Activation Func can only be Relu or None right now. You passed', config['activation'])
return data_flow
# Training computation.
logits = model(self.tf_train_samples)
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
self.loss += self.apply_regularization(_lambda=5e-4)
self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
# learning rate decay
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
learning_rate=self.base_learning_rate,
global_step=global_step*self.train_batch_size,
decay_steps=100,
decay_rate=self.decay_rate,
staircase=True
)
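# Note: global_step is a plain Variable that is never passed to minimize() below, so it is not
# incremented during training; the schedule is driven by global_step * batch_size with decay_steps=100.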
# Optimizer.
with tf.name_scope('optimizer'):
if(self.optimizeMethod=='gradient'):
self.optimizer = tf.train \
.GradientDescentOptimizer(learning_rate) \
.minimize(self.loss)
elif(self.optimizeMethod=='momentum'):
self.optimizer = tf.train \
.MomentumOptimizer(learning_rate, 0.5) \
.minimize(self.loss)
elif(self.optimizeMethod=='adam'):
self.optimizer = tf.train \
.AdamOptimizer(learning_rate) \
.minimize(self.loss)
# Predictions for the training, validation, and test data.
with tf.name_scope('train'):
self.train_prediction = tf.nn.softmax(logits, name='train_prediction')
tf.add_to_collection("prediction", self.train_prediction)
with tf.name_scope('test'):
self.test_prediction = tf.nn.softmax(model(self.tf_test_samples, train=False), name='test_prediction')
tf.add_to_collection("prediction", self.test_prediction)
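# Extra placeholder and prediction op for classifying a single 32x32 single-channel image
# (e.g. when the saved model is restored for inference).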
single_shape = (1, 32, 32, 1)
single_input = tf.placeholder(tf.float32, shape=single_shape, name='single_input')
self.single_prediction = tf.nn.softmax(model(single_input, train=False), name='single_prediction')
tf.add_to_collection("prediction", self.single_prediction)
self.merged_train_summary = tf.merge_summary(self.train_summaries)
self.merged_test_summary = tf.merge_summary(self.test_summaries)
# Placed after the graph has been defined; used to save this computation graph
self.saver = tf.train.Saver(tf.all_variables())
def run(self, train_samples, train_labels, test_samples, test_labels, *, train_data_iterator, iteration_steps, test_data_iterator):
'''
Uses a Session.
:data_iterator: a function that yields chunks of data
'''
self.writer = tf.train.SummaryWriter('./board', tf.get_default_graph())
with tf.Session(graph=tf.get_default_graph()) as session:
tf.initialize_all_variables().run()
### Training
print('Start Training')
# batch 1000
for i, samples, labels in train_data_iterator(train_samples, train_labels, iteration_steps=iteration_steps, chunkSize=self.train_batch_size):
_, l, predictions, summary = session.run(
[self.optimizer, self.loss, self.train_prediction, self.merged_train_summary],
feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels}
)
self.writer.add_summary(summary, i)
# `labels` contains the ground-truth labels
accuracy, _ = self.accuracy(predictions, labels)
if i % 50 == 0:
print('Minibatch loss at step %d: %f' % (i, l))
print('Minibatch accuracy: %.1f%%' % accuracy)
###
### Testing
accuracies = []
confusionMatrices = []
for i, samples, labels in test_data_iterator(test_samples, test_labels, chunkSize=self.test_batch_size):
result, summary = session.run(
[self.test_prediction, self.merged_test_summary],
feed_dict={self.tf_test_samples: samples}
)
self.writer.add_summary(summary, i)
accuracy, cm = self.accuracy(result, labels, need_confusion_matrix=True)
accuracies.append(accuracy)
confusionMatrices.append(cm)
print('Test Accuracy: %.1f%%' % accuracy)
print(' Average Accuracy:', np.average(accuracies))
print('Standard Deviation:', np.std(accuracies))
self.print_confusion_matrix(np.add.reduce(confusionMatrices))
###
def train(self, train_samples, train_labels, *, data_iterator, iteration_steps):
self.writer = tf.train.SummaryWriter('./board', tf.get_default_graph())
with tf.Session(graph=tf.get_default_graph()) as session:
tf.initialize_all_variables().run()
### Training
print('Start Training')
# batch 1000
for i, samples, labels in data_iterator(train_samples, train_labels, iteration_steps=iteration_steps, chunkSize=self.train_batch_size):
_, l, predictions, summary = session.run(
[self.optimizer, self.loss, self.train_prediction, self.merged_train_summary],
feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels}
)
self.writer.add_summary(summary, i)
# `labels` contains the ground-truth labels
accuracy, _ = self.accuracy(predictions, labels)
if i % 50 == 0:
print('Minibatch loss at step %d: %f' % (i, l))
print('Minibatch accuracy: %.1f%%' % accuracy)
###
# Check whether the directory to save to exists. Here we assume the path has only one level.
import os
if os.path.isdir(self.save_path.split('/')[0]):
save_path = self.saver.save(session, self.save_path)
print("Model saved in file: %s" % save_path)
else:
os.makedirs(self.save_path.split('/')[0])
save_path = self.saver.save(session, self.save_path)
print("Model saved in file: %s" % save_path)
def test(self, test_samples, test_labels, *, data_iterator):
if self.saver is None:
self.define_model()
if self.writer is None:
self.writer = tf.train.SummaryWriter('./board', tf.get_default_graph())
print('Before session')
with tf.Session(graph=tf.get_default_graph()) as session:
self.saver.restore(session, self.save_path)
### Testing
accuracies = []
confusionMatrices = []
for i, samples, labels in data_iterator(test_samples, test_labels, chunkSize=self.test_batch_size):
result= session.run(
self.test_prediction,
feed_dict={self.tf_test_samples: samples}
)
#self.writer.add_summary(summary, i)
accuracy, cm = self.accuracy(result, labels, need_confusion_matrix=True)
accuracies.append(accuracy)
confusionMatrices.append(cm)
print('Test Accuracy: %.1f%%' % accuracy)
print(' Average Accuracy:', np.average(accuracies))
print('Standard Deviation:', np.std(accuracies))
self.print_confusion_matrix(np.add.reduce(confusionMatrices))
###
def accuracy(self, predictions, labels, need_confusion_matrix=False):
'''
Compute the accuracy and recall of the predictions
@return: accuracy and confusionMatrix as a tuple
'''
_predictions = np.argmax(predictions, 1)
_labels = np.argmax(labels, 1)
cm = confusion_matrix(_labels, _predictions) if need_confusion_matrix else None
# == is overloaded for numpy array
accuracy = (100.0 * np.sum(_predictions == _labels) / predictions.shape[0])
return accuracy, cm
def visualize_filter_map(self, tensor, *, how_many, display_size, name):
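# Take the feature maps of the last sample in the batch and log them as images
# (one per output channel) so they can be inspected in TensorBoard.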
#print(tensor.get_shape)
filter_map = tensor[-1]
#print(filter_map.get_shape())
filter_map = tf.transpose(filter_map, perm=[2, 0, 1])
#print(filter_map.get_shape())
filter_map = tf.reshape(filter_map, (how_many, display_size, display_size, 1))
#print(how_many)
self.test_summaries.append(tf.image_summary(name, tensor=filter_map, max_images=how_many))
def print_confusion_matrix(self, confusionMatrix):
print('Confusion Matrix:')
for i, line in enumerate(confusionMatrix):
print(line, line[i] / np.sum(line))
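# Below, 'a' accumulates column[i] / sum(column) weighted by each column's share of the
# test set, assuming 26000 test samples in total.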
a = 0
for i, column in enumerate(np.transpose(confusionMatrix, (1, 0))):
a += (column[i] / np.sum(column)) * (np.sum(column) / 26000)
print(column[i] / np.sum(column), )
print('\n', np.sum(confusionMatrix), a)
| mit |
arabenjamin/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/widgets/rectangle_selector.py | 1 | 3047 | """
==================
Rectangle Selector
==================
Click somewhere on the axes, move the mouse to some destination, and release
the button. This class reports the click and release events and also draws
a line or a box from the click point to the current mouse position
(within the same axes) until the button is released. The method
'self.ignore()' checks whether the buttons from the press and release
events are the same.
"""
from __future__ import print_function
from matplotlib.widgets import RectangleSelector
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# we're in NodeBox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
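# Render the current matplotlib figure to a temporary PNG, draw it onto the NodeBox canvas,
# and stack successive figures vertically.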
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
def line_select_callback(eclick, erelease):
'eclick and erelease are the press and release events'
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
print("(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (x1, y1, x2, y2))
print(" The button you used were: %s %s" % (eclick.button, erelease.button))
def toggle_selector(event):
print(' Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print(' RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print(' RectangleSelector activated.')
toggle_selector.RS.set_active(True)
fig, current_ax = plt.subplots() # make a new plotting range
N = 100000 # If N is large one can see
x = np.linspace(0.0, 10.0, N)    # improvement by using blitting!
plt.plot(x, +np.sin(.2*np.pi*x), lw=3.5, c='b', alpha=.7) # plot something
plt.plot(x, +np.cos(.2*np.pi*x), lw=3.5, c='r', alpha=.5)
plt.plot(x, -np.sin(.2*np.pi*x), lw=3.5, c='g', alpha=.3)
print("\n click --> release")
# drawtype is 'box' or 'line' or 'none'
toggle_selector.RS = RectangleSelector(current_ax, line_select_callback,
drawtype='box', useblit=True,
button=[1, 3], # don't use middle button
minspanx=5, minspany=5,
spancoords='pixels',
interactive=True)
plt.connect('key_press_event', toggle_selector)
pltshow(plt)
| mit |
jmmease/pandas | pandas/tests/frame/test_join.py | 11 | 5226 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import DataFrame, Index, PeriodIndex
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@pytest.fixture
def frame_with_period_index():
return DataFrame(
data=np.arange(20).reshape(4, 5),
columns=list('abcde'),
index=PeriodIndex(start='2000', freq='A', periods=4))
@pytest.fixture
def frame():
return TestData().frame
@pytest.fixture
def left():
return DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
@pytest.fixture
def right():
return DataFrame({'b': [300, 100, 200]}, index=[3, 1, 2])
@pytest.mark.parametrize(
"how, sort, expected",
[('inner', False, DataFrame({'a': [20, 10],
'b': [200, 100]},
index=[2, 1])),
('inner', True, DataFrame({'a': [10, 20],
'b': [100, 200]},
index=[1, 2])),
('left', False, DataFrame({'a': [20, 10, 0],
'b': [200, 100, np.nan]},
index=[2, 1, 0])),
('left', True, DataFrame({'a': [0, 10, 20],
'b': [np.nan, 100, 200]},
index=[0, 1, 2])),
('right', False, DataFrame({'a': [np.nan, 10, 20],
'b': [300, 100, 200]},
index=[3, 1, 2])),
('right', True, DataFrame({'a': [10, 20, np.nan],
'b': [100, 200, 300]},
index=[1, 2, 3])),
('outer', False, DataFrame({'a': [0, 10, 20, np.nan],
'b': [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3])),
('outer', True, DataFrame({'a': [0, 10, 20, np.nan],
'b': [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3]))])
def test_join(left, right, how, sort, expected):
result = left.join(right, how=how, sort=sort)
tm.assert_frame_equal(result, expected)
def test_join_index(frame):
# left / right
f = frame.loc[frame.index[:10], ['A', 'B']]
f2 = frame.loc[frame.index[5:], ['C', 'D']].iloc[::-1]
joined = f.join(f2)
tm.assert_index_equal(f.index, joined.index)
expected_columns = Index(['A', 'B', 'C', 'D'])
tm.assert_index_equal(joined.columns, expected_columns)
joined = f.join(f2, how='left')
tm.assert_index_equal(joined.index, f.index)
tm.assert_index_equal(joined.columns, expected_columns)
joined = f.join(f2, how='right')
tm.assert_index_equal(joined.index, f2.index)
tm.assert_index_equal(joined.columns, expected_columns)
# inner
joined = f.join(f2, how='inner')
tm.assert_index_equal(joined.index, f.index[5:10])
tm.assert_index_equal(joined.columns, expected_columns)
# outer
joined = f.join(f2, how='outer')
tm.assert_index_equal(joined.index, frame.index.sort_values())
tm.assert_index_equal(joined.columns, expected_columns)
tm.assert_raises_regex(
ValueError, 'join method', f.join, f2, how='foo')
# corner case - overlapping columns
for how in ('outer', 'left', 'inner'):
with tm.assert_raises_regex(ValueError, 'columns overlap but '
'no suffix'):
frame.join(frame, how=how)
def test_join_index_more(frame):
af = frame.loc[:, ['A', 'B']]
bf = frame.loc[::2, ['C', 'D']]
expected = af.copy()
expected['C'] = frame['C'][::2]
expected['D'] = frame['D'][::2]
result = af.join(bf)
tm.assert_frame_equal(result, expected)
result = af.join(bf, how='right')
tm.assert_frame_equal(result, expected[::2])
result = bf.join(af, how='right')
tm.assert_frame_equal(result, expected.loc[:, result.columns])
def test_join_index_series(frame):
df = frame.copy()
s = df.pop(frame.columns[-1])
joined = df.join(s)
# TODO should this check_names ?
tm.assert_frame_equal(joined, frame, check_names=False)
s.name = None
tm.assert_raises_regex(ValueError, 'must have a name', df.join, s)
def test_join_overlap(frame):
df1 = frame.loc[:, ['A', 'B', 'C']]
df2 = frame.loc[:, ['B', 'C', 'D']]
joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
df1_suf = df1.loc[:, ['B', 'C']].add_suffix('_df1')
df2_suf = df2.loc[:, ['B', 'C']].add_suffix('_df2')
no_overlap = frame.loc[:, ['A', 'D']]
expected = df1_suf.join(df2_suf).join(no_overlap)
# column order not necessarily sorted
tm.assert_frame_equal(joined, expected.loc[:, joined.columns])
def test_join_period_index(frame_with_period_index):
other = frame_with_period_index.rename(
columns=lambda x: '{key}{key}'.format(key=x))
joined_values = np.concatenate(
[frame_with_period_index.values] * 2, axis=1)
joined_cols = frame_with_period_index.columns.append(other.columns)
joined = frame_with_period_index.join(other)
expected = DataFrame(
data=joined_values,
columns=joined_cols,
index=frame_with_period_index.index)
tm.assert_frame_equal(joined, expected)
| bsd-3-clause |
ainafp/nilearn | plot_simulated_data.py | 1 | 5562 | """
=================================================
Example of pattern recognition on simulated data
=================================================
This example simulates data according to a very simple sketch of brain
imaging data and applies machine learning techniques to predict output
values.
"""
# Licence : BSD
print __doc__
from time import time
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn import linear_model, svm
from sklearn.utils import check_random_state
from sklearn.cross_validation import KFold
from sklearn.feature_selection import f_regression
import nibabel
from nilearn import decoding
import nilearn.masking
###############################################################################
# Function to generate data
def create_simulation_data(snr=0, n_samples=2 * 100, size=12, random_state=1):
generator = check_random_state(random_state)
roi_size = 2 # size / 3
smooth_X = 1
### Coefs
w = np.zeros((size, size, size))
w[0:roi_size, 0:roi_size, 0:roi_size] = -0.6
w[-roi_size:, -roi_size:, 0:roi_size] = 0.5
w[0:roi_size, -roi_size:, -roi_size:] = -0.6
w[-roi_size:, 0:roi_size:, -roi_size:] = 0.5
w[(size - roi_size) / 2:(size + roi_size) / 2,
(size - roi_size) / 2:(size + roi_size) / 2,
(size - roi_size) / 2:(size + roi_size) / 2] = 0.5
w = w.ravel()
### Generate smooth background noise
XX = generator.randn(n_samples, size, size, size)
noise = []
for i in range(n_samples):
Xi = ndimage.filters.gaussian_filter(XX[i, :, :, :], smooth_X)
Xi = Xi.ravel()
noise.append(Xi)
noise = np.array(noise)
### Generate the signal y
y = generator.randn(n_samples)
X = np.dot(y[:, np.newaxis], w[np.newaxis])
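# Rescale the noise so that 20 * ln(||X|| / ||noise||) matches the requested snr value
# (the same natural-log convention is used in the printout below).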
norm_noise = linalg.norm(X, 2) / np.exp(snr / 20.)
noise_coef = norm_noise / linalg.norm(noise, 2)
noise *= noise_coef
snr = 20 * np.log(linalg.norm(X, 2) / linalg.norm(noise, 2))
print ("SNR: %.1f dB" % snr)
### Mixing of signal + noise and splitting into train/test
X += noise
X -= X.mean(axis=-1)[:, np.newaxis]
X /= X.std(axis=-1)[:, np.newaxis]
X_test = X[n_samples / 2:, :]
X_train = X[:n_samples / 2, :]
y_test = y[n_samples / 2:]
y = y[:n_samples / 2]
return X_train, X_test, y, y_test, snr, noise, w, size
def plot_slices(data, title=None):
plt.figure(figsize=(5.5, 2.2))
vmax = np.abs(data).max()
for i in (0, 6, 11):
plt.subplot(1, 3, i / 5 + 1)
plt.imshow(data[:, :, i], vmin=-vmax, vmax=vmax,
interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(hspace=0.05, wspace=0.05, left=.03, right=.97, top=.9)
if title is not None:
plt.suptitle(title, y=.95)
###############################################################################
# Create data
X_train, X_test, y_train, y_test, snr, _, coefs, size = \
create_simulation_data(snr=-10, n_samples=100, size=12)
# Create masks for SearchLight. process_mask is the voxels where SearchLight
# computation is performed. It is a subset of the brain mask, just to reduce
# computation time.
mask = np.ones((size, size, size), np.bool)
mask_img = nibabel.Nifti1Image(mask.astype(np.int), np.eye(4))
process_mask = np.zeros((size, size, size), np.bool)
process_mask[:, :, 0] = True
process_mask[:, :, 6] = True
process_mask[:, :, 11] = True
process_mask_img = nibabel.Nifti1Image(process_mask.astype(np.int), np.eye(4))
coefs = np.reshape(coefs, [size, size, size])
plot_slices(coefs, title="Ground truth")
###############################################################################
# Compute the results and estimated coef maps for different estimators
classifiers = [
('bayesian_ridge', linear_model.BayesianRidge(normalize=True)),
('enet_cv', linear_model.ElasticNetCV(alphas=[5, 1, 0.5, 0.1],
l1_ratio=0.05)),
('ridge_cv', linear_model.RidgeCV(alphas=[100, 10, 1, 0.1], cv=5)),
('svr', svm.SVR(kernel='linear', C=0.001)),
('searchlight', decoding.SearchLight(
mask_img, process_mask_img=process_mask_img,
radius=2.7, scoring='r2', estimator=svm.SVR(kernel="linear"),
cv=KFold(y_train.size, n_folds=4),
verbose=1, n_jobs=1))
]
# Run the estimators
for name, classifier in classifiers:
t1 = time()
if name != "searchlight":
classifier.fit(X_train, y_train)
else:
X = nilearn.masking.unmask(X_train, mask_img)
classifier.fit(X, y_train)
del X
elapsed_time = time() - t1
if name != 'searchlight':
coefs = classifier.coef_
coefs = np.reshape(coefs, [size, size, size])
score = classifier.score(X_test, y_test)
title = '%s: prediction score %.3f, training time: %.2fs' % (
classifier.__class__.__name__, score,
elapsed_time)
else: # Searchlight
coefs = classifier.scores_
title = '%s: training time: %.2fs' % (
classifier.__class__.__name__,
elapsed_time)
# We use the plot_slices function provided in the example to
# plot the results
plot_slices(coefs, title=title)
print title
f_values, p_values = f_regression(X_train, y_train)
p_values = np.reshape(p_values, (size, size, size))
p_values = -np.log10(p_values)
p_values[np.isnan(p_values)] = 0
p_values[p_values > 10] = 10
plot_slices(p_values, title="f_regress")
plt.show()
| bsd-3-clause |
lucyparsons/OpenOversight | OpenOversight/tests/test_commands.py | 1 | 38743 | import csv
import datetime
import operator
import os
import traceback
import uuid
from click.testing import CliRunner
from sqlalchemy.orm.exc import MultipleResultsFound
import pandas as pd
import pytest
from OpenOversight.app.commands import (
add_department,
add_job_title,
bulk_add_officers,
advanced_csv_import,
create_officer_from_row,
)
from OpenOversight.app.models import (
Assignment,
Department,
Incident,
Job,
Link,
Officer,
Salary,
Unit,
)
from OpenOversight.app.utils import get_officer
from OpenOversight.tests.conftest import RANK_CHOICES_1, generate_officer
def run_command_print_output(cli, args=None, **kwargs):
"""
This function runs the given command with the provided arguments
and returns a `result` object. The most relevant part of that object is
the exit_code, where 0 indicates a successful run of the command and
any other value signifies a failure.
Additionally, this function sends all generated logs to stdout
and prints exceptions and stack traces to make it easier to debug
a failing command.
"""
runner = CliRunner()
result = runner.invoke(cli, args=args, **kwargs)
print(result.output)
print(result.stderr_bytes)
if result.exception is not None:
print(result.exception)
print(traceback.print_exception(*result.exc_info))
return result
def test_add_department__success(session):
name = "Added Police Department"
short_name = "APD"
unique_internal_identifier = "30ad0au239eas939asdj"
# add department via command line
result = run_command_print_output(
add_department, [name, short_name, unique_internal_identifier]
)
# command ran successful
assert result.exit_code == 0
# department was added to database
departments = Department.query.filter_by(
unique_internal_identifier_label=unique_internal_identifier
).all()
assert len(departments) == 1
department = departments[0]
assert department.name == name
assert department.short_name == short_name
def test_add_department__duplicate(session):
name = "Duplicate Department"
short_name = "DPD"
department = Department(name=name, short_name=short_name)
session.add(department)
session.commit()
# adding department of same name via command
result = run_command_print_output(
add_department, [name, short_name, "2320wea0s9d03eas"]
)
# fails because Department with this name already exists
assert result.exit_code != 0
assert result.exception is not None
def test_add_department__missing_argument(session):
# running add-department command missing one argument
result = run_command_print_output(add_department, ["Name of Department"])
# fails because short name is required argument
assert result.exit_code != 0
assert result.exception is not None
def test_add_job_title__success(session, department):
department_id = department.id
job_title = "Police Officer"
is_sworn = True
order = 1
# run command to add job title
result = run_command_print_output(
add_job_title, [str(department_id), job_title, str(is_sworn), str(order)]
)
assert result.exit_code == 0
# confirm that job title was added to database
jobs = Job.query.filter_by(department_id=department_id, job_title=job_title).all()
assert len(jobs) == 1
job = jobs[0]
assert job.job_title == job_title
assert job.is_sworn_officer == is_sworn
assert job.order == order
def test_add_job_title__duplicate(session, department):
job_title = "Police Officer"
is_sworn = True
order = 1
job = Job(
job_title=job_title,
is_sworn_officer=is_sworn,
order=order,
department=department,
)
session.add(job)
session.commit()
# adding exact same job again via command
result = run_command_print_output(
add_department, [str(department.id), job_title, str(is_sworn), str(order)]
)
# fails because this job already exists
assert result.exit_code != 0
assert result.exception is not None
def test_add_job_title__different_departments(session, department):
other_department = Department(name="Other Police Department", short_name="OPD")
session.add(other_department)
session.commit()
other_department_id = other_department.id
job_title = "Police Officer"
is_sworn = True
order = 1
job = Job(
job_title=job_title,
is_sworn_officer=is_sworn,
order=order,
department=department,
)
session.add(job)
session.commit()
# adding the same job but for a different department
result = run_command_print_output(
add_job_title, [str(other_department_id), job_title, str(is_sworn), str(order)]
)
# success because this department doesn't have that title yet
assert result.exit_code == 0
jobs = Job.query.filter_by(
department_id=other_department_id, job_title=job_title
).all()
assert len(jobs) == 1
job = jobs[0]
assert job.job_title == job_title
assert job.is_sworn_officer == is_sworn
assert job.order == order
def test_csv_import_new(csvfile):
# Delete all current officers
Officer.query.delete()
assert Officer.query.count() == 0
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created > 0
assert Officer.query.count() == n_created
assert n_updated == 0
def test_csv_import_update(csvfile):
n_existing = Officer.query.count()
assert n_existing > 0
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created == 0
assert n_updated == 0
assert Officer.query.count() == n_existing
def test_csv_import_idempotence(csvfile):
# Delete all current officers
Officer.query.delete()
assert Officer.query.count() == 0
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created > 0
assert n_updated == 0
officer_count = Officer.query.count()
assert officer_count == n_created
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created == 0
assert n_updated == 0
assert Officer.query.count() == officer_count
def test_csv_missing_required_field(csvfile):
df = pd.read_csv(csvfile)
df.drop(columns="first_name").to_csv(csvfile)
with pytest.raises(Exception) as exc:
bulk_add_officers([csvfile])
assert "Missing required field" in str(exc.value)
def test_csv_missing_badge_and_uid(csvfile):
df = pd.read_csv(csvfile)
df.drop(columns=["star_no", "unique_internal_identifier"]).to_csv(csvfile)
with pytest.raises(Exception) as exc:
bulk_add_officers([csvfile])
assert (
"CSV file must include either badge numbers or unique identifiers for officers"
in str(exc.value)
)
def test_csv_non_existant_dept_id(csvfile):
df = pd.read_csv(csvfile)
df["department_id"] = 666
df.to_csv(csvfile)
with pytest.raises(Exception) as exc:
bulk_add_officers([csvfile])
assert "Department ID 666 not found" in str(exc.value)
def test_csv_officer_missing_badge_and_uid(csvfile):
df = pd.read_csv(csvfile)
df.loc[0, "star_no"] = None
df.loc[0, "unique_internal_identifier"] = None
df.to_csv(csvfile)
with pytest.raises(Exception) as exc:
bulk_add_officers([csvfile])
assert "missing badge number and unique identifier" in str(exc.value)
def test_csv_changed_static_field(csvfile):
df = pd.read_csv(csvfile)
df.loc[0, "birth_year"] = 666
df.to_csv(csvfile)
with pytest.raises(Exception) as exc:
bulk_add_officers([csvfile])
assert "has differing birth_year field" in str(exc.value)
def test_csv_new_assignment(csvfile):
# Delete all current officers and assignments
Assignment.query.delete()
Officer.query.delete()
assert Officer.query.count() == 0
df = pd.read_csv(csvfile)
df.loc[0, "job_title"] = "Commander"
df.to_csv(csvfile)
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created > 0
assert n_updated == 0
assert Officer.query.count() == n_created
officer = get_officer(
1, df.loc[0, "star_no"], df.loc[0, "first_name"], df.loc[0, "last_name"]
)
assert officer
officer_id = officer.id
assert len(list(officer.assignments)) == 1
# Update job_title
df.loc[0, "job_title"] = "CAPTAIN"
df.to_csv(csvfile)
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created == 0
assert n_updated == 1
officer = Officer.query.filter_by(id=officer_id).one()
assert len(list(officer.assignments)) == 2
for assignment in officer.assignments:
assert (
assignment.job.job_title == "Commander"
or assignment.job.job_title == "CAPTAIN"
)
def test_csv_new_name(csvfile):
df = pd.read_csv(csvfile)
officer_uid = df.loc[0, "unique_internal_identifier"]
assert officer_uid
df.loc[0, "first_name"] = "FOO"
df.to_csv(csvfile)
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created == 0
assert n_updated == 1
officer = Officer.query.filter_by(unique_internal_identifier=officer_uid).one()
assert officer.first_name == "FOO"
def test_csv_new_officer(csvfile):
df = pd.read_csv(csvfile)
n_rows = len(df.index)
assert n_rows > 0
n_officers = Officer.query.count()
assert n_officers > 0
new_uid = str(uuid.uuid4())
new_officer = { # Must match fields in csvfile
"department_id": 1,
"unique_internal_identifier": new_uid,
"first_name": "FOO",
"last_name": "BAR",
"middle_initial": None,
"suffix": None,
"gender": "F",
"race": "BLACK",
"employment_date": None,
"birth_year": None,
"star_no": 666,
"job_title": "CAPTAIN",
"unit": None,
"star_date": None,
"resign_date": None,
"salary": 1.23,
"salary_year": 2019,
"salary_is_fiscal_year": True,
"overtime_pay": 4.56,
}
df = df.append([new_officer])
df.to_csv(csvfile)
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created == 1
assert n_updated == 0
officer = Officer.query.filter_by(unique_internal_identifier=new_uid).one()
assert officer.first_name == "FOO"
assert Officer.query.count() == n_officers + 1
def test_csv_new_salary(csvfile):
# Delete all current officers and salaries
Salary.query.delete()
Officer.query.delete()
assert Officer.query.count() == 0
df = pd.read_csv(csvfile)
df.loc[0, "salary"] = "123456.78"
df.to_csv(csvfile)
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created > 0
assert n_updated == 0
officer_count = Officer.query.count()
assert officer_count == n_created
officer = get_officer(
1, df.loc[0, "star_no"], df.loc[0, "first_name"], df.loc[0, "last_name"]
)
assert officer
officer_id = officer.id
assert len(list(officer.salaries)) == 1
# Update salary
df.loc[0, "salary"] = "150000"
df.to_csv(csvfile)
assert Officer.query.count() > 0
n_created, n_updated = bulk_add_officers([csvfile], standalone_mode=False)
assert n_created == 0
assert n_updated == 1
assert Officer.query.count() == officer_count
officer = Officer.query.filter_by(id=officer_id).one()
assert len(list(officer.salaries)) == 2
for salary in officer.salaries:
assert float(salary.salary) == 123456.78 or float(salary.salary) == 150000.00
def test_bulk_add_officers__success(session, department_with_ranks, csv_path):
# generate two officers with different names
first_officer = generate_officer()
first_officer.department = department_with_ranks
print(Job.query.all())
print(Job.query.filter_by(department=department_with_ranks).all())
job = (
Job.query.filter_by(department=department_with_ranks).filter_by(order=1)
).first()
fo_fn = "Uniquefirst"
first_officer.first_name = fo_fn
fo_ln = first_officer.last_name
session.add(first_officer)
session.commit()
assignment = Assignment(baseofficer=first_officer, job_id=job.id)
session.add(assignment)
session.commit()
different_officer = generate_officer()
different_officer.department = department_with_ranks
different_officer.job = job
do_fn = different_officer.first_name
do_ln = different_officer.last_name
session.add(different_officer)
assignment = Assignment(baseofficer=different_officer, job=job)
session.add(assignment)
session.commit()
department_id = department_with_ranks.id
# generate csv to update one existing officer and add one new
new_officer_first_name = "Newofficer"
new_officer_last_name = "Name"
fieldnames = [
"department_id",
"first_name",
"last_name",
"job_title",
]
with open(csv_path, "w") as f:
csv_writer = csv.DictWriter(f, fieldnames=fieldnames)
csv_writer.writeheader()
csv_writer.writerow(
{
"department_id": department_id,
"first_name": first_officer.first_name,
"last_name": first_officer.last_name,
"job_title": RANK_CHOICES_1[2],
}
)
csv_writer.writerow(
{
"department_id": department_id,
"first_name": new_officer_first_name,
"last_name": new_officer_last_name,
"job_title": RANK_CHOICES_1[1],
}
)
# run command with generated csv
result = run_command_print_output(bulk_add_officers, [csv_path, "--update-by-name"])
# command had no errors & exceptions
assert result.exit_code == 0
assert result.exception is None
# make sure that exactly three officers are assigned to the department now
# and the first officer has two assignments stored (one original one
# and one updated via csv)
officer_query = Officer.query.filter_by(department_id=department_id)
officers = officer_query.all()
assert len(officers) == 3
first_officer_db = officer_query.filter_by(first_name=fo_fn, last_name=fo_ln).one()
assert {asmt.job.job_title for asmt in first_officer_db.assignments.all()} == {
RANK_CHOICES_1[2],
RANK_CHOICES_1[1],
}
different_officer_db = officer_query.filter_by(
first_name=do_fn, last_name=do_ln
).one()
assert [asmt.job.job_title for asmt in different_officer_db.assignments.all()] == [
RANK_CHOICES_1[1]
]
new_officer_db = officer_query.filter_by(
first_name=new_officer_first_name, last_name=new_officer_last_name
).one()
assert [asmt.job.job_title for asmt in new_officer_db.assignments.all()] == [
RANK_CHOICES_1[1]
]
def test_bulk_add_officers__duplicate_name(session, department, csv_path):
# two officers with the same name
first_name = "James"
last_name = "Smith"
first_officer = generate_officer()
first_officer.department = department
first_officer.first_name = first_name
first_officer.last_name = last_name
session.add(first_officer)
session.commit()
different_officer = generate_officer()
different_officer.department = department
different_officer.first_name = first_name
different_officer.last_name = last_name
session.add(different_officer)
session.commit()
# a csv that refers to that name
fieldnames = [
"department_id",
"first_name",
"last_name",
"star_no",
]
with open(csv_path, "w") as f:
csv_writer = csv.DictWriter(f, fieldnames=fieldnames)
csv_writer.writeheader()
csv_writer.writerow(
{
"department_id": department.id,
"first_name": "James",
"last_name": "Smith",
"star_no": 1234,
}
)
# run command with generated csv and --update-by-name flag set
result = run_command_print_output(bulk_add_officers, [csv_path, "--update-by-name"])
# command does not execute successfully since the name is not unique
assert result.exit_code != 0
# command throws MultipleResultsFound error
assert isinstance(result.exception, MultipleResultsFound)
def test_bulk_add_officers__write_static_null_field(session, department, csv_path):
# start with an officer whose birth_year is missing
officer = generate_officer()
officer.birth_year = None
officer.department = department
session.add(officer)
session.commit()
fo_uuid = officer.unique_internal_identifier
birth_year = 1983
fieldnames = [
"department_id",
"first_name",
"last_name",
"unique_internal_identifier",
"birth_year",
]
# generate csv that provides birth_year for that officer
with open(csv_path, "w") as f:
csv_writer = csv.DictWriter(f, fieldnames=fieldnames)
csv_writer.writeheader()
csv_writer.writerow(
{
"department_id": department.id,
"first_name": officer.first_name,
"last_name": officer.last_name,
"unique_internal_identifier": fo_uuid,
"birth_year": birth_year,
}
)
# run command no flags set
result = run_command_print_output(bulk_add_officers, [csv_path])
# command successful
assert result.exit_code == 0
assert result.exception is None
# officer information is updated in the database
officer = Officer.query.filter_by(unique_internal_identifier=fo_uuid).one()
assert officer.birth_year == birth_year
def test_bulk_add_officers__write_static_field_no_flag(session, department, csv_path):
# officer with birth year set
officer = generate_officer()
old_birth_year = 1979
officer.birth_year = old_birth_year
officer.department = department
session.add(officer)
session.commit()
fo_uuid = officer.unique_internal_identifier
new_birth_year = 1983
fieldnames = [
"department_id",
"first_name",
"last_name",
"unique_internal_identifier",
"birth_year",
]
# generate csv that assigns different birth year to that officer
with open(csv_path, "w") as f:
csv_writer = csv.DictWriter(f, fieldnames=fieldnames)
csv_writer.writeheader()
csv_writer.writerow(
{
"department_id": department.id,
"first_name": officer.first_name,
"last_name": officer.last_name,
"unique_internal_identifier": fo_uuid,
"birth_year": new_birth_year,
}
)
# run command, no flag
result = run_command_print_output(bulk_add_officers, [csv_path])
# command fails because birth year is a static field and cannot be changed
# without --update-static-fields set
assert result.exit_code != 0
assert result.exception is not None
# officer still has original birth year
officer = Officer.query.filter_by(unique_internal_identifier=fo_uuid).one()
assert officer.birth_year == old_birth_year
def test_bulk_add_officers__write_static_field__flag_set(session, department, csv_path):
# officer with birth year set
officer = generate_officer()
officer.birth_year = 1979
officer.department = department
session.add(officer)
session.commit()
officer_uuid = officer.unique_internal_identifier
new_birth_year = 1983
fieldnames = [
"department_id",
"first_name",
"last_name",
"unique_internal_identifier",
"birth_year",
]
# generate csv assigning different birth year to that officer
with open(csv_path, "w") as f:
csv_writer = csv.DictWriter(f, fieldnames=fieldnames)
csv_writer.writeheader()
csv_writer.writerow(
{
"department_id": department.id,
"first_name": officer.first_name,
"last_name": officer.last_name,
"unique_internal_identifier": officer_uuid,
"birth_year": new_birth_year,
}
)
# run command with --update-static-fields set to allow
# overwriting of birth year even if already present
result = run_command_print_output(
bulk_add_officers, [csv_path, "--update-static-fields"]
)
assert result.exit_code == 0
assert result.exception is None
# confirm that officer's birth year was updated in database
officer = Officer.query.filter_by(unique_internal_identifier=officer_uuid).one()
assert officer.birth_year == new_birth_year
def test_bulk_add_officers__no_create_flag(session, department, csv_path):
# department with one officer
department_id = department.id
officer = generate_officer()
officer.gender = None
officer.department = department
session.add(officer)
session.commit()
officer_uuid = officer.unique_internal_identifier
officer_gender_updated = "M"
fieldnames = [
"department_id",
"first_name",
"last_name",
"unique_internal_identifier",
"gender",
]
# generate csv that updates gender of officer already in database
# and provides data for another (new) officer
with open(csv_path, "w") as f:
csv_writer = csv.DictWriter(f, fieldnames=fieldnames)
csv_writer.writeheader()
csv_writer.writerow(
{
"department_id": department_id,
"first_name": officer.first_name,
"last_name": officer.last_name,
"unique_internal_identifier": officer_uuid,
"gender": officer_gender_updated,
}
)
csv_writer.writerow(
{
"department_id": department_id,
"first_name": "NewOfficer",
"last_name": "NotInDatabase",
"unique_internal_identifier": uuid.uuid4(),
"gender": "M",
}
)
# run bulk_add_officers command with --no-create flag set
# so no new officers are created. Those that do not exist are
# simply ignored
result = run_command_print_output(bulk_add_officers, [csv_path, "--no-create"])
assert result.exit_code == 0
assert result.exception is None
# confirm that only one officer is in database and information was updated
officer = Officer.query.filter_by(department_id=department_id).one()
assert officer.unique_internal_identifier == officer_uuid
assert officer.gender == officer_gender_updated
def test_advanced_csv_import__success(session, department_with_ranks, test_csv_dir):
# make sure department name aligns with the csv files
assert department_with_ranks.name == "Springfield Police Department"
# set up existing data
officer = Officer(
id=49483,
department_id=1,
first_name="Already",
last_name="InDatabase",
birth_year=1951,
)
session.add(officer)
assignment = Assignment(
id=77021,
officer_id=officer.id,
star_no="4567",
star_date=datetime.date(2020, 1, 1),
job_id=department_with_ranks.jobs[0].id,
)
session.add(assignment)
salary = Salary(
id=33001, salary=30000, officer_id=officer.id, year=2018, is_fiscal_year=False,
)
session.add(salary)
incident = Incident(
id=123456,
report_number="Old_Report_Number",
department_id=1,
description="description",
time=datetime.time(23, 45, 16),
)
incident.officers = [officer]
session.add(incident)
link = Link(id=55051, title="Existing Link", url="https://www.example.org")
session.add(link)
officer.links = [link]
# run command with the csv files in the test_csvs folder
result = run_command_print_output(
advanced_csv_import,
[
str(department_with_ranks.name),
"--officers-csv",
os.path.join(test_csv_dir, "officers.csv"),
"--assignments-csv",
os.path.join(test_csv_dir, "assignments.csv"),
"--salaries-csv",
os.path.join(test_csv_dir, "salaries.csv"),
"--links-csv",
os.path.join(test_csv_dir, "links.csv"),
"--incidents-csv",
os.path.join(test_csv_dir, "incidents.csv"),
],
)
# command did not fail
assert result.exception is None
assert result.exit_code == 0
print(list(Officer.query.all()))
all_officers = {
officer.unique_internal_identifier: officer
for officer in Officer.query.filter_by(department_id=1).all()
}
# make sure all the data is imported as expected
cop1 = all_officers["UID-1"]
assert cop1.first_name == "Mark"
assert cop1.last_name == "Smith"
assert cop1.gender == "M"
assert cop1.race == "WHITE"
assert cop1.employment_date == datetime.date(2019, 7, 12)
assert cop1.birth_year == 1984
assert cop1.middle_initial == "O"
assert cop1.suffix is None
salary_2018, salary_2019 = sorted(cop1.salaries, key=operator.attrgetter("year"))
assert salary_2018.year == 2018
assert salary_2018.salary == 10000
assert salary_2018.is_fiscal_year is True
assert salary_2018.overtime_pay is None
assert salary_2019.salary == 10001
assignment_po, assignment_cap = sorted(
cop1.assignments, key=operator.attrgetter("star_date")
)
assert assignment_po.star_no == "1234"
assert assignment_po.star_date == datetime.date(2019, 7, 12)
assert assignment_po.resign_date == datetime.date(2020, 1, 1)
assert assignment_po.job.job_title == "Police Officer"
assert assignment_po.unit_id is None
assert assignment_cap.star_no == "2345"
assert assignment_cap.job.job_title == "Captain"
cop2 = all_officers["UID-2"]
assert cop2.first_name == "Claire"
assert cop2.last_name == "Fuller"
assert cop2.suffix == "III"
assert len(cop2.salaries) == 1
assert cop2.salaries[0].salary == 20000
assert len(cop2.assignments.all()) == 1
assert cop2.assignments[0].job.job_title == "Commander"
cop3 = all_officers["UID-3"]
assert cop3.first_name == "Robert"
assert cop3.last_name == "Brown"
assert len(cop3.assignments.all()) == 0
assert len(cop3.salaries) == 0
cop4 = all_officers["UID-4"]
assert cop4.id == 49483
assert cop4.first_name == "Already"
assert cop4.birth_year == 1952
assert cop4.gender == "Other"
assert cop4.salaries[0].salary == 50000
assert len(cop4.assignments.all()) == 2
updated_assignment, new_assignment = sorted(
cop4.assignments, key=operator.attrgetter("star_date")
)
assert updated_assignment.job.job_title == "Police Officer"
assert updated_assignment.resign_date == datetime.date(2020, 7, 10)
assert updated_assignment.star_no == "4567"
assert new_assignment.job.job_title == "Captain"
assert new_assignment.star_date == datetime.date(2020, 7, 10)
assert new_assignment.star_no == "54321"
incident = cop4.incidents[0]
assert incident.report_number == "CR-1234"
license_plates = {plate.state: plate.number for plate in incident.license_plates}
assert license_plates["NY"] == "ABC123"
assert license_plates["IL"] == "98UMC"
incident2 = Incident.query.filter_by(report_number="CR-9912").one()
address = incident2.address
assert address.street_name == "Fake Street"
assert address.cross_street1 == "Main Street"
assert address.cross_street2 is None
assert address.city == "Chicago"
assert address.state == "IL"
assert address.zip_code == "60603"
assert incident2.officers == [cop1]
incident3 = Incident.query.get(123456)
assert incident3.report_number == "CR-39283"
assert incident3.description == "Don't know where it happened"
assert incident3.officers == [cop1]
assert incident3.date == datetime.date(2020, 7, 26)
lp = incident3.license_plates[0]
assert lp.number == "XYZ11"
assert lp.state is None
assert incident3.address is None
assert incident3.time is None
link_new = cop4.links[0]
assert [link_new] == list(cop1.links)
assert link_new.title == "A Link"
assert link_new.url == "https://www.example.com"
assert {officer.id for officer in link_new.officers} == {cop1.id, cop4.id}
incident_link = incident2.links[0]
assert incident_link.url == "https://www.example.com/incident"
assert incident_link.title == "Another Link"
assert incident_link.author == "Example Times"
updated_link = Link.query.get(55051)
assert updated_link.title == "Updated Link"
assert updated_link.officers == []
assert updated_link.incidents == [incident3]
def _create_csv(data, path, csv_file_name):
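# Helper: write a list of row dicts to <path>/<csv_file_name> and return the path of the generated CSV.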
csv_path = os.path.join(str(path), csv_file_name)
field_names = set().union(*[set(row.keys()) for row in data])
with open(csv_path, "w") as f:
csv_writer = csv.DictWriter(f, field_names)
csv_writer.writeheader()
csv_writer.writerows(data)
return csv_path
def test_advanced_csv_import__force_create(session, department_with_ranks, tmp_path):
tmp_path = str(tmp_path)
department_name = department_with_ranks.name
other_department = Department(name="Other department", short_name="OPD")
session.add(other_department)
officer = Officer(
id=99001,
department_id=other_department.id,
first_name="Already",
last_name="InDatabase",
)
session.add(officer)
session.flush()
# create temporary csv files
officers_data = [
{
"id": 99001,
"department_name": department_name,
"last_name": "Test",
"first_name": "First",
},
{
"id": 99002,
"department_name": department_name,
"last_name": "Test",
"first_name": "Second",
},
{
"id": 99003,
"department_name": department_name,
"last_name": "Test",
"first_name": "Third",
},
]
officers_csv = _create_csv(officers_data, tmp_path, "officers.csv")
assignments_data = [
{
"id": 98001,
"officer_id": 99002,
"job title": RANK_CHOICES_1[1],
"badge number": "12345",
"start date": "2020-07-24",
}
]
assignments_csv = _create_csv(assignments_data, tmp_path, "assignments.csv")
salaries_data = [{"id": 77001, "officer_id": 99003, "year": 2019, "salary": 98765}]
salaries_csv = _create_csv(salaries_data, tmp_path, "salaries.csv")
incidents_data = [
{
"id": 66001,
"officer_ids": "99002|99001",
"department_name": department_name,
"street_name": "Fake Street",
}
]
incidents_csv = _create_csv(incidents_data, tmp_path, "incidents.csv")
links_data = [
{
"id": 55001,
"officer_ids": "99001",
"incident_ids": "",
"url": "https://www.example.org/3629",
}
]
links_csv = _create_csv(links_data, tmp_path, "links.csv")
# run command with --force-create
result = run_command_print_output(
advanced_csv_import,
[
str(department_with_ranks.name),
"--officers-csv",
officers_csv,
"--assignments-csv",
assignments_csv,
"--salaries-csv",
salaries_csv,
"--incidents-csv",
incidents_csv,
"--links-csv",
links_csv,
"--force-create",
],
)
# make sure command did not fail
assert result.exception is None
assert result.exit_code == 0
# make sure all the data is imported as expected
cop1 = Officer.query.get(99001)
assert cop1.first_name == "First"
cop2 = Officer.query.get(99002)
assert cop2.assignments[0].star_no == "12345"
assert cop2.assignments[0] == Assignment.query.get(98001)
cop3 = Officer.query.get(99003)
assert cop3.salaries[0].salary == 98765
assert cop3.salaries[0] == Salary.query.get(77001)
incident = Incident.query.get(66001)
assert incident.address.street_name == "Fake Street"
assert cop1.incidents[0] == incident
assert cop2.incidents[0] == incident
link = Link.query.get(55001)
assert link.url == "https://www.example.org/3629"
assert cop1.links[0] == link
def test_advanced_csv_import__extra_fields_officers(
session, department_with_ranks, tmp_path
):
department_name = department_with_ranks.name
# create csv with invalid field 'name'
officers_data = [
{"id": "", "department_name": department_name, "name": "John Smith"},
]
officers_csv = _create_csv(officers_data, tmp_path, "officers.csv")
# run command
result = run_command_print_output(
advanced_csv_import,
[str(department_with_ranks.name), "--officers-csv", officers_csv],
)
# expect the command to fail because of unexpected field 'name'
assert result.exception is not None
assert "unexpected" in str(result.exception).lower()
assert "name" in str(result.exception)
def test_advanced_csv_import__missing_required_field_officers(
session, department_with_ranks, tmp_path
):
department_name = department_with_ranks.name
# create csv with missing field 'id'
officers_data = [
{
"department_name": department_name,
"first_name": "John",
"last_name": "Smith",
},
]
officers_csv = _create_csv(officers_data, tmp_path, "officers.csv")
# run command
result = run_command_print_output(
advanced_csv_import,
[str(department_with_ranks.name), "--officers-csv", officers_csv],
)
# expect the command to fail because 'id' is missing
assert result.exception is not None
assert "missing" in str(result.exception).lower()
assert "id" in str(result.exception)
def test_advanced_csv_import__wrong_department(
session, department_with_ranks, tmp_path
):
department_name = department_with_ranks.name
other_department = Department(name="Other department", short_name="OPD")
session.add(other_department)
# create csv
officers_data = [
{
"id": "",
"department_name": department_name,
"first_name": "John",
"last_name": "Smith",
},
]
officers_csv = _create_csv(officers_data, tmp_path, "officers.csv")
# run command with wrong department name
result = run_command_print_output(
advanced_csv_import, [other_department.name, "--officers-csv", officers_csv],
)
# expect command to fail because the department name provided to the
# command is different than the one in the csv
assert result.exception is not None
assert result.exit_code != 0
def test_advanced_csv_import__update_officer_different_department(
session, department_with_ranks, tmp_path
):
department_name = department_with_ranks.name
# set up data
other_department = Department(name="Other department", short_name="OPD")
session.add(other_department)
officer = Officer(
id=99021, department_id=other_department.id, first_name="Chris", last_name="Doe"
)
session.add(officer)
# create csv to update the officer
officers_data = [
{
"id": 99021,
"department_name": department_name,
"first_name": "John",
"last_name": "Smith",
},
]
officers_csv = _create_csv(officers_data, tmp_path, "officers.csv")
# run command
result = run_command_print_output(
advanced_csv_import,
[str(department_with_ranks.name), "--officers-csv", officers_csv],
)
# command fails because the officer is assigned to a different department
# and cannot be updated
assert result.exception is not None
assert result.exit_code != 0
def test_advanced_csv_import__unit_other_department(
session, department_with_ranks, tmp_path
):
department_id = department_with_ranks.id
# set up data
officer = generate_officer()
officer.department_id = department_id
session.add(officer)
session.flush()
other_department = Department(name="Other department", short_name="OPD")
session.add(other_department)
session.flush()
unit = Unit(department_id=other_department.id)
session.add(unit)
session.flush()
# csv with unit_id referring to a unit in a different department
assignments_data = [
{
"id": "",
"officer_id": officer.id,
"job title": RANK_CHOICES_1[1],
"unit_id": unit.id,
}
]
assignments_csv = _create_csv(assignments_data, tmp_path, "assignments.csv")
result = run_command_print_output(
advanced_csv_import,
[department_with_ranks.name, "--assignments-csv", assignments_csv],
)
# command fails because the unit does not belong to the department
assert result.exception is not None
assert result.exit_code != 0
def test_create_officer_from_row_adds_new_officer_and_normalizes_gender(app, session):
with app.app_context():
department = Department(name="Cityname Police Department", short_name="CNPD")
session.add(department)
session.commit()
lookup_officer = Officer.query.filter_by(
first_name="NewOfficerFromRow").one_or_none()
assert lookup_officer is None
row = {
"gender": "Female",
"first_name": "NewOfficerFromRow",
"last_name": "Jones",
"employment_date": "1980-12-01",
"unique_internal_identifier": "officer-jones-unique-id",
}
create_officer_from_row(row, department.id)
lookup_officer = Officer.query.filter_by(
first_name="NewOfficerFromRow").one_or_none()
# Was an officer created in the database?
assert lookup_officer is not None
# Was the gender properly normalized?
assert lookup_officer.gender == "F"
| gpl-3.0 |
mhdella/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 105 | 26588 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
    # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use a pre-defined CV fold so the same folds are used even though y differs
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight of type dict is
    # provided for a multiclass problem. However, it can handle
    # binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
    # case we take the average of the coefs after fitting across all the
    # folds, they need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
| bsd-3-clause |
marionleborgne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s', does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
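# Usage sketch (for illustration; this mirrors what pylab/pyplot do at import
# time with the tuple returned above):
#   new_figure_manager, draw_if_interactive, show = pylab_setup()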
| agpl-3.0 |
google-research/robustness_metrics | robustness_metrics/bin/compute_report.py | 1 | 5350 | # coding=utf-8
# Copyright 2021 The Robustness Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Generate a set of robustness reports on the given model.
The script accepts a path to the python file (including the .py extension),
that should contain:
A function `create` that returns a tuple consisting of
* the model as a callable: it accepts a dictionary holding batched data and
returns a tensor of predictions; and
* the preprocessing as a callable: it is mapped over the dataset before
batching (use `None` for default preprocessing).
As an example see the file `models/random_imagenet_numpy.py`.
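A minimal sketch of such a file (illustrative only; the "image" key and the
1000-class output shape below are assumptions, not part of the interface):
  import numpy as np
  def create():
    def model(features):
      batch_size = features["image"].shape[0]
      # Fake, uniformly random predictions over 1000 classes.
      return np.random.rand(batch_size, 1000)
    preprocess_fn = None  # `None` keeps the default preprocessing.
    return model, preprocess_fn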
There are two ways to specify which metrics should be computed on which
datasets:
- One can explicitly provide them in the flag `--measurement`, e.g., passing
  --measurement=accuracy@imagenet --measurement=brier@imagenet_a will compute
  the accuracy on the imagenet dataset and the Brier score over the imagenet_a
  dataset.
- By passing a report name in `--report`. The script will figure out which
combinations of metrics and datasets the report needs and will evaluate all
of them.
Note that each of these flags can be provided multiple times and the script
evaluates the union of them. All the measurements provided in `--measurement`
will appear under the report with name "custom".
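For illustration, a typical invocation could look like this (the module path
and file locations are assumptions, not requirements of the script):
  python3 -m robustness_metrics.bin.compute_report \
    --model_path=models/random_imagenet_numpy.py \
    --measurement=accuracy@imagenet --measurement=brier@imagenet_a \
    --output_json_path=/tmp/results.json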
The results are printed to the standard output and can be also saved to a JSON
file using the `--output_json` flag. The json file will hold an array of two
dictionaries (JSON objects):
* The first one holding a map metric_name -> dataset_name -> result.
* The second one holding a map report-> report_value_name -> value.
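For illustration only, the serialized array therefore has the shape
  [{"<metric name>": {"<dataset name>": <result>}},
   {"<report name>": {"<score name>": <value>}}]
with the concrete keys and values depending on the requested measurements.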
# Note for non-TF models
Please use the flag `--tf_on_cpu`, so that TensorFlow will not allocate any of
the GPU memory.
"""
import json
from absl import app
from absl import flags
import pandas as pd
import robustness_metrics as rm
from robustness_metrics.bin import common as bin_common
from robustness_metrics.bin import compute_report_lib as lib
import tabulate
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_multi_string(
"report", [], "The specifications of the reports to be computed")
flags.DEFINE_multi_string(
"measurement", [],
"A @-separated pair specifying which metric to compute on which dataset. "
"Must be specified if `report` is not specified.")
flags.DEFINE_integer("batch_size", 32, "The batch size to use.")
flags.DEFINE_string("model_path", None,
"A path to the python file defining the model.")
flags.DEFINE_string(
"model_args", "",
"The arguments to be passed to the create() function of the model. Will "
"be literal_eval'ed, should take the form a='1',b='2',c=3.")
flags.DEFINE_string("output_json_path", None,
"Where to store the json-serialized output")
flags.DEFINE_bool("tf_on_cpu", False,
"If set, will hide accelerators from TF.")
flags.mark_flags_as_required(["model_path"])
def _register_custom_report():
"""Registers a report named `custom` from the given --measurement flags."""
all_measurements = []
for metric_name, dataset_name in map(lambda spec: spec.split("@"),
FLAGS.measurement):
all_measurements.append(rm.reports.base.MeasurementSpec(
dataset_name=dataset_name, metric_name=metric_name))
@rm.reports.base.registry.register("custom") # pylint: disable=unused-variable
class CustomReport(rm.reports.base.UnionReport):
@property
def required_measurements(self):
return all_measurements
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
if FLAGS.tf_on_cpu:
# Hide the GPU from TF.
tf.config.experimental.set_visible_devices([], "GPU")
strategy = bin_common.default_distribution_strategy()
module = bin_common.load_module_from_path(FLAGS.model_path)
with strategy.scope():
_, _, kwargs = rm.common.registry.parse_name_and_kwargs(
f"foo({FLAGS.model_args})")
model, preprocess_fn = module.create(**kwargs)
if FLAGS.measurement:
_register_custom_report()
FLAGS.report.append("custom")
metric_results, report_results = lib.compute_reports(
strategy, FLAGS.report, model, preprocess_fn, FLAGS.batch_size)
for metric_name, results in metric_results.items():
print(f"metric: {metric_name}")
print(tabulate.tabulate(pd.DataFrame.from_dict(results), headers="keys"))
for report_name, results in report_results.items():
print(f"report: {report_name}")
print(tabulate.tabulate(results.items(), headers=["score name", "value"]))
if FLAGS.output_json_path:
with tf.io.gfile.GFile(FLAGS.output_json_path, "wb") as json_fp:
json.dump((metric_results, report_results), json_fp)
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
etkirsch/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
kosklain/MitosisDetection | Trainer.py | 1 | 6945 | """
Created on Jun 20, 2013
Used for the training phase. It builds a model for the images in
train_data_path (JSON file).
It is also used for checking how many candidates per mitotic point
we have (that is, the quality of the segmentation), by using the
appropriate functionality in FeatureGetter.
@author: Bibiana and Adria
"""
import data_io
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from FeatureGetter import FeatureGetter
from ImageSaver import ImageSaver
import os
from Utils import Utils
from WndchrmWorker import WndchrmWorkerTrain
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
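# Typical usage sketch (the flag combination is an assumption; see the
# __main__ block at the bottom of this file for the candidate-checking path):
#   tr = Trainer(load=False, loadWndchrm=False)  # recompute features from scratch
#   tr.run()  # full pipeline including wndchrm, or tr.runWithoutWndchrm()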
class Trainer:
def __init__(self, load=False, loadWndchrm=False):
self.load = load
self.loadWndchrm = loadWndchrm
"""
Used for wndchrm. It moves all the old files to a new place, so they do not get overwritten.
"""
def prepareEnvironment(self):
# People want to save time
trainingPathPositive = os.path.join(data_io.get_training_folder(), data_io.get_positive_folder())
trainingPathOldPositive = os.path.join(data_io.get_training_old_folder(), data_io.get_positive_folder())
Utils.shift(data_io.get_training_old_folder(), trainingPathOldPositive, data_io.get_positive_folder(), trainingPathPositive)
trainingPathNegative = os.path.join(data_io.get_training_folder(), data_io.get_negative_folder())
trainingPathOldNegative = os.path.join(data_io.get_training_old_folder(), data_io.get_negative_folder())
Utils.shift(data_io.get_training_old_folder(), trainingPathOldNegative, data_io.get_negative_folder(), trainingPathNegative)
os.mkdir(trainingPathPositive)
os.mkdir(trainingPathNegative)
if not self.load:
Utils.shift('.', data_io.get_savez_name(), data_io.get_savez_name(), data_io.get_savez_name())
if not self.loadWndchrm:
Utils.shift('.', data_io.get_wndchrm_dataset(), data_io.get_wndchrm_dataset(), data_io.get_wndchrm_dataset())
def run(self):
print "Preparing the environment"
self.prepareEnvironment()
print "Reading in the training data"
imageCollections = data_io.get_train_df()
wndchrmWorker = WndchrmWorkerTrain()
print "Getting features"
if not self.loadWndchrm: #Last wndchrm set of features
featureGetter = FeatureGetter()
fileName = data_io.get_savez_name()
if not self.load: #Last features calculated from candidates
(namesObservations, coordinates, train) = Utils.calculateFeatures(fileName, featureGetter, imageCollections)
else:
(namesObservations, coordinates, train) = Utils.loadFeatures(fileName)
print "Getting target vector"
(indexes, target, obs) = featureGetter.getTargetVector(coordinates, namesObservations, train)
print "Saving images"
imageSaver = ImageSaver(coordinates[indexes], namesObservations[indexes],
imageCollections, featureGetter.patchSize, target[indexes])
imageSaver.saveImages()
print "Executing wndchrm algorithm and extracting features"
(train, target) = wndchrmWorker.executeWndchrm()
else:
(train, target) = wndchrmWorker.loadWndchrmFeatures()
print "Training the model"
model = RandomForestClassifier(n_estimators=500, verbose=2, n_jobs=1, min_samples_split=30, random_state=1, compute_importances=True)
model.fit(train, target)
print model.feature_importances_
print "Saving the classifier"
data_io.save_model(model)
def runWithoutWndchrm(self):
print "Reading in the training data"
imageCollections = data_io.get_train_df()
print "Getting features"
featureGetter = FeatureGetter()
fileName = data_io.get_savez_name()
if not self.load: #Last features calculated from candidates
(namesObservations, coordinates, train) = Utils.calculateFeatures(fileName, featureGetter, imageCollections)
else:
(namesObservations, coordinates, train) = Utils.loadFeatures(fileName)
print "Getting target vector"
(indexes, target, obs) = featureGetter.getTargetVector(coordinates, namesObservations, train)
print "Training the model"
classifier = RandomForestClassifier(n_estimators=500, verbose=2, n_jobs=1, min_samples_split=10, random_state=1, compute_importances=True)
#classifier = KNeighborsClassifier(n_neighbors=50)
model = Pipeline([('scaling', MinMaxScaler()), ('classifying', classifier)])
model.fit(obs[indexes], target[indexes])
print "Saving the classifier"
data_io.save_model(model)
"""
Checks the quality of the segmentation.
"""
def checkCandidates(self):
imageCollections = data_io.get_train_df()
featureGetter = FeatureGetter()
(namesObservations, coordinates, train) = featureGetter.getTransformedDatasetChecking(imageCollections)
imageNames = namesObservations
currentImage = imageNames[0]
csvArray = Utils.readcsv(imageNames[0])
mitoticPointsDetected = 0
totalMitoticPoints = len(csvArray)
finalTrain = []
for i in range(len(coordinates)):
if imageNames[i] != currentImage:
csvArray = Utils.readcsv(imageNames[i])
totalMitoticPoints += len(csvArray)
currentImage = imageNames[i]
for point in csvArray:
if ((point[0]-coordinates[i][0]) ** 2 + (point[1]-coordinates[i][1]) ** 2)< 30**2:
mitoticPointsDetected += 1
csvArray.remove(point)
finalTrain.append(train[i])
break
finalTrain = np.array(finalTrain)
allArea = finalTrain[:,0]
allPerimeter = finalTrain[:,1]
allRoundness = finalTrain[:,2]
totalObservations = len(coordinates)
print "Minimum Area: %f" % np.min(allArea)
print "Minimum Perimeter: %f" % np.min(allPerimeter)
print "Minimum Roundness: %f" % np.min(allRoundness)
print "Maximum Area: %f" % np.max(allArea)
print "Maximum Perimeter: %f" % np.max(allPerimeter)
print "Maximum Roundness: %f" % np.max(allRoundness)
print "Total number of candidates: %d" % (totalObservations)
print "Total number of mitotic points: %d" %(totalMitoticPoints)
print "Mitotic points detected: %d" %(mitoticPointsDetected)
print "Mitotic points missed: %d" %(totalMitoticPoints-mitoticPointsDetected)
if __name__ == "__main__":
tr = Trainer(load=True, loadWndchrm=True)
tr.checkCandidates()
| gpl-2.0 |
ahoyosid/scikit-learn | examples/linear_model/plot_ols.py | 45 | 1985 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis]
diabetes_X_temp = diabetes_X[:, :, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X_temp[:-20]
diabetes_X_test = diabetes_X_temp[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Residual sum of squares: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
artmusic0/theano-learning.part02 | Training_data/rd_file_resize_test.py | 5 | 1961 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 18:46:28 2015
@author: winpython
"""
from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import cPickle
temp_line = np.zeros(784)
final_output = np.empty((200,784),dtype=np.float32)
final_label = np.ones((200),dtype=np.int64)
cd = 0
for i in range(10):
for j in range (20):
print "i", i , " j", j
pil_im = Image.open( str(i) + "_" + str(j) + ".jpg" ).convert('L')
imshow(np.asarray(pil_im)) # before resize
pil_im = pil_im.resize((28, 28), Image.BILINEAR )
pil_im = np.array(pil_im)
fig = plt.figure()
plotwindow = fig.add_subplot()
plt.imshow(pil_im, cmap='gray')
plt.show()
#print("test")
#print(pil_im)
cr = 0
print "Read line", cd ,
for k in range(28):
for l in range(28):
                temp_line[cr] = pil_im[k][l] / 255.  # normalize 8-bit pixels to [0, 1]
print " in ", cr, "..."
cr += 1
print "Combine line"
final_output[cd] = temp_line
cd += 1
print "Finished Picture..."
print "Starting label"
cntddd = 0
for i in range(10):
for j in range (20):
final_label[cntddd] = i
cntddd += 1
print "Finished Labeling..."
print "Starting cpickle"
outputandlabel = final_output, final_label
f = file("training_data.pkl", 'wb')
cPickle.dump(outputandlabel, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
print "Finished cPickle..."
print "\ ! congratulations ! /"
#f = open("pic1.txt", "r")
'''
imshow(np.asarray(pil_im)) # before resize
pil_im = pil_im.resize((28, 28), Image.BILINEAR )
pil_im = np.array(pil_im)
#print(np.array(pil_im))
#imshow(np.asarray(pil_im))
fig = plt.figure()
plotwindow = fig.add_subplot()
plt.imshow(pil_im, cmap='gray')
plt.show()
print("test")
print(pil_im)
''' | gpl-3.0 |
SenGonzo/ia_tools | one_off_code/Hidden_Analysis.py | 1 | 5270 |
import pandas as pd
import Attack_Calc as atk
def data_input():
# import and fold data
df = pd.read_csv('input_data/units.csv')
# df = df[(df['faction'] == 'scum')]
# df = df[(df['single model health'] >= 5)]
df.sort_values(by=['name'], ascending=[True], inplace=True)
rez_df = pd.DataFrame(columns=('name', 'cost', 'type', 'group', 'blk_ev', 'hid_blk_ev', 'wht_ev', 'hid_wht_ev',
'health', 'blk_delta', 'wht_delta'))
x = 0
for index, row in df.iterrows():
print(row['name'])
surge_1 = [int(i) for i in row['surge 1'].split(',')]
surge_2 = [int(i) for i in row['surge 2'].split(',')]
surge_3 = [int(i) for i in row['surge 3'].split(',')]
surge_4 = [int(i) for i in row['surge 4'].split(',')]
attribute_array = [row['damage att'], row['surge att'], row['acc att'], 0, 0, 0]
if row['deadly'] == 1:
deadly = True
else:
deadly = False
blk_ev, blk_var, blk_x_array, blk_y_array = atk.results_calc(row['name'],
row['dice'].split(', '), ['black'],
surge_array=[surge_1, surge_2, surge_3, surge_4],
attribute_array=attribute_array,
distance=0,
deadly=deadly,
number_of_attacks=1,
atk_reroll_attack=row['reroll attack'],
atk_reroll_def=row['reroll def'],
focused=0)
fcs_blk_ev, fcs_blk_var, \
fcs_blk_x_array, fcs_blk_y_array = atk.results_calc(row['name'],
row['dice'].split(', '), ['black'],
surge_array=[surge_1, surge_2, surge_3, surge_4],
attribute_array=attribute_array,
distance=0,
deadly=deadly,
number_of_attacks=1,
atk_reroll_attack=row['reroll attack'],
atk_reroll_def=row['reroll def'],
hidden=1)
wht_ev, wht_var, wht_x_array, wht_y_array = atk.results_calc(row['name'],
row['dice'].split(', '), ['white'],
surge_array=[surge_1, surge_2, surge_3, surge_4],
attribute_array=attribute_array,
distance=0,
deadly=deadly,
number_of_attacks=1,
atk_reroll_attack=row['reroll attack'],
atk_reroll_def=row['reroll def'],
focused=0)
fcs_wht_ev, fcs_wht_var, \
fcs_wht_x_array, fcs_wht_y_array = atk.results_calc(row['name'],
row['dice'].split(', '), ['white'],
surge_array=[surge_1, surge_2, surge_3, surge_4],
attribute_array=attribute_array,
distance=0,
deadly=deadly,
number_of_attacks=1,
atk_reroll_attack=row['reroll attack'],
atk_reroll_def=row['reroll def'],
hidden=1)
rez_df.loc[x] = [row['name'], row['cost'], row['type'], row['group'], blk_ev, fcs_blk_ev, wht_ev, fcs_wht_ev,
row['single model health'], fcs_blk_ev - blk_ev, fcs_wht_ev - wht_ev]
x += 1
return rez_df
def create_stack_rank(rez_df):
rez_df['total'] = (rez_df['blk_delta'] + rez_df['wht_delta'])
rez_df.sort_values(by=['total'], ascending=[False], inplace=True)
rez_df.to_csv('output_data/hidden_rank.csv')
| mit |
webmasterraj/FogOrNot | app/data/sfmap.py | 1 | 2370 | from shapely.geometry import shape, Point
import pandas as pd
import numpy as np
import sql
SF = sql.getNeighborhoodsJSON()
def addNeighborhood(stations):
for s in stations:
s['neighborhood'] = whichNeighborhood(s)
return stations
def whichNeighborhood(station):
station_loc = Point(station['lon'], station['lat'])
for neighborhood in SF['features']:
poly = shape(neighborhood['geometry'])
if poly.contains(station_loc):
return neighborhood['properties']['neighborho']
return None
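# Usage sketch (coordinates are made up; assumes the 'lon'/'lat' keys used by
# addNeighborhood above):
#   whichNeighborhood({'lon': -122.4194, 'lat': 37.7749})
#   # -> a neighborhood name from the GeoJSON, or None if outside every polygon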
def surroundingNeighborhoods(neighborhood):
neighborhood_shape = shape(neighborhood['geometry'])
surrounding_neighborhoods = []
for other in SF['features']:
if neighborhood_shape.touches(shape(other['geometry'])):
surrounding_neighborhoods.append(other['properties']['neighborho'])
return surrounding_neighborhoods
def addForecastsToJSON(df):
empty_neighborhoods = []
for neighborhood in SF['features']:
neighborhood_name = neighborhood['properties']['neighborho']
df_slice = df[df['neighborhood'] == neighborhood_name]
print neighborhood_name, df_slice.shape
if df_slice.shape[0] == 0: # in case that neighborhood has no stations
empty_neighborhoods.append(neighborhood)
# print [n['properties']['neighborho'] for n in empty_neighborhoods]
else:
forecasts_dict = df_slice.groupby('hour').aggregate(np.mean).to_dict()
neighborhood['properties'].update(forecasts_dict['forecast'])
for neighborhood in empty_neighborhoods:
neighborhood_name = neighborhood['properties']['neighborho']
surrounding_neighborhoods = surroundingNeighborhoods(neighborhood)
print "** NO STATIONS: {0}".format(neighborhood_name)
print "** Using these instead: ", surrounding_neighborhoods
print
surrounding_forecasts_dfs = [df[df['neighborhood'] == other_name] for other_name in surrounding_neighborhoods]
df_slice = pd.concat(surrounding_forecasts_dfs) if surrounding_forecasts_dfs \
else df[df['neighborhood'] == neighborhood_name]
forecasts_dict = df_slice.groupby('hour').aggregate(np.mean).to_dict()
neighborhood['properties'].update(forecasts_dict['forecast'])
print neighborhood_name, '\n', forecasts_dict, '\n\n'
def createNewJSON():
sql.writeForecastsJSON(SF)
| gpl-2.0 |
jaeilepp/mne-python | doc/conf.py | 1 | 11900 | # -*- coding: utf-8 -*-
#
# MNE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
from os.path import relpath, dirname
import sys
from datetime import date
import sphinx_gallery # noqa
import sphinx_bootstrap_theme
from numpydoc import numpydoc, docscrape # noqa
import mne
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
if not os.path.isdir('_images'):
os.mkdir('_images')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# XXX This hack defines what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx_gallery.gen_gallery',
'numpydoc',
'gen_commands',
]
autosummary_generate = True
autodoc_default_flags = ['inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MNE'
td = date.today()
copyright = u'2012-%s, MNE Developers. Last updated on %s' % (td.year,
td.isoformat())
nitpicky = True
needs_sphinx = '1.5'
suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
exclude_patterns = ['source/generated']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx' # friendly, manni, murphy, tango
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': ' ', # we replace this with an image
'source_link_position': "nav", # default
'bootswatch_theme': "flatly", # yeti paper lumen
'navbar_sidebarrel': False, # Render the next/prev links in navbar?
'navbar_pagenav': False,
'navbar_class': "navbar",
'bootstrap_version': "3", # default
'navbar_links': [
("Install", "getting_started"),
("Documentation", "documentation"),
("API", "python_reference"),
("Examples", "auto_examples/index"),
("Contribute", "contributing"),
],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_images']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# variables to pass to HTML templating engine
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True, 'use_twitter': True,
'use_media_buttons': True, 'build_dev_html': build_dev_html}
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ---------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
# ('index', 'MNE.tex', u'MNE Manual',
# u'MNE Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
trim_doctests_flags = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy-dev/', None),
'scipy': ('http://scipy.github.io/devdocs/', None),
'sklearn': ('http://scikit-learn.org/stable/', None),
'matplotlib': ('http://matplotlib.org/', None),
}
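# With the mapping above, Sphinx resolves cross-project references at build
# time using the inventory file (objects.inv) fetched from each site. A sketch
# of the behaviour (actual target pages depend on those inventories):
#
#     :class:`numpy.ndarray`     -> links into the numpy documentation
#     :func:`scipy.signal.welch` -> links into the scipy documentation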
examples_dirs = ['../examples', '../tutorials']
gallery_dirs = ['auto_examples', 'auto_tutorials']
try:
mlab = mne.utils._import_mlab()
find_mayavi_figures = True
# Do not pop up any mayavi windows while running the
# examples. These are very annoying since they steal the focus.
mlab.options.offscreen = True
except Exception:
find_mayavi_figures = False
sphinx_gallery_conf = {
'doc_module': ('mne',),
'reference_url': {
'mne': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy/reference',
'scipy': 'http://docs.scipy.org/doc/scipy/reference',
'mayavi': 'http://docs.enthought.com/mayavi/mayavi'},
'examples_dirs': examples_dirs,
'gallery_dirs': gallery_dirs,
'find_mayavi_figures': find_mayavi_figures,
'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
'backreferences_dir': 'generated',
}
numpydoc_class_members_toctree = False
# -----------------------------------------------------------------------------
# Source code links (adapted from SciPy (doc/source/conf.py))
# -----------------------------------------------------------------------------
def linkcode_resolve(domain, info):
"""
    Determine the URL corresponding to a Python object.
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
        except Exception:
return None
try:
fn = inspect.getsourcefile(obj)
    except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
        except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
    except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(mne.__file__))
if 'git' in mne.__version__:
return "http://github.com/mne-tools/mne-python/blob/master/mne/%s%s" % ( # noqa
fn, linespec)
else:
return "http://github.com/mne-tools/mne-python/blob/maint/%s/mne/%s%s" % ( # noqa
mne.__version__, fn, linespec)
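# A minimal illustration of what linkcode_resolve returns (hypothetical values;
# the real URL depends on the installed mne version and file layout):
#
#     linkcode_resolve('py', {'module': 'mne.io', 'fullname': 'read_raw_fif'})
#     # -> 'http://github.com/mne-tools/mne-python/blob/master/mne/io/...#L...'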
| bsd-3-clause |
msneddon/narrative | kbase-extension/jupyter_config.py | 7 | 20464 | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
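# A short, hypothetical example of how the options above are typically
# combined (kept commented out, like the defaults above; adapt as needed):
#
# c.InteractiveShellApp.exec_lines = ['import numpy as np']
# c.InteractiveShellApp.extensions = ['autoreload']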
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = u''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.TerminalIPythonApp.reraise_ipython_extension_failures = False
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.TerminalInteractiveShell.display_page = False
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.6 (default, Nov 18 2013, 15:12:51) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.2.0-dev -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a cache that is
# too small than doing useful work.
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
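# For example, to have %store-d variables restored automatically at startup,
# one would uncomment and set (a sketch of the intended use):
#
# c.StoreMagics.autorestore = True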
| mit |
nvoron23/statsmodels | statsmodels/datasets/elnino/data.py | 25 | 1779 | """El Nino dataset, 1950 - 2010"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is in the public domain."""
TITLE = """El Nino - Sea Surface Temperatures"""
SOURCE = """
National Oceanic and Atmospheric Administration's National Weather Service
ERSST.V3B dataset, Nino 1+2
http://www.cpc.ncep.noaa.gov/data/indices/
"""
DESCRSHORT = """Averaged monthly sea surface temperature - Pacific Ocean."""
DESCRLONG = """This data contains the averaged monthly sea surface
temperature in degrees Celsius of the Pacific Ocean, between 0-10 degrees South
and 90-80 degrees West, from 1950 to 2010. This dataset was obtained from
NOAA.
"""
NOTE = """::
Number of Observations - 61 x 12
Number of Variables - 1
Variable name definitions::
        TEMPERATURE - average sea surface temperature in degrees Celsius
(12 columns, one per month).
"""
from numpy import recfromtxt, column_stack, array
from pandas import DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the El Nino data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The elnino Dataset instance does not contain endog and exog attributes.
"""
data = _get_data()
names = data.dtype.names
dataset = Dataset(data=data, names=names)
return dataset
def load_pandas():
dataset = load()
dataset.data = DataFrame(dataset.data)
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/elnino.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
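# A minimal usage sketch (assumes statsmodels is installed and this module is
# importable as statsmodels.datasets.elnino; the CSV likely holds a YEAR column
# followed by twelve monthly temperature columns):
#
#     from statsmodels.datasets import elnino
#     df = elnino.load_pandas().data
#     print(df.head())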
| bsd-3-clause |
Yurlungur/pyballd | test/test_orthopoly.py | 1 | 4408 | #!/usr/bin/env python2
"""
test_orthopoly.py
Author: Jonah Miller ([email protected])
Time-stamp: <2017-06-28 20:30:18 (jmiller)>
Tests the orthopoly module.
"""
from __future__ import print_function
import matplotlib as mpl
mpl.use("Agg")
from matplotlib import pyplot as plt
import numpy as np
mpl.rcParams.update({'font.size':12})
import pyballd
from pyballd.orthopoly import PseudoSpectralDiscretization2D
XMIN,XMAX = -np.pi/2.,np.pi/2.
YMIN,YMAX = 0,2*np.pi
KX = 1
KY = 2
X_OVER_Y = 2
USE_FIGS_DIR=False
f = lambda x,y: np.cos(KX*x)*np.sin(KY*y)
dfdx = lambda x,y: -KX*np.sin(KX*x)*np.sin(KY*y)
dfdx2 = lambda x,y: -KX*KX*np.cos(KX*x)*np.sin(KY*y)
dfdy = lambda x,y: KY*np.cos(KX*x)*np.cos(KY*y)
dfdy2 = lambda x,y: -KY*KY*np.cos(KX*x)*np.sin(KY*y)
dfdxdy = lambda x,y: -KX*KY*np.sin(KX*x)*np.cos(KY*y)
g = lambda x,y: dfdx2(x,y) + dfdy2(x,y) + dfdxdy(x,y)
def test_derivatives_at_order(ordery):
orderx = X_OVER_Y*ordery
s = PseudoSpectralDiscretization2D(orderx,XMIN,XMAX,
ordery,YMIN,YMAX)
X,Y = s.get_x2d()
f_ana = f(X,Y)
g_ana = g(X,Y)
g_num = (s.differentiate(f_ana,2,0)
+ s.differentiate(f_ana,0,2)
+ s.differentiate(f_ana,1,1))
delta_g = g_num - g_ana
norm2dg = s.norm2(delta_g)
return norm2dg
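# For a pseudospectral discretization the derivative error above should fall
# off roughly exponentially with the order. A minimal convergence check one
# could add (a sketch; the chosen orders are assumptions, not values verified
# against pyballd):
#
# def test_derivative_error_decreases():
#     assert test_derivatives_at_order(12) < test_derivatives_at_order(4)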
def plot_test_function(orderx,ordery):
s = PseudoSpectralDiscretization2D(orderx,XMIN,XMAX,
ordery,YMIN,YMAX)
X,Y = s.get_x2d()
f_ana = f(X,Y)
plt.pcolor(X,Y,f_ana)
plt.xlabel('x',fontsize=16)
plt.ylabel('y',fontsize=16)
plt.xlim(XMIN,XMAX)
plt.ylim(YMIN,YMAX)
cb = plt.colorbar()
cb.set_label(label=r'$\cos(x)\sin(2 y)$',fontsize=16)
for postfix in ['.png','.pdf']:
name = 'test_function'+postfix
if USE_FIGS_DIR:
name = 'figs/' + name
plt.savefig(name,
bbox_inches='tight')
plt.clf()
def test_derivatives():
orders = [4+(2*i) for i in range(12)]
errors = [test_derivatives_at_order(o) for o in orders]
plt.semilogy(orders,errors,'bo-',lw=2,ms=12)
plt.xlabel('order in y-direction',fontsize=16)
plt.ylabel(r'$|E|_2$',fontsize=16)
for postfix in ['.png','.pdf']:
name = 'orthopoly_errors'+postfix
if USE_FIGS_DIR:
name = 'figs/' + name
plt.savefig(name,
bbox_inches='tight')
plt.clf()
def test_interp_at_order(ordery):
orderx = X_OVER_Y*ordery
s = PseudoSpectralDiscretization2D(orderx,XMIN,XMAX,
ordery,YMIN,YMAX)
Xc,Yc = s.get_x2d()
x = np.linspace(XMIN,XMAX,100)
y = np.linspace(YMIN,YMAX,100)
Xf,Yf = np.meshgrid(x,y,indexing='ij')
f_coarse = f(Xc,Yc)
f_fine = f(Xf,Yf)
f_interpolator = s.to_continuum(f_coarse)
f_num = f_interpolator(Xf,Yf)
delta = f_num - f_fine
return np.max(np.abs(delta))
def plot_interpolation(orderx,ordery):
s = PseudoSpectralDiscretization2D(orderx,XMIN,XMAX,
ordery,YMIN,YMAX)
Xc,Yc = s.get_x2d()
x = np.linspace(XMIN,XMAX,100)
y = np.linspace(YMIN,YMAX,100)
Xf,Yf = np.meshgrid(x,y,indexing='ij')
f_coarse = f(Xc,Yc)
f_interpolator = s.to_continuum(f_coarse)
f_num = f_interpolator(Xf,Yf)
plt.pcolor(Xf,Yf,f_num)
cb = plt.colorbar()
cb.set_label('interpolated function',fontsize=16)
plt.xlabel('x')
plt.ylabel('y')
for postfix in ['.png','.pdf']:
name = 'orthopoly_interpolated_function'+postfix
if USE_FIGS_DIR:
name = 'figs/' + name
plt.savefig(name,
bbox_inches='tight')
plt.clf()
def test_interpolation():
xfine = np.linspace(XMIN,XMAX,100)
yfine = np.linspace(YMIN,YMAX,100)
orders = [4+(2*i) for i in range(12)]
errors = [test_interp_at_order(o) for o in orders]
plt.semilogy(orders,errors,'bo-',lw=2,ms=12)
plt.xlabel('order in y-direction',fontsize=16)
plt.ylabel('max(interpolation error)',fontsize=16)
for postfix in ['.png','.pdf']:
name = 'orthopoly_interp_errors'+postfix
if USE_FIGS_DIR:
name = 'figs/' + name
plt.savefig(name,
bbox_inches='tight')
plt.clf()
if __name__ == "__main__":
plot_test_function(80,160)
test_derivatives()
plot_interpolation(10,20)
test_interpolation()
| lgpl-3.0 |
rajat1994/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 130 | 50966 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
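    # Usage sketch for partial_fit (hypothetical names; assumes minibatches
    # X_batch, y_batch and the full label set all_classes are available):
    #
    #     clf = SGDClassifier(loss="log")
    #     for X_batch, y_batch in batches:
    #         clf.partial_fit(X_batch, y_batch, classes=all_classes)
    #
    # classes is only required on the first call; passing the same value again
    # is accepted.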
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified.
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
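# Illustrative sketch (comments only, not executed) of the loss="modified_huber"
# probability mapping described in ``predict_proba`` above, for the binary case;
# the ``scores`` values below are made up:
#
#     import numpy as np
#     scores = np.array([-3.0, -0.2, 0.0, 0.4, 2.5])    # decision_function output
#     prob_pos = (np.clip(scores, -1, 1) + 1) / 2.      # P(y == classes_[1])
#     prob = np.column_stack([1 - prob_pos, prob_pos])  # columns ordered as classes_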
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
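    # Sketch of out-of-core use of ``partial_fit`` (``stream_of_batches`` is a
    # hypothetical iterable yielding (X_chunk, y_chunk) pairs); each call makes
    # a single pass over the chunk it receives:
    #
    #     reg = SGDRegressor()
    #     for X_chunk, y_chunk in stream_of_batches:
    #         reg.partial_fit(X_chunk, y_chunk)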
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
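# Sketch of the ``average`` branch handled in _fit_regressor above (X and y are
# assumed in-memory training arrays): with average=10, averaging of the weights
# starts once 10 samples have been seen, and the averaged estimates are then
# exposed through ``coef_`` and ``intercept_``:
#
#     reg = SGDRegressor(average=10)
#     reg.fit(X, y)
#     reg.coef_, reg.intercept_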
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
kazemakase/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
Petr-Kovalev/nupic-win32 | external/linux32/lib/python2.6/site-packages/matplotlib/blocking_input.py | 69 | 12119 | """
This provides several classes used for blocking interaction with figure windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
import time
import numpy as np
from matplotlib import path, verbose
from matplotlib.cbook import is_sequence_of_strings
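# Illustrative sketch (assumes an interactive matplotlib backend): these classes
# are normally reached through the helpers named in the module docstring rather
# than instantiated directly, e.g.:
#
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     plt.plot([0, 1], [0, 1])
#     pts = plt.ginput(n=3, timeout=30)   # BlockingMouseInput under the hood
#     key = plt.waitforbuttonpress()      # BlockingKeyMouseInput under the hood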
class BlockingInput(object):
"""
Class that creates a callable object to retrieve events in a
blocking way.
"""
def __init__(self, fig, eventslist=()):
self.fig = fig
assert is_sequence_of_strings(eventslist), "Requires a sequence of event name strings"
self.eventslist = eventslist
def on_event(self, event):
"""
Event handler that will be passed to the current figure to
retrieve events.
"""
# Add a new event to list - using a separate function is
# overkill for the base class, but this is consistent with
# subclasses
self.add_event(event)
verbose.report("Event %i" % len(self.events))
# This will extract info from events
self.post_event()
# Check if we have enough events already
if len(self.events) >= self.n and self.n > 0:
self.fig.canvas.stop_event_loop()
def post_event(self):
"""For baseclass, do nothing but collect events"""
pass
def cleanup(self):
"""Disconnect all callbacks"""
for cb in self.callbacks:
self.fig.canvas.mpl_disconnect(cb)
self.callbacks=[]
def add_event(self,event):
"""For base class, this just appends an event to events."""
self.events.append(event)
def pop_event(self,index=-1):
"""
        This removes an event from the event list. Defaults to
        removing the last event, but an index can be supplied. Note that
        this does not check that there are events, much like the
        normal pop method. If no events exist, this will throw an
        exception.
"""
self.events.pop(index)
def pop(self,index=-1):
self.pop_event(index)
pop.__doc__=pop_event.__doc__
def __call__(self, n=1, timeout=30 ):
"""
Blocking call to retrieve n events
"""
assert isinstance(n, int), "Requires an integer argument"
self.n = n
self.events = []
self.callbacks = []
# Ensure that the figure is shown
self.fig.show()
# connect the events to the on_event function call
for n in self.eventslist:
self.callbacks.append( self.fig.canvas.mpl_connect(n, self.on_event) )
try:
# Start event loop
self.fig.canvas.start_event_loop(timeout=timeout)
finally: # Run even on exception like ctrl-c
# Disconnect the callbacks
self.cleanup()
# Return the events in this case
return self.events
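# Direct use is also possible (sketch, assuming ``fig`` is a Figure already shown
# by an interactive backend):
#
#     bi = BlockingInput(fig, eventslist=('button_press_event',))
#     events = bi(n=2, timeout=10)   # blocks until two clicks or the timeout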
class BlockingMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve mouse clicks in a
blocking way.
This class will also retrieve keyboard clicks and treat them like
appropriate mouse clicks (delete and backspace are like mouse button 3,
enter is like mouse button 2 and all others are like mouse button 1).
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig,
eventslist=('button_press_event',
'key_press_event') )
def post_event(self):
"""
This will be called to process events
"""
assert len(self.events)>0, "No events yet"
if self.events[-1].name == 'key_press_event':
self.key_event()
else:
self.mouse_event()
def mouse_event(self):
'''Process a mouse click event'''
event = self.events[-1]
button = event.button
if button == 3:
self.button3(event)
elif button == 2:
self.button2(event)
else:
self.button1(event)
def key_event(self):
'''
Process a key click event. This maps certain keys to appropriate
mouse click events.
'''
event = self.events[-1]
key = event.key
if key == 'backspace' or key == 'delete':
self.button3(event)
elif key == 'enter':
self.button2(event)
else:
self.button1(event)
def button1( self, event ):
"""
Will be called for any event involving a button other than
button 2 or 3. This will add a click if it is inside axes.
"""
if event.inaxes:
self.add_click(event)
else: # If not a valid click, remove from event list
BlockingInput.pop(self)
def button2( self, event ):
"""
Will be called for any event involving button 2.
Button 2 ends blocking input.
"""
# Remove last event just for cleanliness
BlockingInput.pop(self)
# This will exit even if not in infinite mode. This is
# consistent with matlab and sometimes quite useful, but will
# require the user to test how many points were actually
# returned before using data.
self.fig.canvas.stop_event_loop()
def button3( self, event ):
"""
Will be called for any event involving button 3.
Button 3 removes the last click.
"""
# Remove this last event
BlockingInput.pop(self)
# Now remove any existing clicks if possible
if len(self.events)>0:
self.pop()
def add_click(self,event):
"""
        This adds the coordinates of an event to the list of clicks
"""
self.clicks.append((event.xdata,event.ydata))
verbose.report("input %i: %f,%f" %
(len(self.clicks),event.xdata, event.ydata))
# If desired plot up click
if self.show_clicks:
self.marks.extend(
event.inaxes.plot([event.xdata,], [event.ydata,], 'r+') )
self.fig.canvas.draw()
def pop_click(self,index=-1):
"""
This removes a click from the list of clicks. Defaults to
removing the last click.
"""
self.clicks.pop(index)
if self.show_clicks:
mark = self.marks.pop(index)
mark.remove()
self.fig.canvas.draw()
def pop(self,index=-1):
"""
This removes a click and the associated event from the object.
Defaults to removing the last click, but any index can be
supplied.
"""
self.pop_click(index)
BlockingInput.pop(self,index)
def cleanup(self):
# clean the figure
if self.show_clicks:
for mark in self.marks:
mark.remove()
self.marks = []
self.fig.canvas.draw()
# Call base class to remove callbacks
BlockingInput.cleanup(self)
def __call__(self, n=1, timeout=30, show_clicks=True):
"""
Blocking call to retrieve n coordinate pairs through mouse
clicks.
"""
self.show_clicks = show_clicks
self.clicks = []
self.marks = []
BlockingInput.__call__(self,n=n,timeout=timeout)
return self.clicks
class BlockingContourLabeler( BlockingMouseInput ):
"""
Class that creates a callable object that uses mouse clicks or key
clicks on a figure window to place contour labels.
"""
def __init__(self,cs):
self.cs = cs
BlockingMouseInput.__init__(self, fig=cs.ax.figure )
def button1(self,event):
"""
This will be called if an event involving a button other than
        2 or 3 occurs. This will add a label to a contour.
"""
# Shorthand
cs = self.cs
if event.inaxes == cs.ax:
conmin,segmin,imin,xmin,ymin = cs.find_nearest_contour(
event.x, event.y, cs.labelIndiceList)[:5]
# Get index of nearest level in subset of levels used for labeling
lmin = cs.labelIndiceList.index(conmin)
# Coordinates of contour
paths = cs.collections[conmin].get_paths()
lc = paths[segmin].vertices
# In pixel/screen space
slc = cs.ax.transData.transform(lc)
# Get label width for rotating labels and breaking contours
lw = cs.get_label_width(cs.labelLevelList[lmin],
cs.labelFmt, cs.labelFontSizeList[lmin])
"""
# requires python 2.5
# Figure out label rotation.
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lc if self.inline else [],
self.inline_spacing )
"""
# Figure out label rotation.
if self.inline: lcarg = lc
else: lcarg = None
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lcarg,
self.inline_spacing )
cs.add_label(xmin,ymin,rotation,cs.labelLevelList[lmin],
cs.labelCValueList[lmin])
if self.inline:
# Remove old, not looping over paths so we can do this up front
paths.pop(segmin)
# Add paths if not empty or single point
for n in nlc:
if len(n)>1:
paths.append( path.Path(n) )
self.fig.canvas.draw()
else: # Remove event if not valid
BlockingInput.pop(self)
def button3(self,event):
"""
This will be called if button 3 is clicked. This will remove
a label if not in inline mode. Unfortunately, if one is doing
inline labels, then there is currently no way to fix the
broken contour - once humpty-dumpty is broken, he can't be put
back together. In inline mode, this does nothing.
"""
# Remove this last event - not too important for clabel use
# since clabel normally doesn't have a maximum number of
# events, but best for cleanliness sake.
BlockingInput.pop(self)
if self.inline:
pass
else:
self.cs.pop_label()
self.cs.ax.figure.canvas.draw()
def __call__(self,inline,inline_spacing=5,n=-1,timeout=-1):
self.inline=inline
self.inline_spacing=inline_spacing
BlockingMouseInput.__call__(self,n=n,timeout=timeout,
show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve a single mouse or
keyboard click
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig, eventslist=('button_press_event','key_press_event') )
def post_event(self):
"""
Determines if it is a key event
"""
assert len(self.events)>0, "No events yet"
self.keyormouse = self.events[-1].name == 'key_press_event'
def __call__(self, timeout=30):
"""
Blocking call to retrieve a single mouse or key click
Returns True if key click, False if mouse, or None if timeout
"""
self.keyormouse = None
BlockingInput.__call__(self,n=1,timeout=timeout)
return self.keyormouse
| gpl-3.0 |
fmacias64/Dato-Core | src/unity/python/graphlab/test/test_dataframe.py | 13 | 1711 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import unittest
import pandas
import array
from graphlab.cython.cy_dataframe import _dataframe
from pandas.util.testing import assert_frame_equal
class DataFrameTest(unittest.TestCase):
def test_empty(self):
expected = pandas.DataFrame()
assert_frame_equal(_dataframe(expected), expected)
expected['int'] = []
expected['float'] = []
expected['str'] = []
assert_frame_equal(_dataframe(expected), expected)
def test_simple_dataframe(self):
expected = pandas.DataFrame()
expected['int'] = [i for i in range(10)]
expected['float'] = [float(i) for i in range(10)]
expected['str'] = [str(i) for i in range(10)]
expected['unicode'] = [unicode(i) for i in range(10)]
expected['array'] = [array.array('d', [i]) for i in range(10)]
expected['ls'] = [[str(i)] for i in range(10)]
assert_frame_equal(_dataframe(expected), expected)
def test_sparse_dataframe(self):
expected = pandas.DataFrame()
expected['sparse_int'] = [i if i % 2 == 0 else None for i in range(10)]
expected['sparse_float'] = [float(i) if i % 2 == 1 else None for i in range(10)]
expected['sparse_str'] = [str(i) if i % 3 == 0 else None for i in range(10)]
expected['sparse_array'] = [array.array('d', [i]) if i % 5 == 0 else None for i in range(10)]
expected['sparse_list'] = [[str(i)] if i % 7 == 0 else None for i in range(10)]
assert_frame_equal(_dataframe(expected), expected)
| agpl-3.0 |
cs60050/TeamGabru | 1-Optical-Flow/2-ML-Model/anamoly-detection-classifier.py | 1 | 5879 | from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn import metrics
import os
import codecs
import numpy as np
import pickle
basepath = os.path.dirname(os.path.abspath(__file__))+"/../featueExtraction/Output"
model_path = os.path.dirname(os.path.abspath(__file__))+"/TrainedClassifiers"
output_path = os.path.dirname(os.path.abspath(__file__))+"/Output"
eval_path = os.path.dirname(os.path.abspath(__file__))+"/Evaluation"
names = ["DecisionTree"]
evaluation_names = ["Accuracy","F1 Score","F1_Micro","F1_Macro","F1_Weighted","Log_Loss","Precision","Recall","ROC_AUC"]
def evaluate(y_true,y_pred):
return [accuracy_score(y_true, y_pred),
f1_score(y_true, y_pred, average=None),
f1_score(y_true, y_pred, average='micro'),
f1_score(y_true, y_pred, average='macro'),
f1_score(y_true, y_pred, average='weighted'),
log_loss(y_true,y_pred),
precision_score(y_true, y_pred, average=None),
recall_score(y_true, y_pred, average=None),
roc_auc_score(y_true, y_pred)]
def auc_and_eer(y_true, y_pred):
fpr, tpr, _ = metrics.roc_curve(y_true, y_pred)
return [metrics.auc(fpr, tpr), EER(fpr, tpr)]
def EER(fpr, tpr):
from scipy.optimize import brentq
from scipy.interpolate import interp1d
eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
return eer
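# Worked toy check of EER above (made-up ROC points): brentq locates the
# operating point where the false positive rate equals the false negative
# rate, i.e. the root of 1 - x - tpr(x) with tpr interpolated over fpr:
#
#     fpr = np.array([0.0, 0.1, 0.4, 1.0])
#     tpr = np.array([0.0, 0.6, 0.9, 1.0])
#     EER(fpr, tpr)   # -> 0.25 (there 1 - tpr = 1 - 0.75 = 0.25 = fpr)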
def load_train_dataset(train_path):
files = os.listdir(train_path)
X_train = []
y_train = []
for filename in files:
if filename == ".DS_Store":
continue
file = codecs.open(train_path+"/"+filename,'r','utf-8')
for row in file:
l = row.strip().split(",")
X_train.append(l[0:11])
y_train.append(int(l[11]))
print(filename)
return X_train,y_train
def load_test_dataset(test_path):
files = os.listdir(test_path)
X_test = []
y_true = []
for filename in files:
if filename == ".DS_Store":
continue
file = codecs.open(test_path+"/"+filename,'r','utf-8')
for row in file:
l = row.strip().split(",")
X_test.append(l[0:11])
y_true.append(int(l[11]))
print(filename)
return X_test,y_true
def plot():
    # One subplot; unpack the figure and axes immediately
    f, ax1 = plt.subplots(1)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
def main():
treshold_dirs = os.listdir(basepath)
for dir in treshold_dirs:
if dir == ".DS_Store":
continue
print(dir)
ped_dirs = os.listdir(basepath+"/"+dir)
for sub_dir in ped_dirs:
if sub_dir == ".DS_Store":
continue
print(dir,sub_dir)
train_path = basepath+"/"+dir+"/"+sub_dir+"/Train"
test_path = basepath+"/"+dir+"/"+sub_dir+"/Test"
write_file = codecs.open(output_path+"/"+dir+"_"+sub_dir+"-output.txt",'w','utf-8')
eval_file = codecs.open(eval_path+"/"+dir+"_"+sub_dir+"-evaluation_scores.txt",'w','utf-8')
X_train,y_train = load_train_dataset(train_path)
X_test,y_true = load_test_dataset(test_path)
# pca = PCA(n_components = 2)
# X_red, y_red = pca.fit_transform(X_train, y_train)
print(train_path,test_path)
classifiers = [
DecisionTreeClassifier(max_depth=5)]
for algo, clf in zip(names, classifiers):
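                # Reuse a previously trained model for this (threshold, scene,
                # algorithm) combination if a pickled copy exists; otherwise fit
                # on the training split and cache the fitted classifier to disk.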
try:
with open(model_path+"/"+dir+"/"+sub_dir+"/"+algo + '.pkl', 'rb') as f1:
clf = pickle.load(f1)
except:
clf.fit(X_train, y_train)
with open(model_path+"/"+dir+"/"+sub_dir+"/"+algo + '.pkl', 'wb') as f1:
pickle.dump(clf, f1)
predicted = []
print(algo+"_fitted")
for ind in range(0,len(X_test)):
try:
vector = np.matrix(X_test[ind])
predicted+=[clf.predict(vector)[0]]
except:
print("Error")
print(algo, predicted, file=write_file)
print(algo+"_Tested")
report = metrics.classification_report(y_true, predicted)
print(algo,file=eval_file)
print(report, file = eval_file)
#print(evaluate(y_test, y_hat))
print(auc_and_eer(y_true, predicted), file = eval_file)
# scores = evaluate(y_true,predicted)
# print(algo+"\t"+str(scores),file=eval_file)
if __name__ == "__main__":main()
| mit |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_inst_NoRot/Geneva_inst_NoRot_6/fullgrid/peaks_reader.py | 32 | 5021 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ---------------------------------------------------
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
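# In other words, each stored value below is (sketch of the formula in the loop):
#     concatenated_data[i, j] = log10( 4860 * F_line[i, j] / F_ref[i] )
# where column 57 holds the 4860 A reference line (presumably H-beta).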
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
savetxt('peaks', max_values, delimiter='\t')
| gpl-2.0 |
robbymeals/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/stem_plot.py | 1 | 1063 | """
=========
Stem Plot
=========
Example stem plot.
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
x = np.linspace(0.1, 2 * np.pi, 10)
markerline, stemlines, baseline = plt.stem(x, np.cos(x), '-.')
plt.setp(baseline, 'color', 'r', 'linewidth', 2)
pltshow(plt)
| mit |
zrhans/python | exemplos/Examples.lnk/bokeh/charts/iris_scatter.py | 1 | 1070 | from collections import OrderedDict
import numpy as np
import pandas as pd
from bokeh.sampledata.iris import flowers
from bokeh.charts import Scatter
from bokeh.plotting import output_file, show
# we fill a df with the data of interest and create a groupby pandas object
df = flowers[["petal_length", "petal_width", "species"]]
xyvalues = g = df.groupby("species")
# here we only drop that groupby object into a dict ..
pdict = OrderedDict()
for i in g.groups.keys():
labels = g.get_group(i).columns
xname = labels[0]
yname = labels[1]
x = getattr(g.get_group(i), xname)
y = getattr(g.get_group(i), yname)
pdict[i] = zip(x, y)
# any of the following commented are valid Scatter inputs
#xyvalues = pdict
#xyvalues = pd.DataFrame(xyvalues)
#xyvalues = xyvalues.values()
#xyvalues = np.array(xyvalues.values())
output_file("iris_scatter.html")
TOOLS="resize,crosshair,pan,wheel_zoom,box_zoom,reset,previewsave"
scatter = Scatter(
xyvalues, filename="iris_scatter.html", tools=TOOLS, ylabel='petal_width'
)
show(scatter) # or scatter.show()
| gpl-2.0 |
mjgrav2001/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
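# Sketch of typical calls (paths shown are only illustrative):
#
#     get_data_home()                      # '~/scikit_learn_data', expanded and created
#     get_data_home('/tmp/sklearn_data')   # explicit folder, created if missing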
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
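# Sketch of a typical load_files call (assumes a container folder laid out as in
# the docstring above, one subfolder of text files per category):
#
#     dataset = load_files('/path/to/container_folder', encoding='utf-8')
#     dataset.target_names   # subfolder names used as class labels
#     dataset.data[0]        # decoded contents of the first document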
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
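# A minimal usage sketch for load_diabetes, assuming this module is importable
# as sklearn.datasets; the shapes simply restate the 442 x 10 layout documented
# in the docstring above.
# >>> from sklearn.datasets import load_diabetes
# >>> diabetes = load_diabetes()
# >>> diabetes.data.shape
# (442, 10)
# >>> diabetes.target.shape
# (442,)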
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
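# A minimal usage sketch for load_linnerud, assuming this module is importable
# as sklearn.datasets; the 20 x 3 shapes restate the summary in the docstring
# above, which applies to both data and targets.
# >>> from sklearn.datasets import load_linnerud
# >>> linnerud = load_linnerud()
# >>> linnerud.data.shape
# (20, 3)
# >>> linnerud.target.shape
# (20, 3)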
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
        Dictionary-like object with the following attributes:
        'images', the two sample images, 'filenames', the file
        names for the images, and 'DESCR', the full description
        of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
    Parameters
    ----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
    Examples
    --------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
miloharper/neural-network-animation | matplotlib/testing/jpl_units/UnitDblFormatter.py | 23 | 1485 | #===========================================================================
#
# UnitDblFormatter
#
#===========================================================================
"""UnitDblFormatter module containing class UnitDblFormatter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.ticker as ticker
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblFormatter' ]
#===========================================================================
class UnitDblFormatter( ticker.ScalarFormatter ):
"""The formatter for UnitDbl data types. This allows for formatting
with the unit string.
"""
def __init__( self, *args, **kwargs ):
'The arguments are identical to matplotlib.ticker.ScalarFormatter.'
ticker.ScalarFormatter.__init__( self, *args, **kwargs )
def __call__( self, x, pos = None ):
'Return the format for tick val x at position pos'
if len(self.locs) == 0:
return ''
else:
return str(x)
def format_data_short( self, value ):
"Return the value formatted in 'short' format."
return str(value)
def format_data( self, value ):
"Return the value formatted into a string."
return str(value)
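# A minimal usage sketch, assuming matplotlib is installed; the values are
# hypothetical and only illustrate that this formatter falls back to str().
# >>> fmt = UnitDblFormatter()
# >>> fmt.format_data(1.5)
# '1.5'
# >>> fmt.format_data_short(1.5)
# '1.5'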
| mit |
KHP-Informatics/sleepsight-analytics | scarp.py | 1 | 1058 | # A minimal example illustrating how to use
# Gaussian processes for binary classification
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.preprocessing import normalize
n = 1000
def stochProcess(x, a, b):
return np.exp(a*x) * np.cos(b*x)
def fx(processes, x):
samples = processes[x]
den = norm.pdf(samples)
idxSort = np.argsort(samples)
x = np.sort(samples)
y = den[idxSort]
return (x, y)
# STOCHASTIC PROCESS
a = np.random.uniform(low=0, high=1, size=n)
a = normalize(a.reshape(1, -1))[0]
a = a + (-a.min())
b = np.random.normal(size=n)
s = np.linspace(0, 2, num=100)
print(a)
## sampling
stoch = []
for x_val in s:
    output = [stochProcess(x_val, a[i], b[i]) for i in range(0, len(a))]
    stoch.append(output)
## dist
x, y = fx(stoch, 50)
## plot
stochT = np.transpose(stoch)
stochDisplay = np.transpose([stochT[i] for i in range(0, 10)])
f, ax = plt.subplots(2, 2)
ax[0, 0].plot(s, stochDisplay)
ax[0, 1].plot(x, y)
#ax3.plot(stoch)
#ax4.plot(x, y)
plt.show()
| apache-2.0 |
jamespeterschinner/async_v20 | tests/test_definitions/test_base.py | 1 | 12433 | import logging
import ujson as json
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from async_v20.definitions.base import Model, Array, create_attribute
from async_v20.definitions.helpers import flatten_dict
from async_v20.definitions.primitives import TradeID, AccountID
from async_v20.definitions.types import Account
from async_v20.definitions.types import ArrayInstrument
from async_v20.definitions.types import ArrayOrder
from async_v20.definitions.types import ArrayPosition
from async_v20.definitions.types import ArrayStr
from async_v20.definitions.types import ArrayTrade
from async_v20.definitions.types import ArrayTransaction
from async_v20.definitions.types import Order
from async_v20.definitions.types import Position
from async_v20.definitions.types import Trade
from async_v20.definitions.types import TradeSummary
from async_v20.exceptions import InstantiationFailure, IncompatibleValue
from ..data.json_data import GETAccountID_response, example_trade_summary, example_changed_trade_summary
from ..data.json_data import account_example
from ..data.json_data import example_transactions, example_positions, example_instruments, example_trade_array
from ..fixtures.client import client
from ..fixtures.server import server
logger = logging.getLogger('async_v20')
logger.disabled = True
client = client
server = server
from pandas import Timestamp
@pytest.fixture
def account():
result = Account(**GETAccountID_response['account'])
yield result
del result
def test_account_has_correct_methods(account):
assert hasattr(account, 'dict')
assert hasattr(account, 'data')
assert hasattr(account, 'series')
@pytest.fixture
def test_kwargs():
kwargs = {'type': 'LIST', 'value': 'TEST_VALUE'}
yield kwargs
del kwargs
@pytest.fixture
def test_class():
class TestClass(Model):
_dispatch = {'type': 'LIST'}
test_cls = TestClass
yield test_cls
del test_cls
def test_base_dispatch_works_correctly():
pass
def test_json_dict_returns_correct_data_structure(account):
"""Test the result is formatted correctly. There is a requirement for
json_dict to be able to cast floats to strings, this is necessary when
serializing objects to send to OANDA. Though when used internally is it more
natural to leave floats as floats."""
result = account.dict(json=True, datetime_format='UNIX')
# Test result is a dict
assert type(result) == dict
flattened_result = flatten_dict(result)
# Test that all values have the correct data type
for value in flattened_result:
assert isinstance(value, (dict, str, int, list))
result = account.dict(json=False, datetime_format='UNIX')
assert type(result) == dict
flattened_result = flatten_dict(result)
# Test that all values have the correct data type. Specifically that
# all floats have not been casted to a string.
for value in flattened_result:
assert isinstance(value, (dict, float, str, int, list))
if isinstance(value, str):
with pytest.raises(ValueError):
float(value)
def test_json_data(account):
result = account.json(datetime_format='UNIX')
assert type(result) == str
assert json.loads(result) == account.dict(json=True, datetime_format='UNIX')
def test_data(account):
result = account.data(json=True, datetime_format='UNIX')
for value in result:
assert isinstance(value, (str, int, list))
result = account.data(json=False)
for value in result:
assert isinstance(value, (float, str, int, list))
if isinstance(value, str):
with pytest.raises(ValueError):
float(value)
def test_series_doesnt_convert_datetime(account):
result = account.series(datetime_format='UNIX')
for value in result:
assert isinstance(value, (float, str, int, list, type(None)))
if isinstance(value, str):
# All values in a series object should be a float if they can be
with pytest.raises(ValueError):
float(value)
def test_series_converts_time_to_datetime(account):
result = account.series()
with pytest.raises(AssertionError):
for value in result:
assert isinstance(value, (float, str, int, list, type(None)))
for value in result:
assert isinstance(value, (float, str, int, list, type(None), Timestamp))
def test_array_returns_instantiation_error():
class ArrayTest(Array, contains=int):
pass
with pytest.raises(InstantiationFailure):
instance = ArrayTest('ABC', 'DEF')
result = instance[0]
def test_array_with_no_dict_does_not_error_when_attempting_get_id():
class ArrayTest(Array, contains=int):
pass
instance = ArrayTest('ABC', 'DEF')
result = instance.get_id(1)
def test_create_attribute_returns_incompatible_error():
with pytest.raises(IncompatibleValue):
create_attribute(AccountID, TradeID(123))
with pytest.raises(IncompatibleValue):
create_attribute(ArrayStr, TradeID(123))
def test_model_update():
trade_summary = TradeSummary(**example_trade_summary)
changed_trade_summary = TradeSummary(**example_changed_trade_summary)
result = trade_summary.replace(**changed_trade_summary.dict(json=False))
merged = trade_summary.dict()
merged.update(changed_trade_summary.dict())
assert all(map(lambda x: x in merged, result.dict().keys()))
assert result.dict() == TradeSummary(**merged).dict()
def test_array_get_id_returns_id():
data = json.loads(example_transactions)
transactions = ArrayTransaction(*json.loads(example_transactions))
assert transactions.get_id(6607).id == 6607
assert transactions.get_id(123) == None
def test_array_get_instrument_returns_instrument():
positions = ArrayPosition(*json.loads(example_positions))
assert positions.get_instrument('AUD_USD').instrument == 'AUD_USD'
assert positions.get_instrument('EUR_USD') == None
instruments = ArrayInstrument(*json.loads(example_instruments))
assert instruments.get_instrument('AUD_USD').name == 'AUD_USD'
assert instruments.get_instrument('EUR_USD').name == 'EUR_USD'
def test_array_in_returns_true_when_instrument_is_present():
positions = ArrayPosition(*json.loads(example_positions))
assert 'AUD_USD' in positions
def test_array_in_returns_true_when_id_is_present():
trades = ArrayTrade(*example_trade_array)
assert 7105 in trades
def test_array_in_returns_true_when_object_is_present():
trades = ArrayTrade(*example_trade_array)
trade = Trade(**example_trade_array[0])
assert trade in trades
def test_model_raises_not_implemented_when_checking_equality():
assert (Trade(0) == '0') == False
def test_same_arrays_are_equal():
assert ArrayTrade(*example_trade_array) == ArrayTrade(*example_trade_array)
def test_array_returns_false_checking_equality():
assert (ArrayTrade(*example_trade_array) == 'ERROR') == False
def test_array_negative_indexing_works():
array = ArrayTrade(*example_trade_array)
assert array[-1] == array[len(array) - 1]
def test_array_items_cannot_be_modified():
array = ArrayTrade(*example_trade_array)
with pytest.raises(TypeError):
array[0] = None
def test_array_items_cannot_be_assigned_to():
array = ArrayTrade(*example_trade_array)
with pytest.raises(NotImplementedError):
array.test = 'ERROR'
def test_array_items_cannot_be_deleted_to():
array = ArrayTrade(*example_trade_array)
with pytest.raises(NotImplementedError):
del array.items
def test_array_raises_index_error():
array = ArrayTrade(*example_trade_array)
with pytest.raises(IndexError):
r = array[100]
def test_array_hash_returns_same_hash():
array_1 = ArrayTrade(*example_trade_array)
array_2 = ArrayTrade(*example_trade_array)
assert hash(array_1) == hash(array_2)
assert array_1 == array_2
def test_slicing_array_allows_for_equality_checking():
array_1 = ArrayInstrument(*json.loads(example_instruments))
array_2 = array_1[2:6:2]
assert array_1[2] == array_2[0]
assert array_1[4] == array_2[1]
@pytest.mark.asyncio
async def test_array_dataframe_returns_dataframe(client, server):
# Easier to get a real response from the fake server than to mock a response
async with client as client:
rsp = await client.get_candles('AUD_USD')
df = rsp.candles.dataframe()
assert type(df) == DataFrame
@pytest.mark.asyncio
async def test_array_dataframe_converts_datetimes_to_correct_type(client, server):
# Easier to get a real response from the fake server than to mock a response
async with client as client:
rsp = await client.get_candles('AUD_USD')
df = rsp.candles.dataframe()
assert type(df.time[0]) == pd.Timestamp
df = rsp.candles.dataframe(datetime_format='UNIX')
assert type(df.time[0]) == np.int64
assert len(str(df.time[0])) == 19
df = rsp.candles.dataframe(datetime_format='UNIX', json=True)
assert type(df.time[0]) == str
assert len(str(df.time[0])) == 20
df = rsp.candles.dataframe(datetime_format='RFC3339')
assert type(df.time[0]) == str
assert len(str(df.time[0])) == 30
assert type(df) == DataFrame
def test_create_attribute_raises_error_when_unable_to_construct_type():
with pytest.raises(InstantiationFailure):
attribute = create_attribute(int, 'This is not an int')
@pytest.mark.asyncio
async def test_array_get_instruments_returns_all_matching_objects(client, server):
async with client as client:
rsp = await client.list_open_trades()
trades = rsp.trades.get_instruments('AUD_USD')
assert len(trades) == 40
assert type(trades) == ArrayTrade
@pytest.mark.asyncio
async def test_array_get_instruments_returns_default(client, server):
async with client as client:
rsp = await client.list_open_trades()
trades = rsp.trades.get_instruments('NOTHING', 'DEFAULT')
assert trades == 'DEFAULT'
@pytest.mark.asyncio
async def test_array_get_instrument_returns_single_object(client, server):
async with client as client:
rsp = await client.list_positions()
position = rsp.positions.get_instrument('AUD_USD')
assert type(position) == Position
@pytest.mark.asyncio
async def test_array_get_instrument_returns_default(client, server):
async with client as client:
rsp = await client.list_positions()
position = rsp.positions.get_instrument('NOTHING', 'DEFAULT')
assert position == 'DEFAULT'
@pytest.mark.asyncio
async def test_array_get_trade_id_returns_single_object(client, server):
async with client as client:
rsp = await client.list_orders()
print(rsp.json())
order = rsp.orders.get_trade_id(34543)
assert type(order) == Order
@pytest.mark.asyncio
async def test_array_get_trade_id_returns_default(client, server):
async with client as client:
rsp = await client.list_orders()
order = rsp.orders.get_trade_id('NOTHING', 'DEFAULT')
assert order == 'DEFAULT'
@pytest.mark.asyncio
async def test_array_get_trade_id_returns_correct_object(client, server):
async with client as client:
rsp = await client.list_orders()
order = rsp.orders.get_trade_id(34543, type='TAKE_PROFIT')
assert order.type == 'TAKE_PROFIT'
order = rsp.orders.get_trade_id(34543, type='STOP_LOSS')
assert order.type == 'STOP_LOSS'
order = rsp.orders.get_trade_id(34543, default='DEFAULT', type='INVALID')
assert order == 'DEFAULT'
@pytest.mark.asyncio
async def test_array_get_trade_ids_returns_array_object(client, server):
async with client as client:
rsp = await client.list_orders()
print(rsp.json())
orders = rsp.orders.get_trade_ids(34543)
assert type(orders) == ArrayOrder
@pytest.mark.asyncio
async def test_array_get_trade_ids_returns_default(client, server):
async with client as client:
rsp = await client.list_orders()
print(rsp.json())
orders = rsp.orders.get_trade_ids('NOTHING', 'DEFAULT')
assert orders == 'DEFAULT'
def test_model_get_method():
account = Account(**account_example['account'])
assert account.get('id')
assert account.get('doenstexist') == None
| mit |
zygmuntz/pybrain-practice | kin_predict.py | 3 | 1038 | "get predictions for a test set"
import numpy as np
import cPickle as pickle
from math import sqrt
from pybrain.datasets.supervised import SupervisedDataSet as SDS
from sklearn.metrics import mean_squared_error as MSE
test_file = 'data/test.csv'
model_file = 'model.pkl'
output_predictions_file = 'predictions.txt'
# load model
net = pickle.load( open( model_file, 'rb' ))
# load data
test = np.loadtxt( test_file, delimiter = ',' )
x_test = test[:,0:-1]
y_test = test[:,-1]
y_test = y_test.reshape( -1, 1 )
# you'll need labels. In case you don't have them...
y_test_dummy = np.zeros( y_test.shape )
input_size = x_test.shape[1]
target_size = y_test.shape[1]
assert( net.indim == input_size )
assert( net.outdim == target_size )
# prepare dataset
ds = SDS( input_size, target_size )
ds.setField( 'input', x_test )
ds.setField( 'target', y_test_dummy )
# predict
p = net.activateOnDataset( ds )
mse = MSE( y_test, p )
rmse = sqrt( mse )
print "testing RMSE:", rmse
np.savetxt( output_predictions_file, p, fmt = '%.6f' )
| unlicense |
rs2/pandas | pandas/core/aggregation.py | 1 | 16060 | """
aggregation.py contains utility functions to handle multiple named and lambda
kwarg aggregations in groupby and DataFrame/Series aggregation
"""
from collections import defaultdict
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
from pandas._typing import AggFuncType, Axis, FrameOrSeries, Label
from pandas.core.dtypes.common import is_dict_like, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.base import SpecificationError
import pandas.core.common as com
from pandas.core.indexes.api import Index
if TYPE_CHECKING:
from pandas.core.series import Series
def reconstruct_func(
func: Optional[AggFuncType], **kwargs
) -> Tuple[bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]]]:
"""
    This is the internal function to reconstruct ``func`` depending on whether
    relabeling is applied, and to normalize the keyword arguments to get the
    new order of columns.
If named aggregation is applied, `func` will be None, and kwargs contains the
column and aggregation function information to be parsed;
If named aggregation is not applied, `func` is either string (e.g. 'min') or
Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})
If relabeling is True, will return relabeling, reconstructed func, column
names, and the reconstructed order of columns.
If relabeling is False, the columns and order will be None.
Parameters
----------
func: agg function (e.g. 'min' or Callable) or list of agg functions
(e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
**kwargs: dict, kwargs used in is_multi_agg_with_relabel and
normalize_keyword_aggregation function for relabelling
Returns
-------
    relabelling: bool, whether relabelling is applied or not
func: normalized and mangled func
columns: list of column names
order: list of columns indices
Examples
--------
>>> reconstruct_func(None, **{"foo": ("col", "min")})
(True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
>>> reconstruct_func("min")
(False, 'min', None, None)
"""
relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
columns: Optional[List[str]] = None
order: Optional[List[int]] = None
if not relabeling:
if isinstance(func, list) and len(func) > len(set(func)):
# GH 28426 will raise error if duplicated function names are used and
# there is no reassigned name
raise SpecificationError(
"Function names must be unique if there is no new column names "
"assigned"
)
elif func is None:
# nicer error message
raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
if relabeling:
func, columns, order = normalize_keyword_aggregation(kwargs)
return relabeling, func, columns, order
def is_multi_agg_with_relabel(**kwargs) -> bool:
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> is_multi_agg_with_relabel(a="max")
False
>>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
True
>>> is_multi_agg_with_relabel()
False
"""
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
len(kwargs) > 0
)
def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]:
"""
Normalize user-provided "named aggregation" kwargs.
    Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
    to the old ``Dict[str, List[scalar]]``.
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
col_idx_order : List[int]
List of columns indices.
Examples
--------
>>> normalize_keyword_aggregation({"output": ("input", "sum")})
(defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
"""
# Normalize the aggregation functions as Mapping[column, List[func]],
# process normally, then fixup the names.
# TODO: aggspec type: typing.Dict[str, List[AggScalar]]
# May be hitting https://github.com/python/mypy/issues/5958
# saying it doesn't have an attribute __name__
aggspec: DefaultDict = defaultdict(list)
order = []
columns, pairs = list(zip(*kwargs.items()))
for name, (column, aggfunc) in zip(columns, pairs):
aggspec[column].append(aggfunc)
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
# uniquify aggfunc name if duplicated in order list
uniquified_order = _make_unique_kwarg_list(order)
# GH 25719, due to aggspec will change the order of assigned columns in aggregation
# uniquified_aggspec will store uniquified order list and will compare it with order
# based on index
aggspec_order = [
(column, com.get_callable_name(aggfunc) or aggfunc)
for column, aggfuncs in aggspec.items()
for aggfunc in aggfuncs
]
uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
# get the new index of columns by comparison
col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
return aggspec, columns, col_idx_order
def _make_unique_kwarg_list(
seq: Sequence[Tuple[Any, Any]]
) -> Sequence[Tuple[Any, Any]]:
"""
Uniquify aggfunc name of the pairs in the order list
    Examples
    --------
>>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
>>> _make_unique_kwarg_list(kwarg_list)
[('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
"""
return [
(pair[0], "_".join([pair[1], str(seq[:i].count(pair))]))
if seq.count(pair) > 1
else pair
for i, pair in enumerate(seq)
]
# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:
def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
"""
Possibly mangle a list of aggfuncs.
Parameters
----------
aggfuncs : Sequence
Returns
-------
mangled: list-like
A new AggSpec sequence, where lambdas have been converted
to have unique names.
Notes
-----
If just one aggfunc is passed, the name will not be mangled.
"""
if len(aggfuncs) <= 1:
# don't mangle for .agg([lambda x: .])
return aggfuncs
i = 0
mangled_aggfuncs = []
for aggfunc in aggfuncs:
if com.get_callable_name(aggfunc) == "<lambda>":
aggfunc = partial(aggfunc)
aggfunc.__name__ = f"<lambda_{i}>"
i += 1
mangled_aggfuncs.append(aggfunc)
return mangled_aggfuncs
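# A minimal sketch of the lambda name mangling above, using two hypothetical
# aggregation lambdas:
# >>> mangled = _managle_lambda_list([lambda x: x.min(), lambda x: x.max()])
# >>> [f.__name__ for f in mangled]
# ['<lambda_0>', '<lambda_1>']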
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
"""
Make new lambdas with unique names.
Parameters
----------
agg_spec : Any
An argument to GroupBy.agg.
Non-dict-like `agg_spec` are pass through as is.
For dict-like `agg_spec` a new spec is returned
with name-mangled lambdas.
Returns
-------
mangled : Any
Same type as the input.
Examples
--------
>>> maybe_mangle_lambdas('sum')
'sum'
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
"""
is_dict = is_dict_like(agg_spec)
if not (is_dict or is_list_like(agg_spec)):
return agg_spec
mangled_aggspec = type(agg_spec)() # dict or OrderedDict
if is_dict:
for key, aggfuncs in agg_spec.items():
if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
mangled_aggfuncs = _managle_lambda_list(aggfuncs)
else:
mangled_aggfuncs = aggfuncs
mangled_aggspec[key] = mangled_aggfuncs
else:
mangled_aggspec = _managle_lambda_list(agg_spec)
return mangled_aggspec
def relabel_result(
result: FrameOrSeries,
func: Dict[str, List[Union[Callable, str]]],
columns: Iterable[Label],
order: Iterable[int],
) -> Dict[Label, "Series"]:
"""
Internal function to reorder result if relabelling is True for
dataframe.agg, and return the reordered result in dict.
    Parameters
    ----------
result: Result from aggregation
func: Dict of (column name, funcs)
columns: New columns name for relabelling
order: New order for relabelling
    Examples
    --------
>>> result = DataFrame({"A": [np.nan, 2, np.nan],
... "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}) # doctest: +SKIP
>>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
>>> columns = ("foo", "aab", "bar", "dat")
>>> order = [0, 1, 2, 3]
    >>> relabel_result(result, funcs, columns, order)  # doctest: +SKIP
dict(A=Series([2.0, NaN, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
C=Series([NaN, 6.0, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
B=Series([NaN, NaN, 2.5, 4.0], index=["foo", "aab", "bar", "dat"]))
"""
reordered_indexes = [
pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
]
reordered_result_in_dict: Dict[Label, "Series"] = {}
idx = 0
reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
for col, fun in func.items():
s = result[col].dropna()
# In the `_aggregate`, the callable names are obtained and used in `result`, and
# these names are ordered alphabetically. e.g.
# C2 C1
# <lambda> 1 NaN
# amax NaN 4.0
# max NaN 4.0
# sum 18.0 6.0
# Therefore, the order of functions for each column could be shuffled
# accordingly so need to get the callable name if it is not parsed names, and
# reorder the aggregated result for each column.
# e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
# [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
# reorder so that aggregated values map to their functions regarding the order.
        # However, if there is only one column being used for aggregation, there
        # is no need to reorder since the index is not sorted, and it is kept as
        # is in `funcs`, e.g.
# A
# min 1.0
# mean 1.5
# mean 1.5
if reorder_mask:
fun = [
com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
]
col_idx_order = Index(s.index).get_indexer(fun)
s = s[col_idx_order]
# assign the new user-provided "named aggregation" as index names, and reindex
# it based on the whole user-provided names.
s.index = reordered_indexes[idx : idx + len(fun)]
reordered_result_in_dict[col] = s.reindex(columns, copy=False)
idx = idx + len(fun)
return reordered_result_in_dict
def validate_func_kwargs(
kwargs: dict,
) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
"""
Validates types of user-provided "named aggregation" kwargs.
`TypeError` is raised if aggfunc is not `str` or callable.
Parameters
----------
kwargs : dict
Returns
-------
columns : List[str]
        List of user-provided keys.
func : List[Union[str, callable[...,Any]]]
List of user-provided aggfuncs
Examples
--------
>>> validate_func_kwargs({'one': 'min', 'two': 'max'})
(['one', 'two'], ['min', 'max'])
"""
no_arg_message = "Must provide 'func' or named aggregation **kwargs."
tuple_given_message = "func is expected but received {} in **kwargs."
columns = list(kwargs)
func = []
for col_func in kwargs.values():
if not (isinstance(col_func, str) or callable(col_func)):
raise TypeError(tuple_given_message.format(type(col_func).__name__))
func.append(col_func)
if not columns:
raise TypeError(no_arg_message)
return columns, func
def transform(
obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs
) -> FrameOrSeries:
"""
Transform a DataFrame or Series
Parameters
----------
obj : DataFrame or Series
Object to compute the transform on.
func : string, function, list, or dictionary
Function(s) to compute the transform with.
axis : {0 or 'index', 1 or 'columns'}
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
Returns
-------
DataFrame or Series
Result of applying ``func`` along the given axis of the
Series or DataFrame.
Raises
------
ValueError
If the transform function fails or does not transform.
"""
is_series = obj.ndim == 1
if obj._get_axis_number(axis) == 1:
assert not is_series
return transform(obj.T, func, 0, *args, **kwargs).T
if isinstance(func, list):
if is_series:
func = {com.get_callable_name(v) or v: v for v in func}
else:
func = {col: func for col in obj}
if isinstance(func, dict):
return transform_dict_like(obj, func, *args, **kwargs)
# func is either str or callable
try:
result = transform_str_or_callable(obj, func, *args, **kwargs)
except Exception:
raise ValueError("Transform function failed")
# Functions that transform may return empty Series/DataFrame
# when the dtype is not appropriate
if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty:
raise ValueError("Transform function failed")
if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
obj.index
):
raise ValueError("Function did not transform")
return result
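# A minimal usage sketch for transform with a callable; the frame below is
# hypothetical and assumes pandas is importable in the usual way.
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": [1, 2, 3]})
# >>> transform(df, lambda x: x + 1, axis=0)
#    A
# 0  2
# 1  3
# 2  4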
def transform_dict_like(obj, func, *args, **kwargs):
"""
Compute transform in the case of a dict-like func
"""
from pandas.core.reshape.concat import concat
if obj.ndim != 1:
cols = sorted(set(func.keys()) - set(obj.columns))
if len(cols) > 0:
raise SpecificationError(f"Column(s) {cols} do not exist")
if any(isinstance(v, dict) for v in func.values()):
# GH 15931 - deprecation of renaming keys
raise SpecificationError("nested renamer is not supported")
results = {}
for name, how in func.items():
colg = obj._gotitem(name, ndim=1)
try:
results[name] = transform(colg, how, 0, *args, **kwargs)
except Exception as e:
if str(e) == "Function did not transform":
raise e
# combine results
if len(results) == 0:
raise ValueError("Transform function failed")
return concat(results, axis=1)
def transform_str_or_callable(obj, func, *args, **kwargs):
"""
Compute transform in the case of a string or callable func
"""
if isinstance(func, str):
return obj._try_aggregate_string_function(func, *args, **kwargs)
if not args and not kwargs:
f = obj._get_cython_func(func)
if f:
return getattr(obj, f)()
# Two possible ways to use a UDF - apply or call directly
try:
return obj.apply(func, args=args, **kwargs)
except Exception:
return func(obj, *args, **kwargs)
| bsd-3-clause |
RalphBariz/RalphsDotNet | Old/RalphsDotNet.Apps.OptimizationStudio/Resources/PyLib/numpy/linalg/linalg.py | 53 | 61098 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import sys
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
isfinite, size, finfo, absolute, log, exp
from numpy.lib import triu
from numpy.linalg import lapack_lite
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
"""
pass
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError, '%d-dimensional array given. Array must be \
two-dimensional' % len(a.shape)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError, 'Array must be square'
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError, "Array must not contain infs or NaNs"
def _assertNonEmpty(*arrays):
for a in arrays:
if size(a) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a,wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = range(0, an)
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : array_like, shape (M, M)
Coefficient matrix.
b : array_like, shape (M,) or (M, N)
Ordinate or "dependent variable" values.
Returns
-------
x : ndarray, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
`solve` is a wrapper for the LAPACK routines `dgesv`_ and
`zgesv`_, the former being used if `a` is real-valued, the latter if
it is complex-valued. The solution to the system of linear equations
is computed using an LU decomposition [1]_ with partial pivoting and
row interchanges.
.. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
.. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> (np.dot(a, x) == b).all()
True
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
one_eq = len(b.shape) == 1
if one_eq:
b = b[:, newaxis]
_assertRank2(a, b)
_assertSquareness(a)
n_eq = a.shape[0]
n_rhs = b.shape[1]
if n_eq != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
# lapack_routine = _findLapackRoutine('gesv', t)
if isComplexType(t):
lapack_routine = lapack_lite.zgesv
else:
lapack_routine = lapack_lite.dgesv
a, b = _fastCopyAndTranspose(t, a, b)
a, b = _to_native_byte_order(a, b)
pivots = zeros(n_eq, fortran_int)
results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise LinAlgError, 'Singular matrix'
if one_eq:
return wrap(b.ravel().astype(result_t))
else:
return wrap(b.transpose().astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError, "Invalid ind argument."
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : array_like, shape (M, M)
Matrix to be inverted.
Returns
-------
ainv : ndarray or matrix, shape (M, M)
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular or not square.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = LA.inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = LA.inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
"""
a, wrap = _makearray(a)
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : array_like, shape (M, M)
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : ndarray, or matrix object if `a` is, shape (M, M)
Lower-triangular Cholesky factor of a.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
m = a.shape[0]
n = a.shape[1]
if isComplexType(t):
lapack_routine = lapack_lite.zpotrf
else:
lapack_routine = lapack_lite.dpotrf
results = lapack_routine(_L, n, a, m, 0)
if results['info'] > 0:
raise LinAlgError, 'Matrix is not positive definite - \
Cholesky decomposition cannot be computed'
s = triu(a, k=0).transpose()
if (s.dtype != result_t):
s = s.astype(result_t)
return wrap(s)
# QR decompostion
def qr(a, mode='full'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like
Matrix to be factored, of shape (M, N).
mode : {'full', 'r', 'economic'}, optional
Specifies the values to be returned. 'full' is the default.
        Economic mode is slightly faster than 'r' mode if only `r` is needed.
Returns
-------
q : ndarray of float or complex, optional
The orthonormal matrix, of shape (M, K). Only returned if
``mode='full'``.
r : ndarray of float or complex, optional
The upper-triangular matrix, of shape (K, N) with K = min(M, N).
Only returned when ``mode='full'`` or ``mode='r'``.
a2 : ndarray of float or complex, optional
        Array of shape (M, N), only returned when ``mode='economic'``.
The diagonal and the upper triangle of `a2` contains `r`, while
the rest of the matrix is undefined.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
all the return values will be matrices too.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
a, wrap = _makearray(a)
_assertRank2(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# economic mode. Isn't actually economic.
if mode[0] == 'e':
if t != result_t :
a = a.astype(result_t)
return a.T
# generate r
r = _fastCopyAndTranspose(result_t, a[:,:mn])
for i in range(mn):
r[i,:i].fill(0.0)
# 'r'-mode, that is, calculate only r
if mode[0] == 'r':
return r
# from here on: build orthonormal matrix q from a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
q = _fastCopyAndTranspose(result_t, a[:mn,:])
return wrap(q), wrap(r)
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
that sets those routines' flags to return only the eigenvalues of
general real and complex arrays, respectively.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
rwork = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, lwork, 0)
if all(wi == 0.):
w = wr
result_t = _realType(result_t)
else:
w = wr+1j*wi
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd
that sets those routines' flags to return only the eigenvalues of
real symmetric and complex Hermitian arrays, respectively.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : array_like, shape (M, M)
A square array of real or complex elements.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered, nor are they
necessarily real for real arrays (though for real arrays
complex-valued eigenvalues should occur in conjugate pairs).
v : ndarray, shape (M, M)
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
which compute the eigenvalues and eigenvectors of, respectively,
general real- and complex-valued square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
a, t, result_t = _convertarray(a) # convert to double or cdouble type
a = _to_native_byte_order(a)
real_t = _linalgRealType(t)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
# Complex routines take different arguments
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
v = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
rwork = zeros((2*n,), real_t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
vr = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, lwork, 0)
if all(wi == 0.0):
w = wr
v = vr
result_t = _realType(result_t)
else:
w = wr+1j*wi
v = array(vr, w.dtype)
ind = flatnonzero(wi != 0.0) # indices of complex e-vals
for i in range(len(ind)//2):
v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
vt = v.transpose().astype(result_t)
return w.astype(result_t), wrap(vt)
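# Illustrative sketch (an editorial addition, not part of the original module):
# checks the defining relation quoted in the docstring,
# dot(a, v[:, i]) == w[i] * v[:, i], column by column. Hypothetical helper,
# never called.
def _eig_defining_relation_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(4, 4)
    w, v = np.linalg.eig(a)
    for i in range(a.shape[0]):
        assert np.allclose(np.dot(a, v[:, i]), w[i] * v[:, i])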
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : array_like, shape (M, M)
A complex Hermitian or real symmetric matrix.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered.
v : ndarray, or matrix object if `a` is, shape (M, M)
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd,
which compute the eigenvalues and eigenvectors of real symmetric and
complex Hermitian arrays, respectively.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
at = a.transpose().astype(result_t)
return w.astype(_realType(result_t)), wrap(at)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : ndarray
Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
depending on value of ``full_matrices``.
s : ndarray
The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
a 1-d array of length min(`M`, `N`).
v : ndarray
Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
``full_matrices``.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertNonEmpty(a)
m, n = a.shape
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
s = zeros((min(n, m),), real_t)
if compute_uv:
if full_matrices:
nu = m
nvt = n
option = _A
else:
nu = min(n, m)
nvt = min(n, m)
option = _S
u = zeros((nu, m), t)
vt = zeros((n, nvt), t)
else:
option = _N
nu = 1
nvt = 1
u = empty((1, 1), t)
vt = empty((1, 1), t)
iwork = zeros((8*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgesdd
rwork = zeros((5*min(m, n)*min(m, n) + 5*min(m, n),), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgesdd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge'
s = s.astype(_realType(result_t))
if compute_uv:
u = u.transpose().astype(result_t)
vt = vt.transpose().astype(result_t)
return wrap(u), s, wrap(vt)
else:
return s
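# Illustrative sketch (an editorial addition, not part of the original module):
# the Notes above state that the rows of v are eigenvectors of a.H a and that
# the corresponding eigenvalues are s[i]**2. The hypothetical helper below
# checks the eigenvalue half of that claim and is never called.
def _svd_eigenvalue_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(5, 3)
    s = np.linalg.svd(a, compute_uv=False)
    evals = np.linalg.eigvalsh(np.dot(a.conj().T, a))  # eigenvalues of a.H a
    assert np.allclose(np.sort(evals)[::-1], s ** 2)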
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : array_like, shape (M, N)
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0] / s[-1]
else:
return norm(x, p) * norm(inv(x), p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the
array that are greater than `tol`.
Parameters
----------
M : array_like
array of <=2 dimensions
tol : {None, float}
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * eps``.
Notes
-----
Golub and van Loan [1]_ define "numerical rank deficiency" as using
tol=eps*S[0] (where S[0] is the maximum singular value and thus the
2-norm of the matrix). This is one definition of rank deficiency,
and the one we use here. When floating point roundoff is the main
concern, then "numerical rank deficiency" is a reasonable choice. In
some cases you may prefer other definitions. The most useful measure
of the tolerance depends on the operations you intend to use on your
matrix. For example, if your data come from uncertain measurements
with uncertainties greater than floating point epsilon, choosing a
tolerance near that uncertainty may be preferable. The tolerance
may be absolute if the uncertainties are absolute rather than
relative.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.
Baltimore: Johns Hopkins University Press, 1996.
Examples
--------
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * finfo(S.dtype).eps
return sum(S > tol)
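# Illustrative sketch (an editorial addition, not part of the original module):
# the Notes above argue that the right tolerance depends on the noise in the
# data. The hypothetical helper below builds an exactly rank-1 matrix, adds
# measurement noise of order 1e-6, and shows that the default eps-based
# tolerance counts the noise as signal while a tolerance above the noise floor
# recovers the underlying rank. Never called.
def _matrix_rank_tolerance_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    base = np.outer(rng.randn(6), rng.randn(4))   # exact rank 1
    noisy = base + 1e-6 * rng.randn(6, 4)         # uncertainty ~ 1e-6
    assert np.linalg.matrix_rank(noisy) == 4      # default tol ~ S.max()*eps
    assert np.linalg.matrix_rank(noisy, tol=1e-4) == 1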
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : ndarray, shape (N, M)
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNonEmpty(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u)))
return wrap(res)
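# Illustrative sketch (an editorial addition, not part of the original module):
# the Notes above construct the pseudo-inverse from the SVD as
# A+ = Q2 * Sigma+ * Q1^T. The hypothetical helper below rebuilds pinv(a) that
# way for a full-rank matrix and is never called.
def _pinv_from_svd_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(5, 3)
    u, s, vt = np.linalg.svd(a, full_matrices=False)
    manual = np.dot(vt.T, np.dot(np.diag(1.0 / s), u.T))
    assert np.allclose(manual, np.linalg.pinv(a))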
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
sign : float or complex
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : float
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to `sign * np.exp(logdet)`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 2.0.0.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
See Also
--------
det
"""
a = asarray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
if isComplexType(t):
lapack_routine = lapack_lite.zgetrf
else:
lapack_routine = lapack_lite.dgetrf
pivots = zeros((n,), fortran_int)
results = lapack_routine(n, n, a, n, pivots, 0)
info = results['info']
if (info < 0):
raise TypeError, "Illegal input to Fortran routine"
elif (info > 0):
return (t(0.0), _realType(t)(-Inf))
sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
d = diagonal(a)
absd = absolute(d)
sign *= multiply.reduce(d / absd)
log(absd, absd)
logdet = add.reduce(absd, axis=-1)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
det : ndarray
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
"""
sign, logdet = slogdet(a)
return sign * exp(logdet)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : array_like, shape (M, N)
"Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : ndarray, shape (N,) or (N, K)
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residues : ndarray, shape (), (1,), or (K,)
Sums of residues; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : ndarray, shape (min(M,N),)
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0],:n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge in Linear Least Squares'
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def norm(x, ord=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like, shape (M,) or (M, N)
Input array.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
Returns
-------
n : float
Norm of the matrix or vector.
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
"""
x = asarray(x)
if ord is None: # check the default case first and handle it immediately
return sqrt(add.reduce((x.conj() * x).ravel().real))
nd = x.ndim
if nd == 1:
if ord == Inf:
return abs(x).max()
elif ord == -Inf:
return abs(x).min()
elif ord == 0:
return (x != 0).sum() # Zero norm
elif ord == 1:
return abs(x).sum() # special case for speedup
elif ord == 2:
return sqrt(((x.conj()*x).real).sum()) # special case for speedup
else:
try:
ord + 1
except TypeError:
raise ValueError, "Invalid norm order for vectors."
return ((abs(x)**ord).sum())**(1.0/ord)
elif nd == 2:
if ord == 2:
return svd(x, compute_uv=0).max()
elif ord == -2:
return svd(x, compute_uv=0).min()
elif ord == 1:
return abs(x).sum(axis=0).max()
elif ord == Inf:
return abs(x).sum(axis=1).max()
elif ord == -1:
return abs(x).sum(axis=0).min()
elif ord == -Inf:
return abs(x).sum(axis=1).min()
elif ord in ['fro','f']:
return sqrt(add.reduce((x.conj() * x).real.ravel()))
else:
raise ValueError, "Invalid norm order for matrices."
else:
raise ValueError, "Improper number of dimensions to norm."
| gpl-3.0 |
adwasser/masktools | masktools/superskims/plotting.py | 1 | 3437 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from itertools import cycle
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from .utils import mask_to_sky
__all__ = ['plot_mask', 'plot_galaxy']
def slit_patches(mask, color=None, sky_coords=False, center=None):
'''
Construct mpl patches for the slits of a mask. If sky_coords is True, the
output is in relative RA/Dec; the galaxy center is required in that case.
'''
patches = []
for slit in mask.slits:
x = slit.x
y = slit.y
dx = slit.length
dy = slit.width
# bottom left-hand corner
if sky_coords:
L = np.sqrt(dx**2 + dy**2) / 2
alpha = np.tan(dy / dx)
phi = np.pi / 2 - np.radians(slit.pa)
delta_x = L * (np.cos(alpha + phi) - np.cos(alpha))
delta_y = L * (np.sin(alpha + phi) - np.sin(alpha))
ra, dec = mask_to_sky(x - dx / 2, y - dy / 2, mask.mask_pa)
blc0 = (ra, dec)
angle = (90 - slit.pa)
blc = (ra + delta_x, dec - delta_y)
# blc = (ra + x1 + x2, dec - y1 + y2)
else:
blc = (x - dx / 2, y - dy / 2)
angle = slit.pa - mask.mask_pa
patches.append(mpl.patches.Rectangle(blc, dx, dy, angle=angle,
fc=color, ec='k', alpha=0.5))
# patches.append(mpl.patches.Rectangle(blc0, dx, dy, angle=0,
# fc=color, ec='k', alpha=0.1))
return patches
def plot_mask(mask, color=None, writeto=None, annotate=False):
'''Plot the slits in a mask, in mask coords'''
fig, ax = plt.subplots()
for p in slit_patches(mask, color=color, sky_coords=False):
ax.add_patch(p)
if annotate:
for slit in mask.slits:
ax.text(slit.x - 3, slit.y + 1, slit.name, size=8)
xlim = mask.x_max / 2
ylim = mask.y_max / 2
lim = min(xlim, ylim)
ax.set_title(mask.name)
ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
ax.set_xlabel('x offset (arcsec)', fontsize=16)
ax.set_ylabel('y offset (arcsec)', fontsize=16)
if writeto is not None:
fig.savefig(writeto)
return fig, ax
def plot_galaxy(galaxy, writeto=None):
'''Plot all slit masks'''
fig, ax = plt.subplots()
colors = cycle(['r', 'b', 'm', 'c', 'g'])
handles = []
for i, mask in enumerate(galaxy.masks):
color = next(colors)
label = str(i + 1) + galaxy.name + ' (PA = {:.2f})'.format(mask.mask_pa)
handles.append(mpl.patches.Patch(fc=color, ec='k',
alpha=0.5, label=label))
for p in slit_patches(mask, color=color,
sky_coords=True, center=galaxy.center):
ax.add_patch(p)
xlim = galaxy.masks[0].x_max / 2
ylim = galaxy.masks[0].y_max / 2
lim = min(xlim, ylim)
# reverse x axis so it looks like sky
ax.set_xlim(lim, -lim)
# ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
ax.set_title(galaxy.name, fontsize=16)
ax.set_xlabel('RA offset (arcsec)', fontsize=16)
ax.set_ylabel('Dec offset (arcsec)', fontsize=16)
ax.legend(handles=handles, loc='best')
if writeto is not None:
fig.savefig(writeto) #, bbox_inches='tight')
return fig, ax
| mit |
jlegendary/scikit-learn | sklearn/tree/tests/test_export.py | 76 | 9318 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
| bsd-3-clause |
freedomDR/shiny-robot | homework/project/project0202.py | 1 | 1432 | import cv2 as cv
import matplotlib.pyplot as plt
def show(img_show, position):
plt.subplot(position[0], position[1], position[2])
plt.imshow(img_show, cmap='Greys_r')
plt.xticks([]), plt.yticks([])
plt.title(position)
# plt.tight_layout()
if __name__ == '__main__':
plt.figure(1)
img = cv.imread('../../ImageMaterial/DIP3E_Original_Images_CH02/Fig0221(a)(ctskull-256).tif', cv.IMREAD_GRAYSCALE)
# Reduce the number of grey levels: subtracting (value mod k) keeps 256/k
# levels, i.e. 128, 64, 32, 16, 8, 4 and 2 levels for the factors below.
show(img, [2, 4, 1])
for i, k in enumerate([2, 4, 8, 16, 32, 64, 128]):
    quantized = img - img % k
    show(quantized, [2, 4, i + 2])
plt.show()
| gpl-3.0 |
mne-tools/mne-tools.github.io | dev/_downloads/63cab32016602394f025dbe0ed7f501b/30_filtering_resampling.py | 10 | 13855 | # -*- coding: utf-8 -*-
"""
.. _tut-filter-resample:
Filtering and resampling data
=============================
This tutorial covers filtering and resampling, and gives examples of how
filtering can be used for artifact repair.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. We'll also crop the data to 60 seconds
(to save memory on the documentation server):
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(0, 60).load_data() # use just 60 seconds of data, to save memory
###############################################################################
# Background on filtering
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# A filter removes or attenuates parts of a signal. Usually, filters act on
# specific *frequency ranges* of a signal — for example, suppressing all
# frequency components above or below a certain cutoff value. There are *many*
# ways of designing digital filters; see :ref:`disc-filtering` for a longer
# discussion of the various approaches to filtering physiological signals in
# MNE-Python.
#
#
# Repairing artifacts by filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Artifacts that are restricted to a narrow frequency range can sometimes
# be repaired by filtering the data. Two examples of frequency-restricted
# artifacts are slow drifts and power line noise. Here we illustrate how each
# of these can be repaired by filtering.
#
#
# Slow drifts
# ~~~~~~~~~~~
#
# Low-frequency drifts in raw data can usually be spotted by plotting a fairly
# long span of data with the :meth:`~mne.io.Raw.plot` method, though it is
# helpful to disable channel-wise DC shift correction to make slow drifts
# more readily visible. Here we plot 60 seconds, showing all the magnetometer
# channels:
mag_channels = mne.pick_types(raw.info, meg='mag')
raw.plot(duration=60, order=mag_channels, proj=False,
n_channels=len(mag_channels), remove_dc=False)
###############################################################################
# A half-period of this slow drift appears to last around 10 seconds, so a full
# period would be 20 seconds, i.e., :math:`\frac{1}{20} \mathrm{Hz}`. To be
# sure those components are excluded, we want our highpass to be *higher* than
# that, so let's try :math:`\frac{1}{10} \mathrm{Hz}` and :math:`\frac{1}{5}
# \mathrm{Hz}` filters to see which works best:
for cutoff in (0.1, 0.2):
raw_highpass = raw.copy().filter(l_freq=cutoff, h_freq=None)
fig = raw_highpass.plot(duration=60, order=mag_channels, proj=False,
n_channels=len(mag_channels), remove_dc=False)
fig.subplots_adjust(top=0.9)
fig.suptitle('High-pass filtered at {} Hz'.format(cutoff), size='xx-large',
weight='bold')
###############################################################################
# Looks like 0.1 Hz was not quite high enough to fully remove the slow drifts.
# Notice that the text output summarizes the relevant characteristics of the
# filter that was created. If you want to visualize the filter, you can pass
# the same arguments used in the call to :meth:`raw.filter()
# <mne.io.Raw.filter>` above to the function :func:`mne.filter.create_filter`
# to get the filter parameters, and then pass the filter parameters to
# :func:`mne.viz.plot_filter`. :func:`~mne.filter.create_filter` also requires
# parameters ``data`` (a :class:`NumPy array <numpy.ndarray>`) and ``sfreq``
# (the sampling frequency of the data), so we'll extract those from our
# :class:`~mne.io.Raw` object:
filter_params = mne.filter.create_filter(raw.get_data(), raw.info['sfreq'],
l_freq=0.2, h_freq=None)
###############################################################################
# Notice that the output is the same as when we applied this filter to the data
# using :meth:`raw.filter() <mne.io.Raw.filter>`. You can now pass the filter
# parameters (and the sampling frequency) to :func:`~mne.viz.plot_filter` to
# plot the filter:
mne.viz.plot_filter(filter_params, raw.info['sfreq'], flim=(0.01, 5))
###############################################################################
# .. _tut-section-line-noise:
#
# Power line noise
# ~~~~~~~~~~~~~~~~
#
# Power line noise is an environmental artifact that manifests as persistent
# oscillations centered around the `AC power line frequency`_. Power line
# artifacts are easiest to see on plots of the spectrum, so we'll use
# :meth:`~mne.io.Raw.plot_psd` to illustrate. We'll also write a little
# function that adds arrows to the spectrum plot to highlight the artifacts:
def add_arrows(axes):
# add some arrows at 60 Hz and its harmonics
for ax in axes:
freqs = ax.lines[-1].get_xdata()
psds = ax.lines[-1].get_ydata()
for freq in (60, 120, 180, 240):
idx = np.searchsorted(freqs, freq)
# get ymax of a small region around the freq. of interest
y = psds[(idx - 4):(idx + 5)].max()
ax.arrow(x=freqs[idx], y=y + 18, dx=0, dy=-12, color='red',
width=0.1, head_width=3, length_includes_head=True)
fig = raw.plot_psd(fmax=250, average=True)
add_arrows(fig.axes[:2])
###############################################################################
# It should be evident that MEG channels are more susceptible to this kind of
# interference than EEG channels, which are recorded in the magnetically
# shielded room.
# Removing power-line noise can be done with a notch filter,
# applied directly to the :class:`~mne.io.Raw` object, specifying an array of
# frequencies to be attenuated. Since the EEG channels are relatively
# unaffected by the power line noise, we'll also specify a ``picks`` argument
# so that only the magnetometers and gradiometers get filtered:
meg_picks = mne.pick_types(raw.info, meg=True)
freqs = (60, 120, 180, 240)
raw_notch = raw.copy().notch_filter(freqs=freqs, picks=meg_picks)
for title, data in zip(['Un', 'Notch '], [raw, raw_notch]):
fig = data.plot_psd(fmax=250, average=True)
fig.subplots_adjust(top=0.85)
fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
add_arrows(fig.axes[:2])
###############################################################################
# :meth:`~mne.io.Raw.notch_filter` also has parameters to control the notch
# width, transition bandwidth and other aspects of the filter. See the
# docstring for details.
#
# It's also possible to try to use a spectrum fitting routine to notch filter.
# In principle it can automatically detect the frequencies to notch, but our
# implementation generally does not do so reliably, so we specify the
# frequencies to remove instead, and it does a good job of removing the
# line noise at those frequencies:
raw_notch_fit = raw.copy().notch_filter(
freqs=freqs, picks=meg_picks, method='spectrum_fit', filter_length='10s')
for title, data in zip(['Un', 'spectrum_fit '], [raw, raw_notch_fit]):
fig = data.plot_psd(fmax=250, average=True)
fig.subplots_adjust(top=0.85)
fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
add_arrows(fig.axes[:2])
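# As mentioned just above, :meth:`~mne.io.Raw.notch_filter` also lets you
# control the width of each notch and the transition bandwidth. The following
# is a minimal sketch added for illustration (it assumes the ``notch_widths``
# and ``trans_bandwidth`` parameters; see the docstring for their exact
# semantics):

raw_notch_narrow = raw.copy().notch_filter(freqs=freqs, picks=meg_picks,
                                           notch_widths=2.,
                                           trans_bandwidth=1.)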
###############################################################################
# Resampling
# ^^^^^^^^^^
#
# EEG and MEG recordings are notable for their high temporal precision, and are
# often recorded with sampling rates around 1000 Hz or higher. This is good
# when precise timing of events is important to the experimental design or
# analysis plan, but also consumes more memory and computational resources when
# processing the data. In cases where high-frequency components of the signal
# are not of interest and precise timing is not needed (e.g., computing EOG or
# ECG projectors on a long recording), downsampling the signal can be a useful
# time-saver.
#
# In MNE-Python, the resampling methods (:meth:`raw.resample()
# <mne.io.Raw.resample>`, :meth:`epochs.resample() <mne.Epochs.resample>` and
# :meth:`evoked.resample() <mne.Evoked.resample>`) apply a low-pass filter to
# the signal to avoid `aliasing`_, so you don't need to explicitly filter it
# yourself first. This built-in filtering that happens when using
# :meth:`raw.resample() <mne.io.Raw.resample>`, :meth:`epochs.resample()
# <mne.Epochs.resample>`, or :meth:`evoked.resample() <mne.Evoked.resample>` is
# a brick-wall filter applied in the frequency domain at the `Nyquist
# frequency`_ of the desired new sampling rate. This can be clearly seen in the
# PSD plot, where a dashed vertical line indicates the filter cutoff; the
# original data had an existing lowpass at around 172 Hz (see
# ``raw.info['lowpass']``), and the data resampled from 600 Hz to 200 Hz gets
# automatically lowpass filtered at 100 Hz (the `Nyquist frequency`_ for a
# target rate of 200 Hz):
raw_downsampled = raw.copy().resample(sfreq=200)
for data, title in zip([raw, raw_downsampled], ['Original', 'Downsampled']):
fig = data.plot_psd(average=True)
fig.subplots_adjust(top=0.9)
fig.suptitle(title)
plt.setp(fig.axes, xlim=(0, 300))
###############################################################################
# Because resampling involves filtering, there are some pitfalls to resampling
# at different points in the analysis stream:
#
# - Performing resampling on :class:`~mne.io.Raw` data (*before* epoching) will
# negatively affect the temporal precision of Event arrays, by causing
# `jitter`_ in the event timing. This reduced temporal precision will
# propagate to subsequent epoching operations.
#
# - Performing resampling *after* epoching can introduce edge artifacts *on
# every epoch*, whereas filtering the :class:`~mne.io.Raw` object will only
# introduce artifacts at the start and end of the recording (which is often
# far enough from the first and last epochs to have no effect on the
# analysis).
#
# The following section suggests best practices to mitigate both of these
# issues.
#
#
# Best practices
# ~~~~~~~~~~~~~~
#
# To avoid the reduction in temporal precision of events that comes with
# resampling a :class:`~mne.io.Raw` object, and also avoid the edge artifacts
# that come with filtering an :class:`~mne.Epochs` or :class:`~mne.Evoked`
# object, the best practice is to:
#
# 1. low-pass filter the :class:`~mne.io.Raw` data at or below
# :math:`\frac{1}{3}` of the desired sample rate, then
#
# 2. decimate the data after epoching, by either passing the ``decim``
# parameter to the :class:`~mne.Epochs` constructor, or using the
# :meth:`~mne.Epochs.decimate` method after the :class:`~mne.Epochs` have
# been created.
#
# .. warning::
# The recommendation for setting the low-pass corner frequency at
# :math:`\frac{1}{3}` of the desired sample rate is a fairly safe rule of
# thumb based on the default settings in :meth:`raw.filter()
# <mne.io.Raw.filter>` (which are different from the filter settings used
# inside the :meth:`raw.resample() <mne.io.Raw.resample>` method). If you
# use a customized lowpass filter (specifically, if your transition
# bandwidth is wider than 0.5× the lowpass cutoff), downsampling to 3× the
# lowpass cutoff may still not be enough to avoid `aliasing`_, and
# MNE-Python will not warn you about it (because the :class:`raw.info
# <mne.Info>` object only keeps track of the lowpass cutoff, not the
# transition bandwidth). Conversely, if you use a steeper filter, the
# warning may be too sensitive. If you are unsure, plot the PSD of your
# filtered data *before decimating* and ensure that there is no content in
# the frequencies above the `Nyquist frequency`_ of the sample rate you'll
# end up with *after* decimation.
#
# Note that this method of manually filtering and decimating is exact only when
# the original sampling frequency is an integer multiple of the desired new
# sampling frequency. Since the sampling frequency of our example data is
# 600.614990234375 Hz, ending up with a specific sampling frequency like (say)
# 90 Hz will not be possible:
current_sfreq = raw.info['sfreq']
desired_sfreq = 90 # Hz
decim = np.round(current_sfreq / desired_sfreq).astype(int)
obtained_sfreq = current_sfreq / decim
lowpass_freq = obtained_sfreq / 3.
raw_filtered = raw.copy().filter(l_freq=None, h_freq=lowpass_freq)
events = mne.find_events(raw_filtered)
epochs = mne.Epochs(raw_filtered, events, decim=decim)
print('desired sampling frequency was {} Hz; decim factor of {} yielded an '
'actual sampling frequency of {} Hz.'
.format(desired_sfreq, decim, epochs.info['sfreq']))
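# The same decimation can also be applied after the epochs are created, using
# the :meth:`~mne.Epochs.decimate` method mentioned in step 2 of the best
# practices above. This short sketch is an editorial addition for illustration
# and reuses the ``raw_filtered``, ``events`` and ``decim`` values from above:

epochs_decimated = mne.Epochs(raw_filtered, events).decimate(decim)
print('Epochs.decimate() likewise yields a sampling frequency of {} Hz.'
      .format(epochs_decimated.info['sfreq']))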
###############################################################################
# If for some reason you cannot follow the above-recommended best practices,
# you should at the very least either:
#
# 1. resample the data *after* epoching, and make your epochs long enough that
# edge effects from the filtering do not affect the temporal span of the
# epoch that you hope to analyze / interpret; or
#
# 2. perform resampling on the :class:`~mne.io.Raw` object and its
# corresponding Events array *simultaneously* so that they stay more or less
# in synch. This can be done by passing the Events array as the
# ``events`` parameter to :meth:`raw.resample() <mne.io.Raw.resample>`.
#
#
# .. LINKS
#
# .. _`AC power line frequency`:
# https://en.wikipedia.org/wiki/Mains_electricity
# .. _`aliasing`: https://en.wikipedia.org/wiki/Anti-aliasing_filter
# .. _`jitter`: https://en.wikipedia.org/wiki/Jitter
# .. _`Nyquist frequency`: https://en.wikipedia.org/wiki/Nyquist_frequency
| bsd-3-clause |
jundongl/scikit-feast | skfeature/example/test_svm_forward.py | 3 | 1403 | import scipy.io
from sklearn.cross_validation import KFold
from skfeature.function.wrapper import svm_forward
from sklearn import svm
from sklearn.metrics import accuracy_score
def main():
# load data
mat = scipy.io.loadmat('../data/COIL20.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the idx of selected features from the training set
idx = svm_forward.svm_forward(X[train], y[train], n_features)
# obtain the dataset on the selected features
X_selected = X[:, idx]
# train a classification model with the selected features on the training dataset
clf.fit(X_selected[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(X_selected[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main()
| gpl-2.0 |
herilalaina/scikit-learn | sklearn/neighbors/__init__.py | 71 | 1025 | """
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
from .lof import LocalOutlierFactor
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest',
'LocalOutlierFactor']
| bsd-3-clause |
wangyum/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
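    # For illustration: 0.995 ** 10 is roughly 0.951, so about 95% of the
    # batches drawn below contain only True labels.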
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
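  # Worked example (for illustration): desired_auc = 0.75 gives x = 0.5, so True
  # scores are drawn from U[0.5, 1] and P[T > F] = 1 * 0.5 + 0.5 * 0.5 = 0.75.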
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
test.main()
| apache-2.0 |
ninotoshi/tensorflow | tensorflow/contrib/learn/python/learn/tests/test_estimators.py | 7 | 2438 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
class CustomOptimizer(tf.test.TestCase):
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
# setup exponential decay function
def exp_decay(global_step):
return tf.train.exponential_decay(learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
def custom_optimizer(learning_rate):
return tf.train.MomentumOptimizer(learning_rate, 0.9)
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=400,
learning_rate=exp_decay,
optimizer=custom_optimizer)
classifier.fit(X_train, y_train)
score = accuracy_score(y_test, classifier.predict(X_test))
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
simonsfoundation/CaImAn | use_cases/granule_cells/patches_pf.py | 2 | 10652 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 24 18:39:45 2016
@author: Andrea Giovannucci
For explanation consult at https://github.com/agiovann/Constrained_NMF/releases/download/v0.4-alpha/Patch_demo.zip
and https://github.com/agiovann/Constrained_NMF
"""
from __future__ import division
from __future__ import print_function
#%%
from builtins import str
from past.utils import old_div
try:
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
print((1))
except:
print('Not launched under iPython')
import matplotlib as mpl
mpl.use('TKAgg')
import sys
import numpy as np
import ca_source_extraction as cse
from time import time
import pylab as pl
import psutil
import glob
import os
import scipy
from ipyparallel import Client
#%%
backend = 'local'
if backend == 'SLURM':
n_processes = np.int(os.environ.get('SLURM_NPROCS'))
else:
    # roughly the number of cores on your machine
n_processes = np.maximum(np.int(psutil.cpu_count()), 1)
print(('using ' + str(n_processes) + ' processes'))
#%% start cluster for efficient computation
single_thread = False
if single_thread:
dview = None
else:
try:
c.close()
except:
        print('c did not exist, creating one')
    print("Stopping cluster to avoid unnecessary use of memory....")
sys.stdout.flush()
if backend == 'SLURM':
try:
cse.utilities.stop_server(is_slurm=True)
except:
print('Nothing to stop')
slurm_script = '/mnt/xfs1/home/agiovann/SOFTWARE/Constrained_NMF/SLURM/slurmStart.sh'
cse.utilities.start_server(slurm_script=slurm_script)
pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
c = Client(ipython_dir=pdir, profile=profile)
else:
cse.utilities.stop_server()
cse.utilities.start_server()
c = Client()
print(('Using ' + str(len(c)) + ' processes'))
dview = c[:len(c)]
#%% FOR LOADING ALL HDF5 FILES IN A FOLDER AND SAVING THEM ON A SINGLE MEMORY MAPPABLE FILE
fnames = []
# folder containing the demo files
base_folder = '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b38/20160726150632/'
for file in glob.glob(os.path.join(base_folder, '*.hdf5')):
if file.endswith(""):
fnames.append(os.path.abspath(file))
fnames.sort()
print(fnames)
fnames = fnames
#%% Create a unique file for the whole dataset
# THIS IS ONLY IF YOU NEED TO SELECT A SUBSET OF THE FIELD OF VIEW
# fraction_downsample=1;
# idx_x=slice(10,502,None)
# idx_y=slice(10,502,None)
# fname_new=cse.utilities.save_memmap(fnames,base_name='Yr',resize_fact=(1,1,fraction_downsample),remove_init=0,idx_xy=(idx_x,idx_y))
#%%
# idx_x=slice(12,500,None)
# idx_y=slice(12,500,None)
# idx_xy=(idx_x,idx_y)
downsample_factor = .3 # use .2 or .1 if file is large and you want a quick answer
idx_xy = None
base_name = 'Yr'
name_new = cse.utilities.save_memmap_each(fnames, dview=dview, base_name=base_name, resize_fact=(
1, 1, downsample_factor), remove_init=0, idx_xy=idx_xy)
name_new.sort(key=lambda fn: np.int(os.path.split(
fn)[-1][len(base_name):os.path.split(fn)[-1].find('_')]))
print(name_new)
#%%
n_chunks = 6 # increase this number if you have memory issues at this point
fname_new = cse.utilities.save_memmap_join(
    name_new, base_name='Yr', n_chunks=n_chunks, dview=dview)
#%% Create a unique file for the whole dataset
##
# fraction_downsample=1; # useful to downsample the movie across time. fraction_downsample=.1 means downsampling by a factor of 10
# fname_new=cse.utilities.save_memmap(fnames,base_name='Yr',resize_fact=(1,1,fraction_downsample),order='F')
#%%
#%%
# fname_new='Yr_d1_501_d2_398_d3_1_order_F_frames_369_.mmap'
Yr, dims, T = cse.utilities.load_memmap(fname_new)
d1, d2 = dims
Y = np.reshape(Yr, dims + (T,), order='F')
#%%
Cn = cse.utilities.local_correlations(Y[:, :, :3000])
pl.imshow(Cn, cmap='gray')
#%%
rf = [15, 512] # half-size of the patches in pixels. rf=25, patches are 50x50
stride = [5, 1] # amount of overlap between the patches in pixels
K = 8 # number of neurons expected per patch
gSig = [] # expected half size of neurons
merge_thresh = 0.8 # merging threshold, max correlation allowed
p = 1 # order of the autoregressive system
memory_fact = 1 # unitless number accounting for how much memory should be used; try different values to see which one works. The default is OK for a 16 GB system
save_results = True
#%% RUN ALGORITHM ON PATCHES
options_patch = cse.utilities.CNMFSetParms(
Y, n_processes, p=0, gSig=gSig, K=K, ssub=1, tsub=4, thr=merge_thresh)
options_patch['init_params']['method'] = 'sparse_nmf'
options_patch['init_params']['tsub'] = 4
options_patch['init_params']['ssub'] = 1
options_patch['init_params']['alpha_snmf'] = 10e2
options_patch['patch_params']['only_init'] = True
A_tot, C_tot, b, f, sn_tot, optional_outputs = cse.map_reduce.run_CNMF_patches(fname_new, (d1, d2, T), options_patch, rf=rf, stride=stride,
dview=dview, memory_fact=memory_fact)
print(('Number of components:' + str(A_tot.shape[-1])))
#%%
if save_results:
np.savez(os.path.join(base_folder, 'results_analysis_patch.npz'),
A_tot=A_tot.todense(), C_tot=C_tot, sn_tot=sn_tot, d1=d1, d2=d2, b=b, f=f)
#%% if you have many components this might take long!
pl.figure()
crd = cse.utilities.plot_contours(A_tot, Cn, thr=0.9)
#%% set parameters for full field of view analysis
options = cse.utilities.CNMFSetParms(
Y, n_processes, p=0, gSig=gSig, K=A_tot.shape[-1], thr=merge_thresh)
pix_proc = np.minimum(np.int((d1 * d2) / n_processes / (old_div(T, 2000.))),
np.int(old_div((d1 * d2), n_processes))) # regulates the amount of memory used
options['spatial_params']['n_pixels_per_process'] = pix_proc
options['temporal_params']['n_pixels_per_process'] = pix_proc
#%% merge spatially overlapping and temporally correlated components
if 1:
A_m, C_m, nr_m, merged_ROIs, S_m, bl_m, c1_m, sn_m, g_m = cse.merge_components(Yr, A_tot, [], np.array(C_tot), [], np.array(
C_tot), [], options['temporal_params'], options['spatial_params'], dview=dview, thr=options['merging']['thr'], mx=np.Inf)
else:
A_m, C_m, f_m = A_tot, C_tot, f
cse.utilities.view_patches_bar(Yr, A_m, C_m, b, f_m, d1, d2, C_m, img=Cn)
#%% update temporal to get Y_r
options['temporal_params']['p'] = 0
# change if denoised traces time constant is wrong
options['temporal_params']['fudge_factor'] = 0.96
options['temporal_params']['backend'] = 'ipyparallel'
C_m, A_m, b, f_m, S_m, bl_m, c1_m, neurons_sn_m, g2_m, YrA_m = cse.temporal.update_temporal_components(
Yr, A_m, np.atleast_2d(b).T, C_m, f, dview=dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params'])
#%% get rid of potentially noisy components.
# But check by visual inspection to have a feeling for the threshold. Try to be loose, you will be able to get rid of more of them later!
traces = C_m + YrA_m
idx_components, fitness, erfc = cse.utilities.evaluate_components(
traces, N=5, robust_std=False)
idx_components = idx_components[np.logical_and(True, fitness < -10)]
print((len(idx_components)))
cse.utilities.view_patches_bar(Yr, scipy.sparse.coo_matrix(A_m.tocsc()[
:, idx_components]), C_m[idx_components, :], b, f_m, d1, d2, YrA=YrA_m[idx_components, :], img=Cn)
#%%
A_m = A_m[:, idx_components]
C_m = C_m[idx_components, :]
#%% display components DO NOT RUN IF YOU HAVE TOO MANY COMPONENTS
pl.figure()
crd = cse.utilities.plot_contours(A_m, Cn, thr=0.9)
#%%
print(('Number of components:' + str(A_m.shape[-1])))
#%% UPDATE SPATIAL COMPONENTS
t1 = time()
options['spatial_params']['method'] = 'dilate'
A2, b2, C2, f = cse.spatial.update_spatial_components(
Yr, C_m, f, A_m, sn=sn_tot, dview=dview, **options['spatial_params'])
print((time() - t1))
#%% UPDATE TEMPORAL COMPONENTS
options['temporal_params']['p'] = p
# change if denoised traces time constant is wrong
options['temporal_params']['fudge_factor'] = 0.96
C2, A2, b2, f2, S2, bl2, c12, neurons_sn2, g21, YrA = cse.temporal.update_temporal_components(
Yr, A2, b2, C2, f, dview=dview, bl=None, c1=None, sn=None, g=None, **options['temporal_params'])
#%% Order components
#A_or, C_or, srt = cse.utilities.order_components(A2,C2)
#%% stop server and remove log files
#cse.utilities.stop_server(is_slurm = (backend == 'SLURM'))
log_files = glob.glob('Yr*_LOG_*')
for log_file in log_files:
os.remove(log_file)
#%% order components according to a quality threshold and only select the ones with quality larger than quality_threshold.
quality_threshold = -5
traces = C2 + YrA
idx_components, fitness, erfc = cse.utilities.evaluate_components(
traces, N=5, robust_std=False)
idx_components = idx_components[fitness < quality_threshold]
print((idx_components.size * 1. / traces.shape[0]))
#%%
pl.figure()
crd = cse.utilities.plot_contours(A2.tocsc()[:, idx_components], Cn, thr=0.9)
#%%
cse.utilities.view_patches_bar(Yr, scipy.sparse.coo_matrix(A2.tocsc()[
:, idx_components]), C2[idx_components, :], b2, f2, d1, d2, YrA=YrA[idx_components, :])
#%% save analysis results in python and matlab format
if save_results:
np.savez(os.path.join(base_folder, 'results_analysis.npz'), Cn=Cn, A_tot=A_tot.todense(), C_tot=C_tot, sn_tot=sn_tot, A2=A2.todense(), C2=C2, b2=b2,
S2=S2, f2=f2, bl2=bl2, c12=c12, neurons_sn2=neurons_sn2, g21=g21, YrA=YrA, d1=d1, d2=d2, idx_components=idx_components, fitness=fitness, erfc=erfc)
# scipy.io.savemat(os.path.join(base_folder,'output_analysis_matlab.mat'),{'A2':A2,'C2':C2 , 'YrA':YrA, 'S2': S2 ,'YrA': YrA, 'd1':d1,'d2':d2,'idx_components':idx_components, 'fitness':fitness })
#%%
#%% RELOAD COMPONENTS!
if save_results:
import sys
import numpy as np
import ca_source_extraction as cse
import scipy
import pylab as pl
with np.load('results_analysis.npz') as ld:
locals().update(ld)
fname_new = 'Yr0_d1_60_d2_80_d3_1_order_C_frames_2000_.mmap'
Yr, (d1, d2), T = cse.utilities.load_memmap(fname_new)
d, T = np.shape(Yr)
Y = np.reshape(Yr, (d1, d2, T), order='F') # 3D version of the movie
traces = C2 + YrA
idx_components, fitness, erfc = cse.utilities.evaluate_components(
traces, N=5, robust_std=False)
#cse.utilities.view_patches(Yr,coo_matrix(A_or),C_or,b2,f2,d1,d2,YrA = YrA[srt,:], secs=1)
cse.utilities.view_patches_bar(Yr, scipy.sparse.coo_matrix(
A2[:, idx_components]), C2[idx_components, :], b2, f2, d1, d2, YrA=YrA[idx_components, :])
| gpl-2.0 |
sudikrt/costproML | staticDataGSir/new_test.py | 2 | 1649 | import pandas as pd
from pandas.tseries.offsets import *
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import datetime
from pandas.tools.plotting import lag_plot
from pandas.tools.plotting import autocorrelation_plot
df = pd.read_csv ("outDataSingle.csv", header=0)
print df.head()
grouped = df.groupby ('job')
print "Job List : Engineer, Tester, Carpenter, Cook, Plumber, Mechanic";
prof = raw_input ("Enter a job :").lower()
if prof == "Engineer".lower():
group = list(grouped)[2][1]
elif prof == "Tester".lower():
group = list(grouped)[5][1]
elif prof == "Cook".lower():
group = list(grouped)[1][1]
elif prof == "Carpenter".lower():
group = list(grouped)[0][1]
elif prof == "Plumber".lower() :
group = list(grouped)[4][1]
else :
group = list(grouped)[3][1]
year = input ("Enter Year :")
group.drop('job', axis=1, inplace=True)
ts_data = pd.TimeSeries(group.maxsal.values, index=pd.to_datetime(group.date))
ts_log_data = np.log (ts_data)
model = sm.tsa.ARMA (ts_log_data, order=(1,1)).fit()
y_pred = model.predict (ts_log_data.index[0].isoformat(), ts_log_data.index[-1].isoformat())
start_date = (pd.to_datetime(str(year) + '-' + str(01) + '-' + str(01))).date()
da = start_date - ts_log_data.index[-1].date()
start_date = (ts_log_data.index[-1] + Day (1)).date()
end_date = (ts_log_data.index[-1] + Day (da.days)).date()
y_forecast = model.predict(start_date.isoformat(), end_date.isoformat())
y_pred.plot()
y_forecast.plot()
#group.plot ()
#lag_plot(group)
#autocorrelation_plot(group)
maxSAl = np.exp(y_forecast)
print "Max sal :" + str(maxSAl)
plt.show ()
| apache-2.0 |
johnmgregoire/NanoCalorimetry | PnSC_h5io.py | 1 | 29384 | import pylab
import matplotlib.cm as cm
import numpy
import h5py
import os, os.path, time, copy, operator
import PnSC_ui
#from PnSC_ui import *
#from PnSC_SCui import *
#from PnSC_dataimport import *
from PnSC_math import *
v=numpy.linspace(-16,16,5)
xmm=numpy.float32([x for i in range(5) for x in v])
ymm=numpy.float32([x for x in v[::-1] for i in range(5)])
def myeval(c):
if c=='None':
c=None
else:
temp=c.lstrip('0')
if (temp=='' or temp=='.') and '0' in c:
c=0
else:
c=eval(temp)
return c
def createh5file(h5path):
if os.path.exists(h5path):
mode='r+'
else:
mode='w'
h5file=h5py.File(h5path, mode=mode)
h5file.attrs['xmm']=xmm
h5file.attrs['ymm']=ymm
h5file.attrs['cells']=numpy.int32(range(1, 26))
#node=h5file[h5groupstr]
gstrlist=['Calorimetry']
for gs in gstrlist:
if not gs in h5file:
h5file.create_group(gs)
h5file.close()
def readh5pyarray(arrpoint):
return eval('arrpoint'+('['+':,'*len(arrpoint.shape))[:-1]+']')
def create_exp_grp(h5pf, h5expname):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r+')
else:
h5file=h5pf
if 'Calorimetry' in h5file:
h5cal=h5file['Calorimetry']
else:
h5cal=h5file.create_group('Calorimetry')
if h5expname in h5cal:
del h5cal[h5expname]
h5exp=h5cal.create_group(h5expname)
h5a=h5exp.create_group('analysis')
h5m=h5exp.create_group('measurement')
h5hp=h5m.create_group('HeatProgram')
if isinstance(h5pf, str):
h5file.close()
def writenewh5heatprogram(h5path, h5expname, grpname, AttrDict, DataSetDict, SegmentData):
h5file=h5py.File(h5path, mode='r+')
h5hp=h5file['Calorimetry'][h5expname]['measurement']['HeatProgram']
if grpname in h5hp:
del h5hp[grpname]
h5hpg=h5hp.create_group(grpname)
h5hpg.attrs['segment_ms']=SegmentData[0]
h5hpg.attrs['segment_mA']=SegmentData[1]
for k, v in AttrDict.iteritems():
#print k, type(v), v
h5hpg.attrs[k]=v
for nam, (adict, data) in DataSetDict.iteritems():
h5d=h5hpg.create_dataset(nam, data=data)
for k, v in adict.iteritems():
h5d.attrs[k]=v
h5file.close()
def geth5attrs(h5pf, h5grppath):#closes the h5file, doesnt return it
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
d={}
for k, v in h5file[h5grppath].attrs.iteritems():
d[k]=(v=='None' and (None,) or (v,))[0]
if isinstance(h5pf, str):
h5file.close()
return d
def getindex_cell(h5pf, cellnum):#closes the h5file, doesnt return it
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
#i=numpy.where(h5file.attrs['cells']==cellnum)[0][0]
i=list(h5file.attrs['cells']).index(cellnum)
if isinstance(h5pf, str):
h5file.close()
return i
def getcalanalysis(h5pf, h5expname):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
h5hp=h5file['Calorimetry'][h5expname]['analysis']
if isinstance(h5pf, str):
return h5file, h5hp
return h5hp
def dt_h5(h5path, h5expname, h5hpname):
h5file=h5py.File(h5path, mode='r')
h5hp=h5file['Calorimetry'][h5expname]['measurement']['HeatProgram'][h5hpname]
dt=1./h5hp.attrs['daqHz']
h5file.close()
return dt
def atm_h5(h5path, h5expname, h5hpname):
h5file=h5py.File(h5path, mode='r')
h5hp=h5file['Calorimetry'][h5expname]['measurement']['HeatProgram'][h5hpname]
atm=h5hp.attrs['ambient_atmosphere']
h5file.close()
return atm
def pts_sincycle_h5(h5path, h5expname, h5hpname):
h5file=h5py.File(h5path, mode='r')
h5hp=h5file['Calorimetry'][h5expname]['measurement']['HeatProgram'][h5hpname]
n=h5hp.attrs['pts_sincycle']
h5file.close()
return n
def gethpgroup(h5pf, h5expname, h5hpname=None):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
h5hp=h5file['Calorimetry'][h5expname]['measurement']['HeatProgram']
if not h5hpname is None:
h5hp=h5hp[h5hpname]
if isinstance(h5pf, str):
return h5file, h5hp
return h5hp
def assign_segmsma(h5path, h5expname, segms, segmA):
h5file=h5py.File(h5path, mode='r+')
h5hp=gethpgroup(h5file, h5expname)
for node in h5hp.values():
if 'segment_ms' in node.attrs.keys():
node.attrs['segment_ms']=segms
node.attrs['segment_mA']=segmA
h5file.close()
def experimenthppaths(h5pf, h5expname):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
p=[]
h5hp=gethpgroup(h5file, h5expname)
for pnt in h5hp.values():
if isinstance(pnt, h5py.Group):
p+=[pnt.name]
if isinstance(h5pf, str):
return h5file, p
return p
def msarr_hpgrp(h5hpgrp, twod=False):
for pnt in h5hpgrp.values():
if isinstance(pnt, h5py.Dataset):
ms=numpy.linspace(0., pnt.shape[1]-1., pnt.shape[1])
arrshape=pnt.shape
break
ms/=(h5hpgrp.attrs['daqHz']/1000.)
if twod:
ms=numpy.float32([ms]*arrshape[0])
return numpy.float32(ms)
def segtypes():
return ['step', 'soak', 'ramp', 'zero']
def CreateHeatProgSegDictList(h5path, h5expname, h5hpname, critms_step=1., critmAperms_constmA=0.01, critdelmA_constmA=10., critmA_zero=0.1, expandmultdim=False):
#the segment types are step, soak, ramp, zero
    #the CreateHeatProgSegDictList function reads the data from the .h5 file and organizes it in a way that will be useful for many types of analysis.
#the function returns a list where there is one dict for each segment in the heat program. Each dict value that is an array is assumed to be data and all have the same shape
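    #illustrative usage (the file, experiment and heat-program names below are hypothetical):
    #    dlist=CreateHeatProgSegDictList('data.h5', 'experiment1', 'cell1_heat1')
    #    ramps=[d for d in dlist if d['segmenttype']=='ramp']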
h5file, h5hpgrp=gethpgroup(h5path, h5expname, h5hpname)
ms=msarr_hpgrp(h5hpgrp, twod=True)
segms=h5hpgrp.attrs['segment_ms'][:]
segmA=h5hpgrp.attrs['segment_mA'][:]
dlist=[]
def indgen(t, ms1d=ms[0]):
ind=numpy.where(ms1d<=t)[0][-1]
if ind==len(ms1d)-1:
ind=len(ms1d)
return ind
for count, (ms0, ms1, mA0, mA1) in enumerate(zip(segms[:-1], segms[1:], segmA[:-1], segmA[1:])):
if (ms1-ms0)<=critms_step:
d={'segmenttype':'step'}
elif numpy.abs((mA1-mA0)/(ms1-ms0))<critmAperms_constmA and numpy.abs(mA1-mA0)<critdelmA_constmA:
if (mA1+mA0)<2.*critmA_zero:
d={'segmenttype':'zero'}
else:
d={'segmenttype':'soak'}
else:
d={'segmenttype':'ramp'}
d['ramprate']=(mA1-mA0)/(ms1-ms0)
i0=indgen(ms0)
i1=indgen(ms1)
iterpts=h5hpgrp.values()
h5an=getcalanalysis(h5file, h5expname)
#print h5hpname, h5hpname in h5an
if h5hpname in h5an:
iterpts+=h5an[h5hpname].values()
#print h5an[h5hpname].items()
for pnt in iterpts:
if isinstance(pnt, h5py.Dataset):
nam=pnt.name.rpartition('/')[2]
if len(pnt.shape)==2 or (len(pnt.shape)>2 and not expandmultdim):
arr=pnt[:, i0:i1]
#print nam, arr.shape, numpy.isnan(arr).sum()
if numpy.any(numpy.isnan(arr)):
continue
d[nam]=arr
for key, val in pnt.attrs.iteritems():
if 'unit' in key:
d[nam]*=val
elif len(pnt.shape)>2 and expandmultdim and nam in multidimcreateseghandler.keys():
multidimcreateseghandler[nam](eval('pnt[:, i0:i1'+',:'*(len(pnt.shape)-2)+']'), nam, d)
d['cycletime']=ms[:, i0:i1]/1000.
d['segment_ms']=(ms0, ms1)
d['segment_mA']=(mA0, mA1)
d['segment_inds']=(i0, i1)
d['segindex']=count
dlist+=[copy.deepcopy(d)]
h5hpgrp.file.close()
return dlist
def extractcycle_SegDict(d, cycind):
dc={}
for k, v in d.iteritems():
if isinstance(v, numpy.ndarray) and v.shape==d['cycletime'].shape:
dc[k]=v[cycind:cycind+1, :]
else:
dc[k]=v
return dc
def piecetogethersegments(arrlist):
cycles=arrlist[0].shape[0]
return numpy.array([numpy.concatenate([arr[c] for arr in arrlist]) for c in range(cycles)], dtype=arrlist[0].dtype)
def LIA_segdicthandler(arr, nam, d):
if numpy.any(numpy.isnan(arr)):
return
for i, h in enumerate(['1', '2', '3']):
d['%s_%sw_X' %(nam, h)]=arr[:, :, i, 0]
d['%s_%sw_Y' %(nam, h)]=arr[:, :, i, 1]
def WinFFT_segdicthandler(arr, nam, d):
if numpy.any(numpy.isnan(arr)):
return
for i, h in enumerate(['0', '0+', '1w-', '1w', '1w+', '2w-', '2w', '2w+', '3w-', '3w', '3w+']):
d['%s_%sw_X' %(nam, h)]=arr[:, :, i, 0]
d['%s_%sw_Y' %(nam, h)]=arr[:, :, i, 1]
multidimcreateseghandler={\
'LIAharmonics_current':LIA_segdicthandler, \
'LIAharmonics_filteredvoltage':LIA_segdicthandler, \
'LIAharmonics_voltage':LIA_segdicthandler, \
'WinFFT_current':WinFFT_segdicthandler, \
'WinFFT_filteredvoltage':WinFFT_segdicthandler, \
'WinFFT_voltage':WinFFT_segdicthandler, \
}
def saveSCcalculations(h5path, h5expname, h5hpname, hpsegdlist, recname):
h5file=h5py.File(h5path, mode='r+')
h5an=getcalanalysis(h5file, h5expname)
h5hp=gethpgroup(h5file, h5expname)
h5rg=getSCrecipegrp(h5file, h5expname)[recname]
recsavekeys=[]
for fcnname in h5rg.attrs['fcns']:
g=h5rg[fcnname]
recsavekeys+=[g.attrs['savename']]
if h5hpname in h5an:
h5g=h5an[h5hpname]
else:
h5g=h5an.create_group(h5hpname)
savekeys=set([(k, d[k].shape[2:], d[k].dtype) for d in hpsegdlist for k in d.keys() if k in recsavekeys and not ('~' in k or k in h5hp[h5hpname] or k=='cycletime') and isinstance(d[k], numpy.ndarray) and d[k].shape[:2]==d['cycletime'].shape])
#nansegssh=[numpy.ones(d['cycletime'].shape, dtype=d['cycletime'].dtype)*numpy.nan for d in hpsegdlist]
nansegssh=[d['cycletime'].shape for d in hpsegdlist]
mastershape=piecetogethersegments([d['cycletime'] for d in hpsegdlist]).shape
for k, endshape, dtype in list(savekeys):
savearr=piecetogethersegments([(k in d.keys() and (d[k],) or (numpy.ones(ns+endshape, dtype=dtype)*numpy.nan,))[0] for d, ns in zip(hpsegdlist, nansegssh)])
if k in h5g:
del h5g[k]
ds=h5g.create_dataset(k, data=savearr)
ds.attrs['recipe']=recname
ds.attrs['epoch']=time.time()
ds.attrs['savetime']=time.asctime()
#now save fit results
savekeys=set([k for d in hpsegdlist for k in d.keys() if k.startswith('FITPARS_') and k in recsavekeys and not ('~' in k or k in h5hp[h5hpname] or k=='cycletime') and isinstance(d[k], numpy.ndarray)])
for k in list(savekeys):
if k in h5g:
h5fg=h5g[k]
else:
h5fg=h5g.create_group(k)
for n, d in enumerate(hpsegdlist):
if k in d.keys():
if `n` in h5fg:
del h5fg[`n`]
ds=h5fg.create_dataset(`n`, data=d[k])
ds.attrs['recipe']=recname
ds.attrs['epoch']=time.time()
ds.attrs['savetime']=time.asctime()
#now save profile analysis
savekeys=set([k for d in hpsegdlist for k in d.keys() if k.startswith('PROFILEANALYSIS_') and k in recsavekeys and not ('~' in k or k in h5hp[h5hpname] or k=='cycletime') and isinstance(d[k], dict)])
for k in list(savekeys):
if k in h5g:
h5pa=h5g[k]
else:
h5pa=h5g.create_group(k)
for n, d in enumerate(hpsegdlist):
if k in d.keys():
if `n` in h5pa:
h5pac=h5pa[`n`]
else:
h5pac=h5pa.create_group(`n`)
for arrk, arr in d[k].iteritems():
if isinstance(arr, numpy.ndarray):
if arrk in h5pac:
del h5pac[arrk]
ds=h5pac.create_dataset(arrk, data=arr)
ds.attrs['recipe']=recname
ds.attrs['epoch']=time.time()
ds.attrs['savetime']=time.asctime()
h5file.close()
def getfitdictlist_hp(h5path, h5expname, h5hpname):
hpsdl=CreateHeatProgSegDictList(h5path, h5expname, h5hpname)
h5file=h5py.File(h5path, mode='r')
fitdlist=[]
for k in set([k for d in hpsdl for k in d.keys()]):
for i in range(len(hpsdl)):
temp=getfitdict_nameseg(h5file, h5expname, h5hpname, k, i)
if not temp is None:
fitdlist+=[temp]
h5file.close()
return fitdlist
def getfitdict_nameseg(h5pf, h5expname, h5hpname, dsname, seg):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
try:
h5ang=getcalanalysis(h5file, h5expname)[h5hpname]
nam='FITPARS_'+dsname
ds=h5ang[nam][`seg`]
h5rg=getSCrecipegrp(h5file, h5expname)[ds.attrs['recipe']]
for fcnname in h5rg.attrs['fcns']:
g=h5rg[fcnname]
if g.attrs['savename']==nam:
break
d={}
d['seg']=seg
d['dsname']=dsname
d['fitpars']=ds[:, :]
d['fcnname']=fcnname
for k in ['parnames', 'segdkeys', 'filters', 'postfilter']:
temp=g.attrs[k]
if isinstance(temp, numpy.ndarray):#these are all list of strings stored as arrays so return them to lists
temp=list(temp)
d[k]=temp
except:
d=None
if isinstance(h5pf, str):
return h5file, d
return d
def writecellres(h5path, h5expname, h5hpname, R):
h5file=h5py.File(h5path, mode='r+')
h5hpgrp=gethpgroup(h5file, h5expname, h5hpname)
h5calan=getcalanalysis(h5file, h5expname)
if not 'CellResistance' in h5calan:
temp=numpy.zeros(len(h5file.attrs['cells']), dtype='float32')
h5res=h5calan.create_dataset('CellResistance', data=temp[:])
h5res.attrs['ambient_tempC']=temp[:]
h5res=h5calan['CellResistance']
i=getindex_cell(h5file, h5hpgrp.attrs['CELLNUMBER'])
h5res[i]=R
#this doesn't work for unkonw reason: h5res.attrs['ambient_tempC'][i]=h5hpgrp.attrs['ambient_tempC']
temp=h5res.attrs['ambient_tempC'][:]
if 'ambient_tempC' in h5hpgrp.attrs.keys():
temp[i]=h5hpgrp.attrs['ambient_tempC']
else:
temp[i]=21.
h5res.attrs['ambient_tempC']=temp[:]
h5file.close()
def writecellres_calc(h5path, h5expname, h5hpname, R):
h5file=h5py.File(h5path, mode='r+')
h5hpgrp=gethpgroup(h5file, h5expname, h5hpname)
h5hpgrp.attrs['Ro']=R
h5file.close()
def experimentgrppaths(h5pf):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
p=[]
for pnt in h5file['Calorimetry'].values():
if isinstance(pnt, h5py.Group) and 'measurement' in pnt and 'analysis' in pnt:
p+=[pnt.name]
if isinstance(h5pf, str):
return h5file, p
return p
#
#h5path=os.path.join(os.getcwd(), 'TestImport.h5')
#h5expname='experiment1'
#h5hpname='2010Nov27_Cell1_1mA_50ms_500ms_Ro_1C_a'
#critms_step=1.; critmAperms_constmA=0.0005; critmA_zero=0.05
def AddRes_CreateHeatProgSegDictList(hpsegdlist, SGnpts_curr=100, SGorder_curr=3, SGnpts_volt=100, SGorder_volt=3):
for d in hpsegdlist:
if d['segmenttype'] in ['ramp', 'soak']:
            if SGnpts_curr is None or SGorder_curr is None:
c=copy.copy(d['samplecurrent'])
else:
c=numpy.array([savgolsmooth(copy.copy(x), window=SGnpts_curr, order=SGorder_curr) for x in d['samplecurrent']])
            if SGnpts_volt is None or SGorder_volt is None:
v=copy.copy(d['samplevoltage'])
else:
v=numpy.array([savgolsmooth(copy.copy(x), window=SGnpts_volt, order=SGorder_volt) for x in d['samplevoltage']])
#print d['samplecurrent'].shape, d['samplevoltage'].shape
inds=numpy.where(c<=0.)# these 3 lines will effectively remove neg and inf Res and replace them with the res value of the nearest acceptable value
c=replacevalswithneighsin2nddim(c, inds)
v=replacevalswithneighsin2nddim(v, inds)
d['sampleresistance']=v/c
def RoToAl_h5(h5path, h5expname, h5hpname):#get from the array for the experiment group, an Ro attr in the hp will be used instead if it is available, in which case also use the local To if available
#rtcpath=rescalpath_getorassign(h5path, h5expname)
h5file=h5py.File(h5path, mode='r')
rtcpath=rescalpath_getorassign(h5file, h5expname)
if not h5file:#in case ti was closed in the fcn
h5file=h5py.File(h5path, mode='r')
h5hp=gethpgroup(h5file, h5expname, h5hpname)
i=getindex_cell(h5file, h5hp.attrs['CELLNUMBER'])
restempal=list(h5file[rtcpath][i])
if 'Ro' in h5hp.attrs.keys():
restempal[0]=h5hp.attrs['Ro']
if 'ambient_tempC' in h5hp.attrs:
restempal[1]=h5hp.attrs['ambient_tempC']
if 'tcr' in h5hp.attrs.keys():
restempal[2]=h5hp.attrs['tcr']
h5file.close()
return restempal[0], restempal[1], restempal[2]
def AddTemp_CreateHeatProgSegDictList(hpsegdlist, RoToAl, SGwindow_res=None, SGorder_res=3):
for d in hpsegdlist:
if 'sampleresistance' in d.keys():
if SGwindow_res is None or SGorder_res is None:
R=copy.copy(d['sampleresistance'])
else:
R=numpy.array([savgolsmooth(copy.copy(x), window=SGwindow_res, order=SGorder_res) for x in d['sampleresistance']])
d['sampletemperature']=temp_res(R, RoToAl[0], RoToAl[1], RoToAl[2])
def tempvsms_heatprogram(h5path, h5expname, h5hpname, segind=None):
hpsegdlist=CreateHeatProgSegDictList(h5path, h5expname, h5hpname)
if segind==None:
hpsegdlist=[d for d in hpsegdlist if d['segmenttype'] in ['ramp', 'soak']]
else:
hpsegdlist=[hpsegdlist[segind]]
print len(hpsegdlist)
RoToAl=RoToAl_h5(h5path, h5expname, h5hpname)
AddRes_CreateHeatProgSegDictList(hpsegdlist)
AddTemp_CreateHeatProgSegDictList(hpsegdlist, RoToAl)
print 'tempdone'
return numpy.concatenate([d['cycletime'] for d in hpsegdlist], axis=1), numpy.concatenate([d['sampletemperature'] for d in hpsegdlist], axis=1)
def getfiltergrp(h5pf, h5expname):#will only create is passed 'r+'
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
h5an=getcalanalysis(h5file, h5expname)
if 'filter' in h5an:
h5filter=h5an['filter']
elif h5file.mode=='r':
return False
else:
h5filter=h5an.create_group('filter')
if isinstance(h5pf, str):
return h5file, h5filter
return h5filter
def getfilter(h5pf, h5expname, filtername):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
h5filter=h5file['Calorimetry'][h5expname]['analysis']['filter']
d={}
for k, v in h5filter[filtername].attrs.iteritems():
d[k]=(v=='None' and (None,) or (v,))[0]
if isinstance(d[k], numpy.ndarray):
d[k]=list(d[k])
if isinstance(h5pf, str):
return h5file, d
return d
def getfilterdict(h5pf, h5expname):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
h5filter=getfiltergrp(h5file, h5expname)
if not h5filter:
h5file.close()
return False
filterd={}
for pnt in h5filter.values():
if isinstance(pnt, h5py.Group):
d={}
nam=pnt.name.rpartition('/')[2]
for k, v in pnt.attrs.iteritems():
d[k]=(v=='None' and (None,) or (v,))[0]
if isinstance(d[k], numpy.ndarray):
d[k]=list(d[k])
if len(d.keys())==0:
continue
filterd[nam]=d
if isinstance(h5pf, str):
return h5file, filterd
return filterd
def getSCrecipegrp(h5pf, h5expname):#will create the grp only if pass an r+file
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
h5an=getcalanalysis(h5file, h5expname)
if (not 'SCrecipe' in h5an) and h5file.mode=='r+':
h5hp=h5an.create_group('SCrecipe')
else:
h5hp=h5an['SCrecipe']
if isinstance(h5pf, str):
return h5file, h5hp
return h5hp
def savefilters(h5pf, h5expname, filterd):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r+')
else:
h5file=h5pf
h5filter=getfiltergrp(h5file, h5expname)
for nam, d in filterd.iteritems():
if nam in h5filter:
del h5filter[nam]
h5g=h5filter.create_group(nam)
for k, v in d.iteritems():
h5g.attrs[k]=(v is None and ('None',) or (v,))[0]
if isinstance(h5pf, str):
h5file.close()
def saveSCrecipe(h5pf, h5expname, recname, fcns, recdlist):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r+')
else:
h5file=h5pf
h5rec=getSCrecipegrp(h5file, h5expname)
if recname in h5rec:
del h5rec[recname]
h5rg=h5rec.create_group(recname)
h5rg.attrs['fcns']=fcns
for f, d in zip(fcns, recdlist):
h5g=h5rg.create_group(f)
for k, v in d.iteritems():
h5g.attrs[k]=(v is None and ('None',) or (v,))[0]
if isinstance(h5pf, str):
h5file.close()
def getSCrecipe(h5pf, h5expname, recname):
if isinstance(h5pf, str):
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
h5rec=getSCrecipegrp(h5file, h5expname)
h5filter=getfiltergrp(h5file, h5expname)
h5rg=h5rec[recname]
fcns=h5rg.attrs['fcns']
f_saven_namsegkfilk_postfilk=[]
for f in fcns:
t=tuple([])
attrs=h5rg[f].attrs
t+=(f,)
t+=(attrs['savename'],)
t+=([(nam, (segk, filter)) for nam, segk, filter in zip(attrs['parnames'], attrs['segdkeys'], attrs['filters'])],)
t+=(attrs['postfilter'],)
f_saven_namsegkfilk_postfilk+=[t]
if isinstance(h5pf, str):
return h5file, f_saven_namsegkfilk_postfilk
return f_saven_namsegkfilk_postfilk
def copySCrecipes(h5path, h5expname, h5expsource):
h5file=h5py.File(h5path, mode='r+')
h5srcrec=getSCrecipegrp(h5file, h5expsource)
h5desrec=getSCrecipegrp(h5file, h5expname)
filters=[]
for pnt in h5srcrec.itervalues():
if isinstance(pnt, h5py.Group):
nam=pnt.name.rpartition('/')[2]
if nam in h5desrec:
del h5desrec[nam]
h5file.copy(pnt, h5desrec)
filters+=[fl for pnt2 in pnt.itervalues() if isinstance(pnt2, h5py.Group) and 'filters' in pnt2.attrs.keys() for fl in pnt2.attrs['filters']]
filters+=[pnt2.attrs['postfilter'] for pnt2 in pnt.itervalues() if isinstance(pnt2, h5py.Group) and 'postfilter' in pnt2.attrs.keys()]
filters=list(set(filters))
h5srcfil=getfiltergrp(h5file, h5expsource)
h5desfil=getfiltergrp(h5file, h5expname)
for nam in filters:
if nam in h5desfil:
del h5desfil[nam]
h5file.copy(h5srcfil[nam], h5desfil)
h5file.close()
def rescalpath_getorassign(h5pf, h5expname, parent=None, forceassign=False, title='select the experiment whose R(T) calibration will be used'):
openclose=isinstance(h5pf, str)
if openclose:
h5file=h5py.File(h5pf, mode='r')
else:
h5file=h5pf
if forceassign or not ('Res_TempCalPath' in h5file['Calorimetry'][h5expname].attrs.keys()):
if parent is None:
print 'Need to ask user for the Res Cal experiment group but UI parent not specified'
return False
p=experimentgrppaths(h5file)
expname=[n.strip('/').rpartition('/')[2] for n in p if 'Res_TempCal' in getcalanalysis(h5file, n.strip('/').rpartition('/')[2])]
if openclose:
h5file.close()
idialog=PnSC_ui.selectorDialog(parent, expname, title=title)#map(operator.itemgetter(1), pathname)
if idialog.exec_():
expname=expname[idialog.index]
reopen=(not openclose) and h5file.mode=='r'
if openclose or reopen:
h5file.close()#this is an extra close for openclose
h5file=h5py.File(h5pf, mode='r+')
path=getcalanalysis(h5file, expname)['Res_TempCal'].name
h5file['Calorimetry'][h5expname].attrs['Res_TempCalPath']=path
if openclose or reopen:
h5file.close()
if reopen:
print 'in rescalpath_getorassign, file reference was passed but needed to close and there is not a way to reopen'
return path
else:
return False
else:
path=h5file['Calorimetry'][h5expname].attrs['Res_TempCalPath']
if openclose:
h5file.close()
return path
def performreferencesubtraction(segd, segkey, filter, h5path):
ashape=segd[segkey].shape
h5file=h5py.File(h5path, mode='r')
refh5grp=h5file['REFh5path']
if not 'REFalignment' in filter.keys() or not filter['REFalignment'] in segd.keys():
ref_ind=[0 for temp in range(ashape[0])]
print 'using start of scan as alignment'
else:
k=filter['REFalignment']
arr=segd[k]
ref=refh5grp[k][:, :]
if ref.shape[0]==ashape[0]:
pass
elif ref.shape[0]<ashape[0]:
ref=numpy.array([ref[0]]*ashape[0], dtype=ref.dtype) #use the first cycles over and over again
else:
ref=ref[:ashape[0], :]
        if ref.shape[1]==ashape[1]:
ref_ind=[0 for temp in range(ashape[0])]
        elif ref.shape[1]>ashape[1]:#in this case, use the alignment dataset to find out which start index (=shift) provides minimum distance between datasets
ref_ind=[numpy.argmin([((a-r[i:i+ashape[1]])**2).sum() for i in range(ref.shape[1]-ashape[1])]) for a, r in zip(arr, ref)]
else:
print 'ABORTING: THE REFERENCE DATA MUST BE AT LEAST AS LONG AS THE DATA'
h5file.close()
return
arr=segd[segkey]
ref=refh5grp[segkey][:, :]
return numpy.array([a-r[i:i+ashape[1]] for i, a, r in zip(ref_ind, arr, ref)], dtype=arr.dtype)
def savetwopointres(h5path, resarr, savename):
f=h5py.File(h5path, mode='r+')
if 'TwoPointRes' in f:
g=f['TwoPointRes']
else:
g=f.create_group('TwoPointRes')
if savename in g:
del g[savename]
ds=g.create_dataset(savename, data=resarr)
ds.attrs['doc']='numcellsx2 array, ordered by cells and then Res in Ohms for I-I and V-V'
f.close()
def readtwopointres(h5path):
f=h5py.File(h5path, mode='r')
if 'TwoPointRes' in f:
g=f['TwoPointRes']
else:
return []
a=[]
for ds in g.itervalues():
if isinstance(ds, h5py.Dataset):
a+=[(ds.name.rpartition('/')[2], ds[:, :])]
f.close()
return a
def calcRo_extraptoTo(h5path, h5expname, h5hpname, segind, inds_calcregion, nprevsegs=1, cycind=0, o_R2poly=1, iterations=1):
hpsegdlist=CreateHeatProgSegDictList(h5path, h5expname, h5hpname)
d={}
d['I2']=hpsegdlist[segind]['samplecurrent'][cycind][inds_calcregion[0]:inds_calcregion[1]]
d['V2']=hpsegdlist[segind]['samplevoltage'][cycind][inds_calcregion[0]:inds_calcregion[1]]
#T2=hpsegdlist[segind]['sampletemperature'][cycind][inds_calcregion[0]:inds_calcregion[1]]
I1=hpsegdlist[segind]['samplecurrent'][cycind][:inds_calcregion[0]]
V1=hpsegdlist[segind]['samplevoltage'][cycind][:inds_calcregion[0]]
for i in range(1, nprevsegs+1):
I1=numpy.concatenate([I1, hpsegdlist[segind-i]['samplecurrent'][cycind][:inds_calcregion[0]]])
V1=numpy.concatenate([V1, hpsegdlist[segind-i]['samplevoltage'][cycind][:inds_calcregion[0]]])
d['I1']=I1
d['V1']=V1
d['RoToAl']=RoToAl_h5(h5path, h5expname, h5hpname)
d['o_R2poly']=o_R2poly
print d['RoToAl'][0]
for i in range(iterations):
f=calcRofromheatprogram
Ro, d2=f(**dict([(k, v) for k, v in d.iteritems() if k in f.func_code.co_varnames[:f.func_code.co_argcount]]))
for k, v in d2.iteritems():
d[k]=v
if i==0:
d['RoToAl_original']=copy.copy(d['RoToAl'])
d['RoToAl']=(Ro, d['RoToAl'][1], d['RoToAl'][2])
print d['RoToAl'][0]
return Ro, d
heatprogrammetadatafcns={\
'Cell Temperature':tempvsms_heatprogram, \
}#each must take h5path, h5expname, h5hpname as arguments
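#illustrative sketch of adding another entry (hypothetical function name), assuming it
#follows the same (h5path, h5expname, h5hpname) signature as tempvsms_heatprogram:
#    def resistancevsms_heatprogram(h5path, h5expname, h5hpname):
#        ...
#    heatprogrammetadatafcns['Cell Resistance']=resistancevsms_heatprogram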
#p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110816_Zr-Hf-B.h5'
#e='quadlinheating2'
#h='cell17_25malinquad2repeat_1_of_1'
#
#ans, d=calcRo_extraptoTo(p, e, h, 2, (50, 1000), o_R2poly=2, iterations=3)
#print 'done'
#p='F:/CHESS2011_h5MAIN/2011Jun01b_AuSiCu.h5'
#e='AuSiCuheat1_Ro'
#h='cell02_ro_test_1_of_1'
#CreateHeatProgSegDictList(p, e, h, critms_step=1., critmAperms_constmA=0.01, critdelmA_constmA=10., critmA_zero=0.1)
| bsd-3-clause |
5y/folium | examples/choropleth_states.py | 12 | 1111 | '''
Choropleth map of US states
'''
import folium
import pandas as pd
state_geo = r'us-states.json'
state_unemployment = r'US_Unemployment_Oct2012.csv'
state_data = pd.read_csv(state_unemployment)
#Let Folium determine the scale
states = folium.Map(location=[48, -102], zoom_start=3)
states.geo_json(geo_path=state_geo, data=state_data,
columns=['State', 'Unemployment'],
key_on='feature.id',
fill_color='YlGn', fill_opacity=0.7, line_opacity=0.2,
legend_name='Unemployment Rate (%)')
states.create_map(path='us_state_map.html')
#Let's define our own scale and change the line opacity
states2 = folium.Map(location=[48, -102], zoom_start=3)
states2.geo_json(geo_path=state_geo, data=state_data,
columns=['State', 'Unemployment'],
threshold_scale=[5, 6, 7, 8, 9, 10],
key_on='feature.id',
fill_color='BuPu', fill_opacity=0.7, line_opacity=0.5,
legend_name='Unemployment Rate (%)',
reset=True)
states2.create_map(path='us_state_map_2.html')
| mit |
kyleabeauchamp/HMCNotes | code/misc/test_lattice_fcp.py | 1 | 2434 | import lb_loader
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
import pandas as pd
import mdtraj as md
import pandas as pdb
import spack
import itertools
import numpy as np
from openmmtools.testsystems import build_lattice, generate_dummy_trajectory, LennardJonesFluid
def build_lattice_cell_old(r=1.0):
#r = 1.0 # Sphere size
L = r * 2.0 * (1.0 + 2 ** 0.5) # Box edge length
C = L / 2. # Half the edge length, e.g. the center
# First make planes with zero z offset
#plane = np.array([[r, r, 0], [L - r, r, 0], [C, C, 0], [r, L - r, 0], [L - r, L - r, 0]])
#midplane = np.array([[C, r, 0], [r, C, 0], [L - r, C, 0], [C, L - r, 0]])
z0 = r
z1 = C
z2 = L - r
xyz = np.concatenate((plane + z0, midplane + z1, plane + z2))
return xyz, L
def build_lattice_cell():
#xyz = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0 ,0, 1], [1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
#xyz.extend([[0, 0.5, 0.5], [0.5, 0.5, 0], [0.5, 0, 0.5], [1, 0.5, 0.5], [0.5, 0.5, 1], [0.5, 1, 1]])
xyz = [[0, 0, 0], [0, 0.5, 0.5], [0.5, 0.5, 0], [0.5, 0, 0.5]]
xyz = np.array(xyz)
return xyz
def build_lattice(n_particles):
n = ((n_particles / 4.) ** (1 / 3.))
if np.abs(n - np.round(n)) > 1E-10:
raise(ValueError("Must input 14 n^3 particles for some integer n!"))
else:
n = int(np.round(n))
xyz = []
cell = build_lattice_cell()
x, y, z = np.eye(3)
for atom, (i, j, k) in enumerate(itertools.product(np.arange(n), repeat=3)):
xi = cell + i * x + j * y + k * z
xyz.append(xi)
xyz = np.concatenate(xyz)
return xyz, n
n = 4 * (2 ** 3)
xyz, box = build_lattice(n)
traj = generate_dummy_trajectory(xyz, box)
traj.save("./out.pdb")
len(xyz)
x = np.linspace(0, 16, 5000)
y = np.array([f(xi) for xi in x])
plot(x, y)
testsystem = LennardJonesFluid(nparticles=1000, hcp=True)
system, positions = testsystem.system, testsystem.positions
temperature = 25*u.kelvin
integrator = hmc_integrators.HMCIntegrator(temperature, steps_per_hmc=25, timestep=1.0*u.femtoseconds)
context = lb_loader.build(system, integrator, positions, temperature)
context.getState(getEnergy=True).getPotentialEnergy()
mm.LocalEnergyMinimizer.minimize(context)
context.getState(getEnergy=True).getPotentialEnergy()
integrator.step(400)
context.getState(getEnergy=True).getPotentialEnergy()
| gpl-2.0 |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/MagneticApproxCheck/MHDfluid.py | 2 | 10620 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDallatonce as MHDpreconditioner
m = 5
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx+0
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
# parameters["form_compiler"]["quadrature_degree"] = 6
# parameters = CP.ParameterSetup()
mesh = UnitSquareMesh(nn,nn)
order = 1
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "DG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Magnetic, Pressure, Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Magnetic.dim(), Pressure.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(1),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, b, p, r) = TrialFunctions(W)
(v, c, q, s) = TestFunctions(W)
kappa = 1.0
Mu_m =1e1
MU = 1.0/1
IterType = 'Full'
F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
# MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n")
HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, 1e-4, params)
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New", FS = "DG")
plot(p_k, interactive = True)
b_t = TrialFunction(Velocity)
c_t = TestFunction(Velocity)
#print assemble(inner(b,c)*dx).array().shape
#print mat
#ShiftedMass = assemble(inner(mat*b,c)*dx)
#as_vector([inner(b,c)[0]*b_k[0],inner(b,c)[1]*(-b_k[1])])
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
# pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x = Iter.u_prev(u_k,b_k,p_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
plot(b_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType,"DG", SaddlePoint = "Yes")
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params,"DG",SaddlePoint = "Yes")
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(1),Expression(("0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
# FSpaces = [Velocity,Magnetic,Pressure,Lagrange]
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
# u = b.duplicate()
# P = CP.Assemble(PP)
u_is = PETSc.IS().createGeneral(range(Velocity.dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-3
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
tic()
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
FF = AA.sparray()[0:dim[0],0:dim[0]]
A,b = CP.Assemble(AA,bb)
# if iter == 1
if iter == 1:
u = b.duplicate()
F = A.getSubMatrix(u_is,u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
else:
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
F = A.getSubMatrix(u_is,u_is)
n = FacetNormal(mesh)
mat = as_matrix([[b_k[1]*b_k[1],-b_k[1]*b_k[0]],[-b_k[1]*b_k[0],b_k[0]*b_k[0]]])
            a = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) + 0.5*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - 0.5*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa*kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh())
ShiftedMass = assemble(a)
bcu.apply(ShiftedMass)
#MO.StoreMatrix(AA.sparray()[0:dim[0],0:dim[0]]+ShiftedMass.sparray(),"A")
FF = CP.Assemble(ShiftedMass)
kspF = NSprecondSetup.LSCKSPnonlinear(FF)
# if iter == 1:
if iter == 1:
u = b.duplicate()
print ("{:40}").format("MHD assemble, time: "), " ==> ",("{:4f}").format(toc()), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
print "Inititial guess norm: ", u.norm()
ksp = PETSc.KSP()
ksp.create(comm=PETSc.COMM_WORLD)
pc = ksp.getPC()
ksp.setType('gmres')
pc.setType('python')
pc.setType(PETSc.PC.Type.PYTHON)
# FSpace = [Velocity,Magnetic,Pressure,Lagrange]
reshist = {}
def monitor(ksp, its, fgnorm):
reshist[its] = fgnorm
print its," OUTER:", fgnorm
# ksp.setMonitor(monitor)
ksp.max_it = 1000
FFSS = [Velocity,Magnetic,Pressure,Lagrange]
pc.setPythonContext(MHDpreconditioner.InnerOuterMAGNETICinverse(FFSS,kspF, KSPlinearfluids[0], KSPlinearfluids[1],Fp, HiptmairMatrices[3], HiptmairMatrices[4], HiptmairMatrices[2], HiptmairMatrices[0], HiptmairMatrices[1], HiptmairMatrices[6],1e-6,FF))
# OptDB = PETSc.Options()
# OptDB['pc_factor_mat_solver_package'] = "mumps"
# OptDB['pc_factor_mat_ordering_type'] = "rcm"
# ksp.setFromOptions()
scale = b.norm()
b = b/scale
ksp.setOperators(A,A)
stime = time.time()
ksp.solve(b,u)
Soltime = time.time()- stime
NSits += ksp.its
# Mits +=dodim
u = u*scale
SolutionTime = SolutionTime +Soltime
MO.PrintStr("Number of iterations ="+str(ksp.its),60,"+","\n\n","\n\n")
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter, SaddlePoint = "Yes")
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
uOld= np.concatenate((u_k.vector().array(),b_k.vector().array(),p_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
print SolTime
import pandas as pd
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(interpolate(ue,Velocity))
# plot(p_k)
# plot(interpolate(pe,Pressure))
# plot(b_k)
# plot(interpolate(be,Magnetic))
# plot(r_k)
# plot(interpolate(re,Lagrange))
# interactive()
interactive()
| mit |
wazeerzulfikar/scikit-learn | sklearn/ensemble/tests/test_iforest.py | 27 | 8377 | """
Testing for Isolation Forest algorithm (sklearn.ensemble.iforest).
"""
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.utils.fixes import euler_gamma
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.model_selection import ParameterGrid
from sklearn.ensemble import IsolationForest
from sklearn.ensemble.iforest import _average_path_length
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from sklearn.metrics import roc_auc_score
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_iforest():
"""Check Isolation Forest for various parameter settings."""
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid({"n_estimators": [3],
"max_samples": [0.5, 1.0, 3],
"bootstrap": [True, False]})
with ignore_warnings():
for params in grid:
IsolationForest(random_state=rng,
**params).fit(X_train).predict(X_test)
def test_iforest_sparse():
"""Check IForest for various parameter settings on sparse input."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"bootstrap": [True, False]})
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in grid:
# Trained on sparse format
sparse_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train_sparse)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
def test_iforest_error():
"""Test that it gives proper exception on deficient input."""
X = iris.data
# Test max_samples
assert_raises(ValueError,
IsolationForest(max_samples=-1).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=0.0).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=2.0).fit, X)
# The dataset has less than 256 samples, explicitly setting
# max_samples > n_samples should result in a warning. If not set
# explicitly there should be no warning
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
IsolationForest(max_samples=1000).fit, X)
assert_no_warnings(IsolationForest(max_samples='auto').fit, X)
assert_no_warnings(IsolationForest(max_samples=np.int64(2)).fit, X)
assert_raises(ValueError, IsolationForest(max_samples='foobar').fit, X)
assert_raises(ValueError, IsolationForest(max_samples=1.5).fit, X)
def test_recalculate_max_depth():
"""Check max_depth recalculation when max_samples is reset to n_samples"""
X = iris.data
clf = IsolationForest().fit(X)
for est in clf.estimators_:
assert_equal(est.max_depth, int(np.ceil(np.log2(X.shape[0]))))
def test_max_samples_attribute():
X = iris.data
clf = IsolationForest().fit(X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=500)
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
clf.fit, X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=0.4).fit(X)
assert_equal(clf.max_samples_, 0.4*X.shape[0])
def test_iforest_parallel_regression():
"""Check parallel regression."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = IsolationForest(n_jobs=3,
random_state=0).fit(X_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = IsolationForest(n_jobs=1,
random_state=0).fit(X_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_iforest_performance():
"""Test Isolation Forest performs well"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
X_train = np.r_[X + 2, X - 2]
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = - clf.decision_function(X_test)
    # check that the outliers receive higher anomaly scores (ROC AUC above 0.98)
assert_greater(roc_auc_score(y_test, y_pred), 0.98)
def test_iforest_works():
# toy sample (the last two samples are outliers)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]
    # fit an IsolationForest
clf = IsolationForest(random_state=rng, contamination=0.25)
clf.fit(X)
decision_func = - clf.decision_function(X)
pred = clf.predict(X)
# assert detect outliers:
assert_greater(np.min(decision_func[-2:]), np.max(decision_func[:-2]))
assert_array_equal(pred, 6 * [1] + 2 * [-1])
def test_max_samples_consistency():
# Make sure validated max_samples in iforest and BaseBagging are identical
X = iris.data
clf = IsolationForest().fit(X)
assert_equal(clf.max_samples_, clf._max_samples)
def test_iforest_subsampled_features():
# It tests non-regression for #5732 which failed at predict.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
clf = IsolationForest(max_features=0.8)
clf.fit(X_train, y_train)
clf.predict(X_test)
def test_iforest_average_path_length():
# It tests non-regression for #8549 which used the wrong formula
# for average path length, strictly for the integer case
result_one = 2. * (np.log(4.) + euler_gamma) - 2. * 4. / 5.
result_two = 2. * (np.log(998.) + euler_gamma) - 2. * 998. / 999.
assert_almost_equal(_average_path_length(1), 1., decimal=10)
assert_almost_equal(_average_path_length(5), result_one, decimal=10)
assert_almost_equal(_average_path_length(999), result_two, decimal=10)
assert_array_almost_equal(_average_path_length(np.array([1, 5, 999])),
[1., result_one, result_two], decimal=10)
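    # For reference, the expected values above follow the corrected average path
    # length formula c(n) = 2 * (ln(n - 1) + euler_gamma) - 2 * (n - 1) / n,
    # e.g. n = 5 gives 2 * (ln(4) + gamma) - 2 * 4 / 5, i.e. result_one.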
| bsd-3-clause |
trevorstephens/gplearn | gplearn/tests/test_functions.py | 1 | 6173 | """Testing the Genetic Programming functions module."""
# Author: Trevor Stephens <trevorstephens.com>
#
# License: BSD 3 clause
import pickle
import numpy as np
from numpy import maximum
from sklearn.datasets import load_boston, load_breast_cancer
from sklearn.utils._testing import assert_equal, assert_raises
from sklearn.utils.validation import check_random_state
from gplearn.functions import _protected_sqrt, make_function
from gplearn.genetic import SymbolicRegressor, SymbolicTransformer
from gplearn.genetic import SymbolicClassifier
# load the boston dataset and randomly permute it
rng = check_random_state(0)
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# load the breast cancer dataset and randomly permute it
cancer = load_breast_cancer()
perm = check_random_state(0).permutation(cancer.target.size)
cancer.data = cancer.data[perm]
cancer.target = cancer.target[perm]
def test_validate_function():
"""Check that valid functions are accepted & invalid ones raise error"""
# Check arity tests
_ = make_function(function=_protected_sqrt, name='sqrt', arity=1)
# non-integer arity
assert_raises(ValueError, make_function, _protected_sqrt, 'sqrt', '1')
assert_raises(ValueError, make_function, _protected_sqrt, 'sqrt', 1.0)
# non-bool wrap
assert_raises(ValueError, make_function, _protected_sqrt, 'sqrt', 1, 'f')
# non-matching arity
assert_raises(ValueError, make_function, _protected_sqrt, 'sqrt', 2)
assert_raises(ValueError, make_function, maximum, 'max', 1)
# Check name test
assert_raises(ValueError, make_function, _protected_sqrt, 2, 1)
# Check return type tests
def bad_fun1(x1, x2):
return 'ni'
assert_raises(ValueError, make_function, bad_fun1, 'ni', 2)
# Check return shape tests
def bad_fun2(x1):
return np.ones((2, 1))
assert_raises(ValueError, make_function, bad_fun2, 'ni', 1)
# Check closure for negatives test
def _unprotected_sqrt(x1):
with np.errstate(divide='ignore', invalid='ignore'):
return np.sqrt(x1)
assert_raises(ValueError, make_function, _unprotected_sqrt, 'sqrt', 1)
# Check closure for zeros test
def _unprotected_div(x1, x2):
with np.errstate(divide='ignore', invalid='ignore'):
return np.divide(x1, x2)
assert_raises(ValueError, make_function, _unprotected_div, 'div', 2)
def test_function_in_program():
"""Check that using a custom function in a program works"""
def logic(x1, x2, x3, x4):
return np.where(x1 > x2, x3, x4)
logical = make_function(function=logic,
name='logical',
arity=4)
function_set = ['add', 'sub', 'mul', 'div', logical]
est = SymbolicTransformer(generations=2, population_size=2000,
hall_of_fame=100, n_components=10,
function_set=function_set,
parsimony_coefficient=0.0005,
max_samples=0.9, random_state=0)
est.fit(boston.data[:300, :], boston.target[:300])
formula = est._programs[0][906].__str__()
expected_formula = 'sub(logical(X6, add(X11, 0.898), X10, X2), X5)'
assert_equal(expected_formula, formula, True)
def test_parallel_custom_function():
"""Regression test for running parallel training with custom functions"""
def _logical(x1, x2, x3, x4):
return np.where(x1 > x2, x3, x4)
logical = make_function(function=_logical,
name='logical',
arity=4)
est = SymbolicRegressor(generations=2,
function_set=['add', 'sub', 'mul', 'div', logical],
random_state=0,
n_jobs=2)
est.fit(boston.data, boston.target)
_ = pickle.dumps(est)
# Unwrapped functions should fail
logical = make_function(function=_logical,
name='logical',
arity=4,
wrap=False)
est = SymbolicRegressor(generations=2,
function_set=['add', 'sub', 'mul', 'div', logical],
random_state=0,
n_jobs=2)
est.fit(boston.data, boston.target)
assert_raises(AttributeError, pickle.dumps, est)
# Single threaded will also fail in non-interactive sessions
est = SymbolicRegressor(generations=2,
function_set=['add', 'sub', 'mul', 'div', logical],
random_state=0)
est.fit(boston.data, boston.target)
assert_raises(AttributeError, pickle.dumps, est)
def test_parallel_custom_transformer():
"""Regression test for running parallel training with custom transformer"""
def _sigmoid(x1):
with np.errstate(over='ignore', under='ignore'):
return 1 / (1 + np.exp(-x1))
sigmoid = make_function(function=_sigmoid,
name='sig',
arity=1)
est = SymbolicClassifier(generations=2,
transformer=sigmoid,
random_state=0,
n_jobs=2)
est.fit(cancer.data, cancer.target)
_ = pickle.dumps(est)
# Unwrapped functions should fail
sigmoid = make_function(function=_sigmoid,
name='sig',
arity=1,
wrap=False)
est = SymbolicClassifier(generations=2,
transformer=sigmoid,
random_state=0,
n_jobs=2)
est.fit(cancer.data, cancer.target)
assert_raises(AttributeError, pickle.dumps, est)
# Single threaded will also fail in non-interactive sessions
est = SymbolicClassifier(generations=2,
transformer=sigmoid,
random_state=0)
est.fit(cancer.data, cancer.target)
assert_raises(AttributeError, pickle.dumps, est)
| bsd-3-clause |
zhuhuifeng/PyML | examples/svm.py | 1 | 1134 | import logging
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification
from mla.metrics.metrics import accuracy
from mla.svm.kernerls import Linear, RBF
from mla.svm.svm import SVM
logging.basicConfig(level=logging.DEBUG)
def classification():
# Generate a random binary classification problem.
X, y = make_classification(n_samples=1200, n_features=10, n_informative=5,
random_state=1111, n_classes=2, class_sep=1.75,)
# Convert y to {-1, 1}
y = (y * 2) - 1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=1111)
for kernel in [RBF(gamma=0.1), Linear()]:
model = SVM(max_iter=500, kernel=kernel, C=0.6)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print('Classification accuracy (%s): %s'
% (kernel, accuracy(y_test, predictions)))
if __name__ == '__main__':
classification()
| apache-2.0 |
equialgo/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 110 | 3768 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
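# Note: string-valued features are expanded into boolean "name=value" indicator
# columns (one-of-K coding) while numeric values keep a single column, which is
# exactly what the assertions above verify.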
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
dpressel/baseline | scripts/compare_calibrations.py | 1 | 6096 | """Plot and compare the metrics from various calibrated models.
This script creates the following:
* A csv file with columns for the Model Type (the label), and the various calibration metrics
    * A grid of graphs: the first row shows a confidence histogram for each model, and the second
      row shows the corresponding reliability diagram.
    * If the problem is binary, it creates calibration curves for each model, all plotted on the same graph.
Matplotlib is required to use this script. The `tabulate` package is recommended but not required.
The input to this script is the set of pickle files created by `$MEAD-BASELINE/api-examples/analyze_calibration.py`
"""
import csv
import pickle
import argparse
from collections import Counter
from eight_mile.calibration import (
expected_calibration_error,
maximum_calibration_error,
reliability_diagram,
reliability_curve,
confidence_histogram,
Bins,
)
import matplotlib.pyplot as plt
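# A minimal illustrative invocation (the pickle and output file names below are hypothetical):
#   python compare_calibrations.py \
#       --stats baseline.pkl temp_scaled.pkl \
#       --labels "Baseline" "Temp Scaled" \
#       --metrics-output table.csv --curve-output curve.png --diagram-output diagram.png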
parser = argparse.ArgumentParser(description="Compare calibrated models by grouping visualizations and creating a table.")
parser.add_argument("--stats", nargs="+", default=[], required=True, help="A list of pickles created by the analyze_calibration.py script to compare.")
parser.add_argument("--labels", nargs="+", default=[], required=True, help="A list of labels to assign to each pickle, should have the same number of arguments as --stats")
parser.add_argument("--metrics-output", "--metrics_output", default="table.csv", help="Filename to save the resulting metrics into as a csv")
parser.add_argument("--curve-output", "--curve_output", default="curve.png", help="Filename to save the reliability curves graph to.")
parser.add_argument("--diagram-output", "--diagram_output", default="diagram.png", help="Filename to save the reliability diagrams and confidence histograms to.")
parser.add_argument("--figsize", default=10, type=int, help="The size of the figure, controls how tall the figure is.")
args = parser.parse_args()
# Make sure the labels and stats are aligned
if len(args.stats) != len(args.labels):
raise ValueError(f"You need a label for each calibration stat you load. Got {len(args.stats)} stats and {len(args.labels)} labels")
# Make sure the labels are unique
counts = Counter(args.labels)
if any(v != 1 for v in counts.values()):
raise ValueError(f"All labels must be unique, found duplicates of {[k for k, v in counts.items() if v != 1]}")
# Load the calibration stats
stats = []
for file_name in args.stats:
with open(file_name, "rb") as f:
stats.append(pickle.load(f))
# Make sure there is the same number of bins for each model
for field in stats[0]:
if not isinstance(stats[0][field], Bins):
continue
lengths = []
for stat in stats:
if stat[field] is None:
continue
lengths.append(len(stat[field].accs))
if len(set(lengths)) != 1:
raise ValueError(f"It is meaningless to compare calibrations with different numbers of bins: Mismatch was found for {field}")
def get_metrics(data, model_type):
return {
"Model Type": model_type,
"ECE": expected_calibration_error(data.accs, data.confs, data.counts) * 100,
"MCE": maximum_calibration_error(data.accs, data.confs, data.counts) * 100,
}
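# For context (assuming the standard definitions): ECE is the bin-count-weighted
# average gap sum_b (n_b / N) * |acc_b - conf_b| and MCE is the maximum per-bin gap;
# both are reported above as percentages.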
# Calculate the metrics based on the multiclass calibration bins
metrics = [get_metrics(stat['multiclass'], label) for stat, label in zip(stats, args.labels)]
# Print the metrics
try:
# If you have tabulate installed it prints a nice postgres style table
from tabulate import tabulate
print(tabulate(metrics, headers="keys", floatfmt=".3f", tablefmt="psql"))
except ImportError:
for metric in metrics:
for k, v in metric.items():
if isinstance(v, float):
print(f"{k}: {v:.3f}")
else:
print(f"{k}: {v}")
# Write the metrics to a csv to look at later
with open(args.metrics_output, "w", newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=list(metrics[0].keys()), quoting=csv.QUOTE_MINIMAL, delimiter=",", dialect="unix")
writer.writeheader()
writer.writerows(metrics)
# Plot the histograms and graphs for each model
f, ax = plt.subplots(2, len(metrics), figsize=(args.figsize * len(metrics) // 2, args.figsize), sharey=True, sharex=True)
for i, (stat, label) in enumerate(zip(stats, args.labels)):
# If you are the first model you get y_labels, everyone else just uses yours
if i == 0:
confidence_histogram(
stat['histogram'].edges,
stat['histogram'].counts,
acc=stat['acc'],
avg_conf=stat['conf'],
title=f"{label}\nConfidence Distribution",
x_label=None,
ax=ax[0][i],
)
reliability_diagram(
stat['multiclass'].accs,
stat['multiclass'].confs,
stat['multiclass'].edges,
num_classes=stat['num_classes'],
ax=ax[1][i]
)
else:
confidence_histogram(
stat['histogram'].edges,
stat['histogram'].counts,
acc=stat['acc'],
avg_conf=stat['conf'],
title=f"{label}\nConfidence Distribution",
y_label=None,
x_label=None,
ax=ax[0][i],
)
reliability_diagram(
stat['multiclass'].accs,
stat['multiclass'].confs,
stat['multiclass'].edges,
num_classes=stat['num_classes'],
y_label=None,
ax=ax[1][i]
)
f.savefig(args.diagram_output)
plt.show()
# Plot reliability curves for binary classification models
if stats[0]['num_classes'] == 2:
f, ax = plt.subplots(1, 1, figsize=(args.figsize, args.figsize))
for stat, label, color in zip(stats, args.labels, plt.rcParams['axes.prop_cycle'].by_key()['color']):
reliability_curve(
stat['binary'].accs,
stat['binary'].confs,
color=color,
label=label,
ax=ax
)
f.savefig(args.curve_output)
plt.show()
| apache-2.0 |
gautam1858/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 13 | 6775 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support (deprecated)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
  Adapted from the sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
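    For example, ``set_params(base_estimator__max_depth=3)`` would forward
    ``max_depth=3`` to a nested ``base_estimator`` object (illustrative names).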
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
USE OF THIS EXCEPTION IS DEPRECATED.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
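# Illustrative use of the naive fallback above (it mirrors sklearn's train_test_split
# for the basic case; the array names are hypothetical):
#   X_tr, X_te, y_tr, y_te = _train_test_split(X, y, test_size=0.25, random_state=0)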
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.model_selection import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
atizo/kluster | src/kluster/clustering/pca.py | 1 | 4657 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Kluster - A clustering Web Service
#
# Copyright (C) 2011 Thomas Niederberger and individual contributors (see AUTHORS).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mpl_toolkits.mplot3d import Axes3D
from numpy import *
import csv
import logging
import matplotlib.pyplot as plt
import re
import sys
logger = logging.getLogger(__name__)
class Pair:
count = 1
def __init__(self, index):
self.index = index
class Pca(object):
    # http://de.wikipedia.org/wiki/Hauptkomponentenanalyse (German article on principal component analysis)
def __init__(self):
self.tagLines = []
def pca(self, data, dimensions = 2):
covariance = cov(transpose(data))
n = len(covariance)
# better use svd instead of eig
val, vec = linalg.eig(covariance)
eigInd = flipud(argsort(val)[(n-dimensions):n])
# remove means from original data
noMeansData = data - mean(data, 0)
loc = transpose(dot(transpose(vec[:, eigInd]), transpose(noMeansData)))
return loc
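    # Illustrative use of pca() above (hypothetical data): projecting n samples with
    # f features onto two principal components returns an (n, 2) array:
    #   loc = Pca().pca(data_matrix, dimensions=2)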
def generateTagMatrix(self, tagLines, separator=',', minMentions = 3):
lineCounter = 0
indexCounter = 0
tagCounts = {}
tagMatrix = []
for line in tagLines:
line = line.strip()
tags = re.split('[,]\s*', line)
lineMatrix = []
for tag in tags:
if tag not in tagCounts:
tagCounts[tag] = Pair(indexCounter)
indexCounter += 1
else:
tagCounts[tag].count += 1
index = tagCounts[tag].index
lineMatrix.append(index)
lineCounter += 1
tagMatrix.append(lineMatrix)
logger.debug(lineCounter)
relevantIndizes = []
for tag, pair in tagCounts.iteritems():
if pair.count >= minMentions:
logger.debug(tag + " (" + str(pair.index) + "): " + str(pair.count))
relevantIndizes.append(pair.index)
sciMatrix = zeros((lineCounter, len(relevantIndizes)), int)
for line in range(lineCounter):
tagCounter = 0
for index in relevantIndizes:
if index in tagMatrix[line]:
sciMatrix[line][tagCounter] = 1
tagCounter += 1
#sciMatrix = np.array(tagMatrix)
logger.debug(relevantIndizes)
        return sciMatrix
def add_lines(self, lines):
self.tagLines.extend(lines)
def load_csv(self, csv_file):
self.tagLines.extend(open(csv_file, 'r').readlines())
def show(self):
self._render()
plt.show()
def render_file(self, filename):
self._render()
plt.savefig(filename)
def _render(self):
nDecomp = 2
tagMatrix = self.generateTagMatrix(self.tagLines, ',', 2)
if len(tagMatrix)>0:
logger.debug(len(tagMatrix[0, :]))
logger.debug("Records: " + str(len(self.tagLines)))
location = self.pca(tagMatrix, nDecomp)
matchesRecords = sum(tagMatrix, 1)
matchesTags = sum(tagMatrix, 0)
correlation = corrcoef(tagMatrix, rowvar=0)
plt.subplot(221)
plt.title('PCA')
plt.scatter(real(location[:, 0]), real(location[:, 1]))
plt.subplot(222)
            plt.title('Correlation')
plt.imshow(correlation, interpolation='nearest')
plt.subplot(223)
            plt.title('Histogram Records')
plt.hist(matchesRecords, range(0, 10))
plt.subplot(224)
            plt.title('Histogram Tags')
            plt.hist(matchesTags, range(0, max(matchesTags) + 1))
 | gpl-3.0 |
apbard/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 36 | 1641 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints // 2
npointsf = float(npoints)
nbound = 4 # bounds for the truncated normal
normbound = (1 + 1/npointsf) * nbound # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) # integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
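# normdiscrete assigns each integer grid point the truncated-normal probability mass
# of the bin around it (the cdf differences above), giving a discrete approximation
# of the truncated normal on npoints + 1 support points.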
n_sample = 500
np.random.seed(87655678) # fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
f, l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5, scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
mikebenfield/scikit-learn | benchmarks/bench_plot_nmf.py | 28 | 15630 | """
Benchmarks of Non-Negative Matrix Factorization
"""
# Authors: Tom Dupre la Tour (benchmark)
# Chih-Jen Linn (original projected gradient NMF implementation)
# Anthony Di Franco (projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import warnings
import numbers
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.utils.testing import ignore_warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition.nmf import NMF
from sklearn.decomposition.nmf import _initialize_nmf
from sklearn.decomposition.nmf import _beta_divergence
from sklearn.decomposition.nmf import INTEGER_TYPES, _check_init
from sklearn.externals.joblib import Memory
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import fast_dot, safe_sparse_dot, squared_norm
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, check_non_negative
mem = Memory(cachedir='.', verbose=0)
###################
# Start of _PGNMF #
###################
# This class implements a projected gradient solver for the NMF.
# The projected gradient solver was removed from scikit-learn in version 0.19,
# and a simplified copy is used here for comparison purpose only.
# It is not tested, and it may change or disappear without notice.
def _norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return np.sqrt(squared_norm(x))
def _nls_subproblem(X, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtX = safe_sparse_dot(W.T, X)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtX
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.",
ConvergenceWarning)
return H, grad, n_iter
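# The subproblem above minimizes 0.5 * ||X - W H||_F^2 (plus the optional L1/L2 terms)
# over H >= 0: `grad` is its gradient W^T W H - W^T X, the projection step clips negative
# entries of H to zero, and `gamma` is adapted by an Armijo-style backtracking line search.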
def _fit_projected_gradient(X, W, H, tol, max_iter, nls_max_iter, alpha,
l1_ratio):
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
Wt, gradWt, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W, gradW = Wt.T, gradWt.T
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
Wt, _, _ = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W = Wt.T
return W, H, n_iter
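# _fit_projected_gradient alternates the NLS subproblem on W (via the transposed
# problem) and on H, tightening the sub-tolerances as it goes, until the summed
# squared projected-gradient norms fall below tol**2 times their initial value.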
class _PGNMF(NMF):
"""Non-Negative Matrix Factorization (NMF) with projected gradient solver.
This class is private and for comparison purpose only.
It may change or disappear without notice.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., nls_max_iter=10):
self.nls_max_iter = nls_max_iter
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
def fit(self, X, y=None, **params):
self.fit_transform(X, **params)
return self
def transform(self, X):
check_is_fitted(self, 'components_')
H = self.components_
W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
return W
def inverse_transform(self, W):
check_is_fitted(self, 'components_')
return np.dot(W, self.components_)
def fit_transform(self, X, y=None, W=None, H=None):
W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if (not isinstance(n_components, INTEGER_TYPES) or
n_components <= 0):
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(self.max_iter, INTEGER_TYPES) or self.max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
# check W and H, or initialize them
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=self.init,
random_state=self.random_state)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(
X, W, H, self.tol, self.max_iter, self.nls_max_iter,
self.alpha, self.l1_ratio)
else: # transform
Wt, _, n_iter = _nls_subproblem(X.T, H.T, W.T, self.tol,
self.nls_max_iter,
alpha=self.alpha,
l1_ratio=self.l1_ratio)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it"
" to improve convergence." % self.max_iter,
ConvergenceWarning)
return W, H, n_iter
#################
# End of _PGNMF #
#################
def plot_results(results_df, plot_name):
if results_df is None:
return None
plt.figure(figsize=(16, 6))
colors = 'bgr'
markers = 'ovs'
ax = plt.subplot(1, 3, 1)
for i, init in enumerate(np.unique(results_df['init'])):
plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
for j, method in enumerate(np.unique(results_df['method'])):
mask = np.logical_and(results_df['init'] == init,
results_df['method'] == method)
selected_items = results_df[mask]
plt.plot(selected_items['time'], selected_items['loss'],
color=colors[j % len(colors)], ls='-',
marker=markers[j % len(markers)],
label=method)
plt.legend(loc=0, fontsize='x-small')
plt.xlabel("Time (s)")
plt.ylabel("loss")
plt.title("%s" % init)
plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in arguments for avoiding hashing X
@mem.cache(ignore=['X', 'W0', 'H0'])
def bench_one(name, X, W0, H0, X_shape, clf_type, clf_params, init,
n_components, random_state):
W = W0.copy()
H = H0.copy()
clf = clf_type(**clf_params)
st = time()
W = clf.fit_transform(X, W=W, H=H)
end = time()
H = clf.components_
this_loss = _beta_divergence(X, W, H, 2.0, True)
duration = end - st
return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
start = time()
results = []
for name, clf_type, iter_range, clf_params in clfs:
print("Training %s:" % name)
for rs, init in enumerate(('nndsvd', 'nndsvdar', 'random')):
print(" %s %s: " % (init, " " * (8 - len(init))), end="")
W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)
for max_iter in iter_range:
clf_params['alpha'] = alpha
clf_params['l1_ratio'] = l1_ratio
clf_params['max_iter'] = max_iter
clf_params['tol'] = tol
clf_params['random_state'] = rs
clf_params['init'] = 'custom'
clf_params['n_components'] = n_components
this_loss, duration = bench_one(name, X, W, H, X.shape,
clf_type, clf_params,
init, n_components, rs)
init_name = "init='%s'" % init
results.append((name, this_loss, duration, init_name))
# print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
print(".", end="")
sys.stdout.flush()
print(" ")
# Use a panda dataframe to organize the results
results_df = pandas.DataFrame(results,
columns="method loss time init".split())
print("Total time = %0.3f sec\n" % (time() - start))
# plot the results
plot_results(results_df, plot_name)
return results_df
def load_20news():
print("Loading 20 newsgroups dataset")
print("-----------------------------")
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data)
return tfidf
def load_faces():
print("Loading Olivetti face dataset")
print("-----------------------------")
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces(shuffle=True)
return faces.data
def build_clfs(cd_iters, pg_iters, mu_iters):
clfs = [("Coordinate Descent", NMF, cd_iters, {'solver': 'cd'}),
("Projected Gradient", _PGNMF, pg_iters, {'solver': 'pg'}),
("Multiplicative Update", NMF, mu_iters, {'solver': 'mu'}),
]
return clfs
if __name__ == '__main__':
alpha = 0.
l1_ratio = 0.5
n_components = 10
tol = 1e-15
# first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
plot_name = "20 Newsgroups sparse dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 6)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_20news = load_20news()
run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)
# second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
plot_name = "Olivetti Faces dense dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 12)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_faces = load_faces()
run_bench(X_faces, clfs, plot_name, n_components, tol, alpha, l1_ratio,)
plt.show()
| bsd-3-clause |
radiasoft/radtrack | radtrack/ui/matplotlibwidget.py | 1 | 1226 | #!/usr/bin/python
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as Navigationtoolbar
from matplotlib.figure import Figure
#Embeddable matplotlib figure/canvas
class MplCanvas(FigureCanvas):
def __init__(self):
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
super(MplCanvas, self).__init__(self.fig)
super(MplCanvas, self).setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
super(MplCanvas, self).updateGeometry()
#creates embeddable matplotlib figure/canvas with toolbar
class matplotlibWidget(QtGui.QWidget):
def __init__(self, parent = None):
super(matplotlibWidget, self).__init__(parent)
self.create_framentoolbar()
def create_framentoolbar(self):
self.frame = QtGui.QWidget()
self.canvas = MplCanvas()
self.canvas.setParent(self.frame)
self.mpltoolbar = Navigationtoolbar(self.canvas, self.frame)
self.vbl = QtGui.QVBoxLayout()
self.vbl.addWidget(self.mpltoolbar)
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
| apache-2.0 |
faneshion/MatchZoo | matchzoo/datasets/quora_qp/load_data.py | 1 | 2677 | """Quora Question Pairs data loader."""
import typing
from pathlib import Path
import keras
import pandas as pd
import matchzoo
_url = "https://firebasestorage.googleapis.com/v0/b/mtl-sentence" \
"-representations.appspot.com/o/data%2FQQP.zip?alt=media&" \
"token=700c6acf-160d-4d89-81d1-de4191d02cb5"
def load_data(
stage: str = 'train',
task: str = 'classification',
return_classes: bool = False,
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load QuoraQP data.
    The data is downloaded automatically on first use and cached under the
        MatchZoo user data directory.
:param stage: One of `train`, `dev`, and `test`.
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance.
:param return_classes: Whether return classes for classification task.
:return: A DataPack if `ranking`, a tuple of (DataPack, classes) if
`classification`.
"""
if stage not in ('train', 'dev', 'test'):
        raise ValueError(f"{stage} is not a valid stage. "
                         f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f"{stage}.tsv")
data_pack = _read_data(file_path, stage)
if task == 'ranking':
task = matchzoo.tasks.Ranking()
elif task == 'classification':
task = matchzoo.tasks.Classification()
if isinstance(task, matchzoo.tasks.Ranking):
return data_pack
elif isinstance(task, matchzoo.tasks.Classification):
if stage != 'test':
data_pack.one_hot_encode_label(num_classes=2, inplace=True)
if return_classes:
return data_pack, [False, True]
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task.")
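# Illustrative calls (the dataset is downloaded and cached on first use):
#   train_pack, classes = load_data('train', task='classification', return_classes=True)
#   dev_pack = load_data('dev', task='ranking')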
def _download_data():
ref_path = keras.utils.data_utils.get_file(
'quora_qp', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='quora_qp'
)
return Path(ref_path).parent.joinpath('QQP')
def _read_data(path, stage):
data = pd.read_csv(path, sep='\t', error_bad_lines=False)
data = data.dropna(axis=0, how='any').reset_index(drop=True)
if stage in ['train', 'dev']:
df = pd.DataFrame({
'id_left': data['qid1'],
'id_right': data['qid2'],
'text_left': data['question1'],
'text_right': data['question2'],
'label': data['is_duplicate'].astype(int)
})
else:
df = pd.DataFrame({
'text_left': data['question1'],
'text_right': data['question2']
})
return matchzoo.pack(df)
| apache-2.0 |
turinglife/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
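            # (The constants above appear to be Rec. 709 YUV -> RGB conversion coefficients.)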
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
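# PRED_IDX selects the output column treated as the positive-class score when thresholding
# predictions and naming the saved prediction images below.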
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
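# Each image row uses two GridSpec rows: the taller top cell holds the image, the shorter
# bottom cell holds the horizontal bar chart of the top predicted classes.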
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
wanglei828/apollo | modules/tools/prediction/data_pipelines/mlp_train.py | 3 | 11023 | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
@requirement:
tensorflow-1.3.0
Keras-1.2.2
"""
import os
import h5py
import numpy as np
import logging
import argparse
import google.protobuf.text_format as text_format
from keras.callbacks import ModelCheckpoint
from keras.metrics import mse
from keras.models import Sequential, Model
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, Input
from keras.layers import Activation
from keras.layers import Dropout
from keras.utils import np_utils
from keras.regularizers import l2, l1
from sklearn.model_selection import train_test_split
import proto.fnn_model_pb2
from proto.fnn_model_pb2 import FnnModel, Layer
import common.log
from common.data_preprocess import load_h5
from common.data_preprocess import down_sample
from common.data_preprocess import train_test_split
from common.configure import parameters
from common.configure import labels
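# Note: common.data_preprocess.train_test_split shadows sklearn's train_test_split imported
# above, and the down_sample defined later in this file shadows the imported one; those
# shadowing versions are what the script actually calls.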
# Constants
dim_input = parameters['mlp']['dim_input']
dim_hidden_1 = parameters['mlp']['dim_hidden_1']
dim_hidden_2 = parameters['mlp']['dim_hidden_2']
dim_output = parameters['mlp']['dim_output']
train_data_rate = parameters['mlp']['train_data_rate']
evaluation_log_path = os.path.join(os.getcwd(), "evaluation_report")
common.log.init_log(evaluation_log_path, level=logging.DEBUG)
def load_data(filename):
"""
Load the data from an h5 file into numpy arrays
"""
if not (os.path.exists(filename)):
logging.error("file: {}, does not exist".format(filename))
os._exit(1)
if os.path.splitext(filename)[1] != '.h5':
logging.error("file: {} is not an hdf5 file".format(filename))
os._exit(1)
samples = dict()
h5_file = h5py.File(filename, 'r')
for key in h5_file.keys():
samples[key] = h5_file[key][:]
print("load file success")
return samples['data']
def down_sample(data):
cutin_false_drate = 0.9
go_false_drate = 0.9
go_true_drate = 0.7
cutin_true_drate = 0.0
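# These are drop rates: a sample is kept only when its random draw exceeds the rate,
# so roughly 10% of cutin-false and go-false samples and 30% of go-true samples survive,
# while every cutin-true sample is retained.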
label = data[:, -1]
size = np.shape(label)[0]
cutin_false_index = (label == -1)
go_false_index = (label == 0)
go_true_index = (label == 1)
cutin_true_index = (label == 2)
rand = np.random.random((size))
cutin_false_select = np.logical_and(cutin_false_index,
rand > cutin_false_drate)
cutin_true_select = np.logical_and(cutin_true_index,
rand > cutin_true_drate)
go_false_select = np.logical_and(go_false_index, rand > go_false_drate)
go_true_select = np.logical_and(go_true_index, rand > go_true_drate)
all_select = np.logical_or(cutin_false_select, cutin_true_select)
all_select = np.logical_or(all_select, go_false_select)
all_select = np.logical_or(all_select, go_true_select)
data_downsampled = data[all_select, :]
return data_downsampled
def get_param_norm(feature):
"""
Compute the normalization parameters (mean, std) of the samples
"""
fea_mean = np.mean(feature, axis=0)
fea_std = np.std(feature, axis=0) + 1e-6
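# The small epsilon keeps constant features from producing a zero std and dividing by zero later.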
param_norm = (fea_mean, fea_std)
return param_norm
def setup_model():
"""
Set up neural network based on keras.Sequential
"""
model = Sequential()
model.add(
Dense(
dim_hidden_1,
input_dim=dim_input,
init='he_normal',
activation='relu',
W_regularizer=l2(0.01)))
model.add(
Dense(
dim_hidden_2,
init='he_normal',
activation='relu',
W_regularizer=l2(0.01)))
model.add(
Dense(
dim_output,
init='he_normal',
activation='sigmoid',
W_regularizer=l2(0.01)))
model.compile(
loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
return model
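# Note: the layer arguments above use the Keras 1.x names (init=, W_regularizer=) matching the
# Keras 1.2.2 requirement in the header; under Keras 2 they would roughly correspond to
# kernel_initializer= and kernel_regularizer= (and nb_epoch= in fit() becomes epochs=).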
def evaluate_model(y, pred):
"""
Give the performance [recall, precision] of the NN model
Parameters
----------
y: numpy.array; true classes
pred: numpy.array; predicted classes
Returns
-------
performance dict, store the performance in log file
"""
y = y.reshape(-1)
pred = pred.reshape(-1)
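# Label encoding (matching down_sample above): -1 = cutin_false, 0 = go_false, 1 = go_true,
# 2 = cutin_true; a "positive" prediction means class 1.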
go_true = (y == labels['go_true']).sum()
go_false = (y == labels['go_false']).sum()
index_go = np.logical_or(y == labels['go_false'], y == labels['go_true'])
go_positive = (pred[index_go] == 1).sum()
go_negative = (pred[index_go] == 0).sum()
cutin_true = (y == labels['cutin_true']).sum()
cutin_false = (y == labels['cutin_false']).sum()
index_cutin = np.logical_or(y == labels['cutin_false'],
y == labels['cutin_true'])
cutin_positive = (pred[index_cutin] == 1).sum()
cutin_negative = (pred[index_cutin] == 0).sum()
logging.info("data size: {}, included:".format(y.shape[0]))
logging.info("\t True False Positive Negative")
logging.info(" Go: {:7} {:7} {:7} {:7}".format(go_true, go_false,
go_positive, go_negative))
logging.info("Cutin:{:7} {:7} {:7} {:7}".format(
cutin_true, cutin_false, cutin_positive, cutin_negative))
logging.info("--------------------SCORE-----------------------------")
logging.info(" recall precision F1-score")
ctrue = float(go_true + cutin_true)
positive = float(go_positive + cutin_positive)
tp = float((pred[y > 0.1] == 1).sum())
recall = tp / ctrue if ctrue != 0 else 0.0
precision = tp / positive if positive != 0 else 0.0
fscore = 2 * precision * recall / (
precision + recall) if precision + recall != 0 else 0.0
logging.info("Positive:{:6.3} {:6.3} {:6.3}".format(
recall, precision, fscore))
go_tp = float((pred[y == 1] == 1).sum())
go_recall = go_tp / go_true if go_true != 0 else 0.0
go_precision = go_tp / go_positive if go_positive != 0 else 0.0
go_fscore = 2 * go_precision * go_recall / (
go_precision + go_recall) if go_precision + go_recall != 0 else 0.0
logging.info(" Go:{:6.3} {:6.3} {:6.3}".format(
go_recall, go_precision, go_fscore))
cutin_tp = float((pred[y == 2] == 1).sum())
cutin_recall = cutin_tp / cutin_true if cutin_true != 0 else 0.0
cutin_precision = cutin_tp / cutin_positive if cutin_positive != 0 else 0.0
cutin_fscore = 2 * cutin_precision * cutin_recall / (
cutin_precision +
cutin_recall) if cutin_precision + cutin_recall != 0 else 0.0
logging.info(" Cutin:{:6.3} {:6.3} {:6.3}".format(
cutin_recall, cutin_precision, cutin_fscore))
logging.info("-----------------------------------------------------\n\n")
performance = {
'recall': [recall, go_recall, cutin_recall],
'precision': [precision, go_precision, cutin_precision]
}
return performance
def save_model(model, param_norm, filename):
"""
Save the trained model parameters into protobuf binary format file
"""
net_params = FnnModel()
net_params.samples_mean.columns.extend(param_norm[0].reshape(-1).tolist())
net_params.samples_std.columns.extend(param_norm[1].reshape(-1).tolist())
net_params.num_layer = 0
for layer in model.flattened_layers:
net_params.num_layer += 1
net_layer = net_params.layer.add()
config = layer.get_config()
net_layer.layer_input_dim = dim_input
net_layer.layer_output_dim = dim_output
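# Note: each serialized layer records the global dim_input/dim_output here rather than its own
# fan-in/fan-out.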
if config['activation'] == 'relu':
net_layer.layer_activation_func = proto.fnn_model_pb2.Layer.RELU
elif config['activation'] == 'tanh':
net_layer.layer_activation_func = proto.fnn_model_pb2.Layer.TANH
elif config['activation'] == 'sigmoid':
net_layer.layer_activation_func = proto.fnn_model_pb2.Layer.SIGMOID
weights, bias = layer.get_weights()
net_layer.layer_bias.columns.extend(bias.reshape(-1).tolist())
for col in weights.tolist():
row = net_layer.layer_input_weight.rows.add()
row.columns.extend(col)
net_params.dim_input = dim_input
net_params.dim_output = dim_output
with open(filename, 'wb') as params_file:
params_file.write(net_params.SerializeToString())
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train neural network based on feature files and save parameters')
parser.add_argument('filename', type=str, help='h5 file of data.')
args = parser.parse_args()
file = args.filename
data = load_data(file)
data = down_sample(data)
print ("Data load success.")
print ("data size =", data.shape)
train_data, test_data = train_test_split(data, train_data_rate)
print ("training size =", train_data.shape)
X_train = train_data[:, 0:dim_input]
Y_train = train_data[:, -1]
Y_trainc = Y_train > 0.1
X_test = test_data[:, 0:dim_input]
Y_test = test_data[:, -1]
Y_testc = Y_test > 0.1
param_norm = get_param_norm(X_train)
X_train = (X_train - param_norm[0]) / param_norm[1]
X_test = (X_test - param_norm[0]) / param_norm[1]
model = setup_model()
model.fit(X_train, Y_trainc, shuffle=True, nb_epoch=20, batch_size=32)
print ("Model trained success.")
X_test = (X_test - param_norm[0]) / param_norm[1]
score = model.evaluate(X_test, Y_testc)
print ("\nThe accuracy on testing dat is", score[1])
logging.info("Test data loss: {}, accuracy: {} ".format(
score[0], score[1]))
Y_train_hat = model.predict_classes(X_train, batch_size=32)
Y_test_hat = model.predict_proba(X_test, batch_size=32)
logging.info("## Training Data:")
evaluate_model(Y_train, Y_train_hat)
for thres in [x / 100.0 for x in range(20, 80, 5)]:
logging.info("##threshond = {} Testing Data:".format(thres))
performance = evaluate_model(Y_test, Y_test_hat > thres)
performance['accuracy'] = [score[1]]
print ("\nFor more detailed evaluation results, please refer to",
evaluation_log_path + ".log")
model_path = os.path.join(os.getcwd(), "mlp_model.bin")
save_model(model, param_norm, model_path)
print ("Model has been saved to", model_path)
| apache-2.0 |