repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---|
RPGOne/Skynet
|
scikit-learn-0.18.1/examples/linear_model/plot_lasso_model_selection.py
|
311
|
5431
|
"""
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayesian information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on the AIC/BIC criteria.

Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom, is derived
for large samples (asymptotic results) and assumes the model is
correct, i.e. that the data are actually generated by this model.
These criteria also tend to break down when the problem is badly
conditioned (more features than samples).
For cross-validation, we use 20 folds with 2 algorithms to compute the Lasso
path: coordinate descent, as implemented by the LassoCV class, and Lars
(least angle regression), as implemented by the LassoLarsCV class. Both
algorithms give roughly the same results; they differ with regard to their
execution speed and sources of numerical error.

Lars computes the path solution only at each kink in the path. As a result,
it is very efficient when there are only a few kinks, which is the case if
there are few features or samples. It is also able to compute the full path
without setting any meta-parameter. By contrast, coordinate descent computes
the path points on a pre-specified grid (here we use the default), so it is
more efficient when the number of grid points is smaller than the number of
kinks in the path. Such a strategy can be interesting if the number of
features is really large and there are enough samples for a large number of
them to be selected. In terms of numerical errors, Lars will accumulate more
errors for heavily correlated variables, while coordinate descent will only
sample the path on a grid.

Note how the optimal value of alpha varies for each fold. This illustrates
why nested cross-validation is necessary when trying to evaluate the
performance of a method for which a parameter is chosen by cross-validation:
this choice of parameter may not be optimal for unseen data. (A minimal
nested cross-validation sketch is included after the data setup below.)
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
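##############################################################################
# Aside (not part of the original example): a minimal nested cross-validation
# sketch, assuming scikit-learn >= 0.18 so that sklearn.model_selection is
# available. The outer 5-fold split scores LassoCV models whose alpha is
# chosen on the inner folds only, so the reported score is not biased by the
# alpha selection.
from sklearn.model_selection import cross_val_score
nested_scores = cross_val_score(LassoCV(cv=5), X, y, cv=5)
print("Nested CV score: %.3f +/- %.3f"
      % (nested_scores.mean(), nested_scores.std()))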
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
|
bsd-3-clause
|
blink1073/scikit-image
|
skimage/viewer/canvastools/linetool.py
|
43
|
6911
|
import numpy as np
from matplotlib import lines
from ...viewer.canvastools.base import CanvasToolBase, ToolHandles
__all__ = ['LineTool', 'ThickLineTool']
class LineTool(CanvasToolBase):
"""Widget for line selection in a plot.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, manager, on_move=None, on_release=None, on_enter=None,
maxdist=10, line_props=None, handle_props=None,
**kwargs):
super(LineTool, self).__init__(manager, on_move=on_move,
on_enter=on_enter,
on_release=on_release, **kwargs)
props = dict(color='r', linewidth=1, alpha=0.4, solid_capstyle='butt')
props.update(line_props if line_props is not None else {})
self.linewidth = props['linewidth']
self.maxdist = maxdist
self._active_pt = None
x = (0, 0)
y = (0, 0)
self._end_pts = np.transpose([x, y])
self._line = lines.Line2D(x, y, visible=False, animated=True, **props)
self.ax.add_line(self._line)
self._handles = ToolHandles(self.ax, x, y,
marker_props=handle_props)
self._handles.set_visible(False)
self.artists = [self._line, self._handles.artist]
if on_enter is None:
def on_enter(pts):
x, y = np.transpose(pts)
print("length = %0.2f" %
np.sqrt(np.diff(x)**2 + np.diff(y)**2))
self.callback_on_enter = on_enter
self.manager.add_tool(self)
@property
def end_points(self):
return self._end_pts.astype(int)
@end_points.setter
def end_points(self, pts):
self._end_pts = np.asarray(pts)
self._line.set_data(np.transpose(pts))
self._handles.set_data(np.transpose(pts))
self._line.set_linewidth(self.linewidth)
self.set_visible(True)
self.redraw()
def hit_test(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return False
idx, px_dist = self._handles.closest(event.x, event.y)
if px_dist < self.maxdist:
self._active_pt = idx
return True
else:
self._active_pt = None
return False
def on_mouse_press(self, event):
self.set_visible(True)
if self._active_pt is None:
self._active_pt = 0
x, y = event.xdata, event.ydata
self._end_pts = np.array([[x, y], [x, y]])
def on_mouse_release(self, event):
if event.button != 1:
return
self._active_pt = None
self.callback_on_release(self.geometry)
self.redraw()
def on_move(self, event):
if event.button != 1 or self._active_pt is None:
return
if not self.ax.in_axes(event):
return
self.update(event.xdata, event.ydata)
self.callback_on_move(self.geometry)
def update(self, x=None, y=None):
if x is not None:
self._end_pts[self._active_pt, :] = x, y
self.end_points = self._end_pts
@property
def geometry(self):
return self.end_points
class ThickLineTool(LineTool):
"""Widget for line selection in a plot.
The thickness of the line can be varied using the mouse scroll wheel, or
with the '+' and '-' keys.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
on_change : function
Function called whenever the line thickness is changed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, manager, on_move=None, on_enter=None, on_release=None,
on_change=None, maxdist=10, line_props=None, handle_props=None):
super(ThickLineTool, self).__init__(manager,
on_move=on_move,
on_enter=on_enter,
on_release=on_release,
maxdist=maxdist,
line_props=line_props,
handle_props=handle_props)
if on_change is None:
def on_change(*args):
pass
self.callback_on_change = on_change
def on_scroll(self, event):
if not event.inaxes:
return
if event.button == 'up':
self._thicken_scan_line()
elif event.button == 'down':
self._shrink_scan_line()
def on_key_press(self, event):
if event.key == '+':
self._thicken_scan_line()
elif event.key == '-':
self._shrink_scan_line()
def _thicken_scan_line(self):
self.linewidth += 1
self.update()
self.callback_on_change(self.geometry)
def _shrink_scan_line(self):
if self.linewidth > 1:
self.linewidth -= 1
self.update()
self.callback_on_change(self.geometry)
if __name__ == '__main__': # pragma: no cover
from ... import data
from ...viewer import ImageViewer
image = data.camera()
viewer = ImageViewer(image)
h, w = image.shape
line_tool = ThickLineTool(viewer)
line_tool.end_points = ([w/3, h/2], [2*w/3, h/2])
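    # Hedged addition (not in the original demo): wire up the on_change
    # callback so the current end points are printed whenever the line
    # thickness is changed with the scroll wheel or the '+'/'-' keys.
    def report_change(end_points):
        print("line end points:")
        print(end_points)
    line_tool.callback_on_change = report_change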
viewer.show()
|
bsd-3-clause
|
liyi193328/seq2seq
|
seq2seq/contrib/learn/learn_io/io_test.py
|
137
|
5063
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
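# Note: HAS_PANDAS and HAS_DASK come from the wildcard learn_io import above;
# they flag whether the optional pandas/dask dependencies are installed, which
# is why pylint's undefined-variable check is disabled below.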
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/pandas/tests/dtypes/test_generic.py
|
7
|
2098
|
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.core.dtypes import generic as gt
class TestABCClasses(object):
tuples = [[1, 2, 2], ['red', 'blue', 'red']]
multi_index = pd.MultiIndex.from_arrays(tuples, names=('number', 'color'))
datetime_index = pd.to_datetime(['2000/1/1', '2010/1/1'])
timedelta_index = pd.to_timedelta(np.arange(5), unit='s')
period_index = pd.period_range('2000/1/1', '2010/1/1/', freq='M')
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({'names': ['a', 'b', 'c']}, index=multi_index)
sparse_series = pd.Series([1, 2, 3]).to_sparse()
sparse_array = pd.SparseArray(np.random.randn(10))
def test_abc_types(self):
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndex)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
assert isinstance(self.multi_index, gt.ABCMultiIndex)
assert isinstance(self.datetime_index, gt.ABCDatetimeIndex)
assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex)
assert isinstance(self.period_index, gt.ABCPeriodIndex)
assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex)
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndexClass)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
assert isinstance(self.df, gt.ABCDataFrame)
with catch_warnings(record=True):
assert isinstance(self.df.to_panel(), gt.ABCPanel)
assert isinstance(self.sparse_series, gt.ABCSparseSeries)
assert isinstance(self.sparse_array, gt.ABCSparseArray)
assert isinstance(self.categorical, gt.ABCCategorical)
assert isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCPeriod)
|
mit
|
swtp1v07/Savu
|
scripts/log_evaluation/initial.py
|
1
|
1608
|
import pandas
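# evaluate() pairs "Start::<name>" / "Finish::<name>" messages from the
# fixed-width log (column 1 holds the timestamp, column 5 the message) and
# returns two dicts keyed by <name>: total elapsed time and number of calls.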
def evaluate(selected_data):
starts = selected_data[selected_data[5].str.startswith("Start::")]
ends = selected_data[selected_data[5].str.startswith("Finish::")]
summed = {}
count = {}
for i in range(len(starts)):
start = starts[i:i+1]
aa = ends[ends[1] >= start[1].base[0]]
key = start[5].base[0].split("Start::")[1].strip()
end = aa[aa[5].str.contains(key)]
if key not in summed:
summed[key] = 0
count[key] = 0
elapsed = end[1].base[0] - start[1].base[0]
summed[key] += elapsed
count[key] += 1
return (summed, count)
def process_file(filename="../../test_data/trimmed_out.log"):
data = pandas.io.parsers.read_fwf(filename, widths=[2, 13, 5, 5, 6, 1000], header=None)
machinepds = {}
for machine in data[2].unique():
threadpds = {}
for thread in data[3].unique():
sel = data[data[2] == machine][data[3] == thread][data[4] == "INFO"]
(summed, count) = evaluate(sel)
combined_data = zip(summed.keys(), summed.values(), count.values())
df = pandas.DataFrame(data = combined_data, columns=['function', 'total_time','num_calls'])
df = df.set_index('function')
threadpds[thread] = df
machinepds[machine] = pandas.concat(threadpds)
result = pandas.concat(machinepds)
result.index = result.index.reorder_levels((2, 0, 1))
return result.sort_index(0)
df = process_file()
pandas.set_option('display.height',1000)
pandas.set_option('display.max_rows',1000)
print df
|
apache-2.0
|
sseyler/PSAnalysisTutorial
|
psa_full.py
|
2
|
6754
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Example script, part of MDAnalysis
"""
Example: Comparing trajectories from different methods
========================================================
Example implementation of Path Similarity Analysis that shows how to read in a
set of trajectories, compute (discrete) Fréchet distances, and plot a heat
map-dendrogram.
This example uses the apo AdK transition between its open and closed crystal
structures as a testbed system (see [Seyler2014]). Trajectories are generated
given the known structural endpoints. A selection of ten different sampling
methods (three transitions each), plus the path of linear interpolation, was
used to generate a total of 31 closed-to-open transition paths. The
(discrete) Fréchet distances are computed (between each unique pair of
trajectories) and stored in a distance matrix. (See [Seyler2015] for further
applications of and information on PSA.)
The distance matrix is stored in a data file `discrete_frechet.dat` and a numpy
file `discrete_frechet.npy`, and the heat map-dendrogram showing Ward
hierarchical clustering of the distance matrix is also written to
`psadata/plots/df_ward_psa-full.pdf` (requires :mod:`matplotlib`).
[Seyler2014] S.L. Seyler and O. Beckstein, Sampling large conformational
transitions: adenylate kinase as a testing ground. Mol Simul 40
(2014), 855–877, doi:10.1080/08927022.2014.919497
[Seyler2015] S.L. Seyler, A. Kumar, M.F. Thorpe, and O. Beckstein, Path
Similarity Analysis: a Method for Quantifying Macromolecular
Pathways. `arXiv:1505.04807v1`_ [q-bio.QM], 2015.
.. SeeAlso:: :mod:`MDAnalysis.analysis.psa`
"""
from MDAnalysis import Universe
from MDAnalysis.analysis.align import rotation_matrix
from MDAnalysis.analysis.psa import PSAnalysis
if __name__ == '__main__':
print("Generating AdK CORE C-alpha reference coordinates and structure...")
# Read in closed/open AdK structures; work with C-alphas only
u_closed = Universe('structs/adk1AKE.pdb')
u_open = Universe('structs/adk4AKE.pdb')
ca_closed = u_closed.select_atoms('name CA')
ca_open = u_open.select_atoms('name CA')
# Move centers-of-mass of C-alphas of each structure's CORE domain to origin
adkCORE_resids = "(resid 1:29 or resid 60:121 or resid 160:214)"
u_closed.atoms.translate(-ca_closed.select_atoms(adkCORE_resids).center_of_mass())
u_open.atoms.translate(-ca_open.select_atoms(adkCORE_resids).center_of_mass())
# Get C-alpha CORE coordinates for each structure
closed_ca_core_coords = ca_closed.select_atoms(adkCORE_resids).positions
open_ca_core_coords = ca_open.select_atoms(adkCORE_resids).positions
# Compute rotation matrix, R, that minimizes rmsd between the C-alpha COREs
R, rmsd_value = rotation_matrix(open_ca_core_coords, closed_ca_core_coords)
# Rotate open structure to align its C-alpha CORE to closed structure's
# C-alpha CORE
u_open.atoms.rotate(R)
# Generate reference structure coordinates: take average positions of
# C-alpha COREs of open and closed structures (after C-alpha CORE alignment)
reference_coordinates = 0.5*(ca_closed.select_atoms(adkCORE_resids).positions
+ ca_open.select_atoms(adkCORE_resids).positions)
# Generate Universe for reference structure with above reference coordinates
u_ref = Universe('structs/adk1AKE.pdb')
u_ref.atoms.translate(-u_ref.select_atoms(adkCORE_resids).CA.center_of_mass())
u_ref.select_atoms(adkCORE_resids).CA.set_positions(reference_coordinates)
print("Building collection of simulations...")
# List of method names (same as directory names)
method_names = ['DIMS', 'FRODA', 'GOdMD', 'MDdMD', 'rTMD-F', 'rTMD-S', \
'ANMP', 'iENM', 'MAP', 'MENM-SD', 'MENM-SP', \
'Morph', 'LinInt']
labels = [] # Heat map labels
simulations = [] # List of simulation topology/trajectory filename pairs
universes = [] # List of MDAnalysis Universes representing simulations
# Build list of simulations, each represented by a pair of filenames
# ([topology filename], [trajectory filename]). Generate corresponding label
# list.
for method in method_names:
# Note: DIMS uses the PSF topology format
topname = 'top.psf' if 'DIMS' in method or 'TMD' in method else 'top.pdb'
pathname = 'path.dcd'
method_dir = 'methods/{}'.format(method)
        if method != 'LinInt':
for run in xrange(1, 4): # 3 runs per method
run_dir = '{}/{:03n}'.format(method_dir, run)
topology = '{}/{}'.format(method_dir, topname)
trajectory = '{}/{}'.format(run_dir, pathname)
labels.append(method + '(' + str(run) + ')')
simulations.append((topology, trajectory))
else: # only one LinInt trajectory
topology = '{}/{}'.format(method_dir, topname)
trajectory = '{}/{}'.format(method_dir, pathname)
labels.append(method)
simulations.append((topology, trajectory))
# Generate simulation list represented as Universes. Each item, sim, in
# simulations is a topology/trajectory filename pair that is unpacked into
# an argument list with the "splat" ("*") operator.
for sim in simulations:
universes.append(Universe(*sim))
print("Initializing Path Similarity Analysis...")
ref_selection = "name CA and " + adkCORE_resids
psa_full = PSAnalysis(universes, reference=u_ref, ref_select=ref_selection,
path_select="name CA", labels=labels)
print("Generating Path objects from aligned trajectories...")
psa_full.generate_paths(align=True, store=True)
print("Calculating Hausdorff distance matrix...")
psa_full.run(metric='hausdorff')
print("Plotting heat map-dendrogram for hierarchical (Ward) clustering...")
psa_full.plot(filename='dh_ward_psa-full.pdf', linkage='ward');
print("Plotting annotated heat map for hierarchical (Ward) clustering...")
psa_full.plot_annotated_heatmap(filename='dh_ward_psa-full_annot.pdf', \
linkage='ward');
print("Calculating (discrete) Fréchet distance matrix...")
psa_full.run(metric='discrete_frechet')
print("Plotting heat map-dendrogram for hierarchical (Ward) clustering...")
psa_full.plot(filename='df_ward_psa-full.pdf', linkage='ward');
print("Plotting annotated heat map for hierarchical (Ward) clustering...")
psa_full.plot_annotated_heatmap(filename='df_ward_psa-full_annot.pdf', \
linkage='ward');
|
gpl-3.0
|
westurner/pypfi
|
pypfi/datagenerator.py
|
1
|
8893
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
pypfi.datagenerator
====================
Installation::
pip install arrow factory-boy
Usage::
python datagenerator.py -t
python datagenerator.py -c 20
Documentation:
* https://arrow.readthedocs.org/en/latest/
* https://factoryboy.readthedocs.org/en/latest/
* http://docs.scipy.org/doc/
"""
import codecs
import datetime
import sys
import unittest
import arrow
import factory.fuzzy as fuzzy
import numpy as np
#import pandas as pd
class DateTimeGenerator(object):
"""
Generate a series of sequential arrow.Arrow datetimes
"""
def __init__(self, start_date=None, wake=6, sleep=22, payday=5):
"""
Args:
start_date (arrow.Arrow): starting date (if None, ``.now()``)
wake (int): usual wake time
sleep (int): usual sleep time
payday (int): isoweekday (1-7, 7 is Sunday)
"""
self.start_date = (
start_date or arrow.now().replace(second=0, microsecond=0))
self.wake = wake
self.sleep = sleep
self.payday = payday
self.current_date = self.start_date
self.count = 0
def next(self):
"""
Returns:
arrow.Arrow: next datetime
"""
self.count += 1
if self.count:
next_date = self.current_date.replace(hours=+2)
self.current_date = next_date
return self.current_date
def shift(self, **kwargs):
"""
Args:
kwargs (dict): arguments for arrow.Arrow.replace
"""
self.current_date = self.current_date.replace(**kwargs)
return self.current_date
EXPENSE_PREFIXES = [
"ABC",
"XYZ",
"example.com",
]
INCOME_PREFIXES = [
"Paycheck",
]
def get_prefix(prefixes, date=None, amount=None):
"""
Get a random prefix and append " " as the suffix
Args:
prefixes (list): list of string prefixes
date (arrow.Arrow): (currently unused)
amount (numeric): (currently unused)
"""
n = np.random.randint(0, len(prefixes))
return u"%s " % prefixes[n]
class DataGenerator(object):
def __init__(self, output=None,
initial_balance=1001,
date_start=None,
date_end=None,
max_count=None,
count=0):
self.output = sys.stdout if output is None else output
self.initial_balance = initial_balance
self.balance = self.initial_balance
if date_start is None:
date_start = arrow.now().replace(second=0, microsecond=0)
self.date_start = date_start
self.date_end = date_end
self.max_count = max_count
self.count = count
self.dtg = DateTimeGenerator(self.date_start)
self.current_date = self.dtg.next()
def generate(self):
"""
Generate a transactions CSV
date,desc,amount,balance
Args:
output (file-like): output to ``.write()`` to
Returns:
file-like: output
"""
yield (
self.current_date.isoformat(sep=' '),
u"Account Statement",
0,
self.balance)
self.count += 1
while True:
debit_or_credit = np.random.randint(0, 100)
if debit_or_credit < 95:
# debit
desc = fuzzy.FuzzyText(
length=10,
prefix=get_prefix(EXPENSE_PREFIXES, date=self.current_date)
).fuzz()
amount = fuzzy.FuzzyDecimal(-100, -0.50).fuzz()
else:
# credit
desc = fuzzy.FuzzyText(
length=4,
prefix=get_prefix(INCOME_PREFIXES, date=self.current_date)
).fuzz()
amount = fuzzy.FuzzyDecimal(10, 2002).fuzz()
self.balance += amount
yield (
self.current_date.isoformat(sep=' '),
desc,
str(amount),
str(self.balance))
self.count += 1
if self.max_count and self.count >= self.max_count:
break
if self.date_end and self.current_date >= self.date_end:
break
if self.balance <= 0:
# overdraft
# TODO: self.dtg.shift_to_payday()
break
self.current_date = self.dtg.next()
def datagenerator2do():
"""
Daily cycle:
- [ ] sleep/wake (weekday)
- [ ] sleep/wake (weekend)
- [ ] meals
Recurring events:
- [ ] paid on fridays
- [ ] monthly bills
Constraints:
- [ ] sleep/wake
- [ ] no overdrafts (if balance < 0, not until payday)
"""
class Test_datagenerator(unittest.TestCase):
def test_010_get_prefix(self):
PREFIXES = EXPENSE_PREFIXES
output = get_prefix(PREFIXES)
self.assertIn(output[:-1], PREFIXES)
def test_100_DateTimeGenerator(self):
dtg = DateTimeGenerator()
self.assertTrue(dtg.start_date)
self.assertTrue(dtg.wake)
self.assertTrue(dtg.sleep)
self.assertTrue(dtg.payday)
self.assertTrue(dtg.current_date)
self.assertEqual(dtg.count, 0)
for n in range(10):
output = dtg.next()
self.assertTrue(output)
self.assertTrue(isinstance(output, arrow.Arrow))
print(n, output)
current_date = dtg.current_date
output = dtg.shift(hours=+2)
expected = datetime.timedelta(0, 7200)
self.assertEqual(output - current_date, expected)
def test_200_DataGenerator(self):
MAX_COUNT = 20
dg = DataGenerator(max_count=MAX_COUNT)
self.assertTrue(dg.output)
self.assertTrue(dg.date_start)
self.assertFalse(dg.date_end)
self.assertTrue(dg.current_date)
self.assertTrue(dg.initial_balance)
self.assertTrue(dg.balance)
self.assertEqual(dg.max_count, MAX_COUNT)
self.assertEqual(dg.count, 0)
output = dg.generate()
self.assertTrue(hasattr(output, '__iter__'))
tuples = list(output)
self.assertLessEqual(dg.count, MAX_COUNT)
for l in tuples:
print(l)
self.assertLessEqual(len(tuples), MAX_COUNT)
if (dg.balance >= 0 and
(not dg.date_end or dg.date_end and dg.current_date <= dg.date_end)):
self.assertEqual(len(tuples), MAX_COUNT)
self.assertEqual(dg.count, MAX_COUNT) # +1
def main(*args):
import logging
import optparse
import sys
prs = optparse.OptionParser(
usage="%prog -o <output.csv>")
prs.add_option('-o', '--output-file',
dest='output_file')
prs.add_option('-b', '--balance',
help='Initial balance',
dest='initial_balance',
type=float,
default=10000)
prs.add_option('-c', '--count',
dest='count',
type=int,
default=None)
prs.add_option('-d', '--days',
dest='day_count',
type=int,
default=None)
prs.add_option('-v', '--verbose',
dest='verbose',
action='store_true',)
prs.add_option('-q', '--quiet',
dest='quiet',
action='store_true',)
prs.add_option('-t', '--test',
dest='run_tests',
action='store_true',)
args = args and list(args) or sys.argv[1:]
(opts, args) = prs.parse_args(args)
if not opts.quiet:
logging.basicConfig()
if opts.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if opts.run_tests:
import sys
sys.argv = [sys.argv[0]] + args
import unittest
exit(unittest.main())
import decimal
kwargs = {
'max_count': opts.count,
'initial_balance': decimal.Decimal(opts.initial_balance)
}
if opts.day_count:
date_start = arrow.now()
date_end = date_start + datetime.timedelta(opts.day_count)
#kwargs['date_start'] = date_start
kwargs['date_end'] = date_end
import csv
if opts.output_file:
with codecs.open(opts.output_file, 'w', encoding='utf-8') as f:
writer = csv.writer(f)
for row in DataGenerator(output=f, **kwargs).generate():
writer.writerow(row)
else:
writer = csv.writer(sys.stdout)
for row in DataGenerator(output=sys.stdout, **kwargs).generate():
writer.writerow(row)
return 0
if __name__ == "__main__":
sys.exit(main())
|
bsd-3-clause
|
compops/pmh-joe2015
|
para/pmh_correlatedRVs.py
|
2
|
12777
|
##############################################################################
##############################################################################
#
# correlated pmMH algorithm
#
# Copyright (c) 2016 Johan Dahlin [ johan.dahlin (at) liu.se ]
# Distributed under the MIT license.
#
##############################################################################
##############################################################################
import numpy as np
from pmh_helpers import *
from scipy.stats import norm
import pandas
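##########################################################################
# Note (added): this is a correlated pseudo-marginal MH sampler. The
# auxiliary random variables u that drive the particle filter are kept
# between iterations and refreshed with a Crank-Nicolson step,
#   u' = sqrt(1 - sigmaU^2) * u + sigmaU * eps,   eps ~ N(0, 1),
# or, with probability alpha, with a full global refresh. This correlates
# successive log-likelihood estimates and reduces the variance of the
# acceptance ratio.
##########################################################################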
##########################################################################
# Main class
##########################################################################
class stcPMH(object):
##########################################################################
    # Initialisation
##########################################################################
# The self.stepSize size and inverse Hessian for the sampler
stepSize = None;
invHessian = None;
# How many iterations should we run the sampler for and how long is the burn-in
nIter = None;
nBurnIn = None;
# When should we print a progress report? Should prior warnings be written to screen.
nProgressReport = None;
writeOutPriorWarnings = None;
# Write out to file during the run (for large simulations)
writeOutProgressToFileInterval = None;
writeOutProgressToFile = None;
fileOutName = None;
# Variables for constructing the file name when writing the output to file
filePrefix = None;
dataset = None;
# Fixed random variables for PF
rvnSamples = None;
sigmaU = None;
alpha = None;
# Wrappers
calcIACT = calcIACT_prototype;
calcSJD = calcSJD_prototype;
calcESS = calculateESS_prototype;
##########################################################################
# Main sampling routine
##########################################################################
def runSampler(self,sm,sys,thSys,proposalType='randomWalk'):
#=====================================================================
        # Initialisation
#=====================================================================
# Set file prefix from model
self.filePrefix = thSys.filePrefix;
self.iter = 0;
self.PMHtype = 'pPMH0';
self.PMHtypeN = 0;
self.nPars = thSys.nParInference;
self.T = sys.T;
self.proposalTheta = proposalType;
self.nPart = sm.nPart;
self.proposeRVs = True;
# Initialising settings and using default if no settings provided
setSettings(self,'pPMH0');
# Allocate vectors
self.ll = np.zeros((self.nIter,1))
self.llp = np.zeros((self.nIter,1))
self.th = np.zeros((self.nIter,self.nPars))
self.tho = np.zeros((self.nIter,self.nPars))
self.thp = np.zeros((self.nIter,self.nPars))
self.x = np.zeros((self.nIter,self.T))
self.xp = np.zeros((self.nIter,self.T))
self.aprob = np.zeros((self.nIter,1))
self.accept = np.zeros((self.nIter,1))
self.prior = np.zeros((self.nIter,1))
self.priorp = np.zeros((self.nIter,1))
self.J = np.zeros((self.nIter,1))
self.Jp = np.zeros((self.nIter,1))
self.proposalProb = np.zeros((self.nIter,1))
self.proposalProbP = np.zeros((self.nIter,1))
self.llDiff = np.zeros((self.nIter,1))
# Sample initial auxiliary variables (random variables)
self.rvp = np.random.normal( size=( self.rvnSamples, self.T ) );
sm.rv = self.rvp;
# Initialise the parameters in the proposal
thSys.storeParameters(self.initPar,sys);
# Run the initial filter/smoother
self.estimateLikelihood(sm,thSys);
self.acceptParameters(thSys);
# Inverse transform and then save the initial parameters and the prior
self.tho[0,:] = thSys.returnParameters();
self.prior[0] = thSys.prior()
self.J[0] = thSys.Jacobian();
thSys.invTransform();
self.th[0,:] = thSys.returnParameters();
#=====================================================================
# Main MCMC-loop
#=====================================================================
for kk in range(1,self.nIter):
self.iter = kk;
# Propose parameters
self.sampleProposal();
thSys.storeParameters( self.thp[kk,:], sys );
thSys.transform();
# Calculate acceptance probability
self.calculateAcceptanceProbability( sm, thSys );
# Accept/reject step
if ( np.random.random(1) < self.aprob[kk] ):
self.acceptParameters( thSys );
else:
self.rejectParameters( thSys );
# Write out progress report
if np.remainder( kk, self.nProgressReport ) == 0:
progressPrint( self );
# Write out progress at some intervals
if ( self.writeOutProgressToFile ):
if np.remainder( kk, self.writeOutProgressToFileInterval ) == 0:
self.writeToFile( sm );
progressPrint(self);
self.thhat = np.mean( self.th[ self.nBurnIn:self.nIter, : ] , axis=0 );
self.xhats = np.mean( self.x[ self.nBurnIn:self.nIter, : ] , axis=0 );
##########################################################################
# Sample the proposal
##########################################################################
def sampleProposal(self,):
#=====================================================================
        # Sample u using a mixture of a global move and Crank-Nicolson
#=====================================================================
u = np.random.uniform()
if ( u < self.alpha ):
# Global move
self.rvp = np.random.normal( size=(self.rvnSamples,self.T) );
else:
# Local move
self.rvp = np.sqrt( 1.0 - self.sigmaU**2 ) * self.rv + self.sigmaU * np.random.normal( size=(self.rvnSamples,self.T) );
#=====================================================================
# Sample theta using a random walk
#=====================================================================
if ( self.nPars == 1 ):
self.thp[self.iter,:] = self.th[self.iter-1,:] + self.stepSize * np.random.normal();
else:
self.thp[self.iter,:] = self.th[self.iter-1,:] + np.random.multivariate_normal(np.zeros(self.nPars), self.stepSize**2 * self.invHessian );
##########################################################################
# Calculate Acceptance Probability
##########################################################################
def calculateAcceptanceProbability(self, sm, thSys, ):
# Check the "hard prior"
if (thSys.priorUniform() == 0.0):
if (self.writeOutPriorWarnings):
print("The parameters " + str( self.thp[ self.iter,:] ) + " were proposed.");
return None;
# Run the smoother to get the ll-estimate, gradient and hessian-estimate
self.estimateLikelihood(sm,thSys);
# Compute the part in the acceptance probability related to the non-symmetric parameter proposal
proposalThP = 0;
proposalTh0 = 0;
# Compute prior and Jacobian
self.priorp[ self.iter ] = thSys.prior();
self.Jp[ self.iter ] = thSys.Jacobian();
# Compute the acceptance probability
self.aprob[ self.iter ] = np.exp( self.llp[ self.iter, :] - self.ll[ self.iter-1, :] + proposalTh0 - proposalThP + self.priorp[ self.iter, :] - self.prior[ self.iter-1, :] + self.Jp[ self.iter, :] - self.J[ self.iter-1, :] );
# Store the proposal calculations
self.proposalProb[ self.iter ] = proposalTh0;
self.proposalProbP[ self.iter ] = proposalThP;
self.llDiff[ self.iter ] = self.llp[ self.iter, :] - self.ll[ self.iter-1, :];
##########################################################################
# Run the SMC algorithm and get the required information
##########################################################################
def estimateLikelihood(self,sm,thSys):
# Set the auxiliary variables
sm.rv = self.rvp;
# Estimate the state and log-likelihood
sm.filter(thSys);
self.llp[ self.iter ] = sm.ll;
self.xp[ self.iter, : ] = sm.xtraj;
return None;
##########################################################################
# Helper if parameters are accepted
##########################################################################
def acceptParameters(self,thSys,):
self.th[self.iter,:] = self.thp[self.iter,:];
self.tho[self.iter,:] = thSys.returnParameters();
self.x[self.iter,:] = self.xp[self.iter,:];
self.ll[self.iter] = self.llp[self.iter];
self.accept[self.iter] = 1.0;
self.prior[self.iter,:] = self.priorp[self.iter,:];
self.J[self.iter,:] = self.Jp[self.iter,:];
self.rv = np.array( self.rvp, copy=True);
##########################################################################
# Helper if parameters are rejected
##########################################################################
def rejectParameters(self,thSys,):
self.th[self.iter,:] = self.th[self.iter-1,:];
self.tho[self.iter,:] = self.tho[self.iter-1,:];
self.x[self.iter,:] = self.x[self.iter-1,:];
self.ll[self.iter] = self.ll[self.iter-1];
self.prior[self.iter,:] = self.prior[self.iter-1,:]
self.J[self.iter,:] = self.J[self.iter-1,:];
##########################################################################
# Helper: compile the results and write to file
##########################################################################
def writeToFile(self,sm=None,fileOutName=None):
# Set file name from parameter
if ( ( self.fileOutName != None ) & (fileOutName == None) ):
fileOutName = self.fileOutName;
# Construct the columns labels
columnlabels = [None]*(2*self.nPars+3);
for ii in xrange(2*self.nPars+3): columnlabels[ii] = ii;
for ii in range(0,self.nPars):
columnlabels[ii] = "th" + str(ii);
columnlabels[ii+self.nPars] = "thp" + str(ii);
columnlabels[2*self.nPars] = "acceptProb";
columnlabels[2*self.nPars+1] = "loglikelihood";
columnlabels[2*self.nPars+2] = "acceptflag";
# Compile the results for output
out = np.hstack((self.th,self.thp,self.aprob,self.ll,self.accept));
# Write out the results to file
fileOut = pandas.DataFrame(out,columns=columnlabels);
if (fileOutName == None):
if hasattr(sm, 'filterType'):
if ( sm.filterType == "kf" ):
fileOutName = 'results/' + str(self.filePrefix) + '/' + str(self.PMHtype) + '_' + str(sm.filterType) + '/' + str(self.dataset) + '.csv';
else:
fileOutName = 'results/' + str(self.filePrefix) + '/' + str(self.PMHtype) + '_' + str(sm.filterType) + '_N' + str(sm.nPart) + '/' + str(self.dataset) + '.csv';
else:
# Fallback
fileOutName = 'results/' + str(self.filePrefix) + '/' + str(self.PMHtype) + '/' + str(self.dataset) + '.csv';
ensure_dir(fileOutName);
fileOut.to_csv(fileOutName);
print("writeToFile: wrote results to file: " + fileOutName)
#############################################################################################################################
# End of file
#############################################################################################################################
|
mit
|
Takonan/csc411_a3
|
basicBoosting.py
|
1
|
11498
|
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import LabelKFold
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.svm import SVC
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.metrics.scorer import check_scoring
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from utils import *
import time
import matplotlib.pyplot as plt
def run_AdaBoost(num_estimator=10, num_iter=5, include_mirror=False, do_cv=False):
# Loading data
# train_inputs, train_targets, valid_inputs, valid_targets = load_data(include_mirror)
inputs, targets, identities = load_data_with_identity(include_mirror)
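    # LabelKFold groups samples by identity so that samples sharing an
    # identity never appear in both the training and validation folds.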
lkf = LabelKFold(identities, n_folds=10)
# myClassifier = LogisticRegression()
# myClassifier = Perceptron(n_iter=num_iter)
# myClassifier = SGDClassifier(loss='perceptron',n_iter=num_iter)
clf = AdaBoostClassifier(n_estimators=num_estimator)
if do_cv:
# Do cross validation
# scores = cross_val_score(clf, train_inputs, train_targets)
scores = cross_val_score(clf, inputs, targets, cv=lkf)
print scores
print scores.mean()
return scores.mean()
    else:
        # Do just one validation (load the plain train/validation split first)
        train_inputs, train_targets, valid_inputs, valid_targets = load_data(include_mirror)
        clf.fit(train_inputs, train_targets)
        pred = clf.predict(valid_inputs)
        score = (pred == valid_targets).mean()
return score
# clf = AdaBoostClassifier(n_estimators=100)
# scores = cross_val_score(clf, train_inputs, train_targets, n_jobs=-1)
# print scores.mean()
def run_ExtremeRandFor(include_mirror=False):
train_inputs, train_targets, valid_inputs, valid_targets = load_data(include_mirror)
clf = ExtraTreesClassifier(n_estimators=100, max_depth=None, min_samples_split=1, random_state=0, n_jobs=-1)
scores = cross_val_score(clf, train_inputs, train_targets, n_jobs=-1)
print scores.mean()
def run_RandFor(include_mirror=False):
# train_inputs, train_targets, valid_inputs, valid_targets = load_data(include_mirror)
inputs, targets, identities = load_data_with_identity(False)
# inputs, targets, identities = reload_data_with_identity_normalized()
lkf = LabelKFold(identities, n_folds=10)
clf = RandomForestClassifier(n_estimators=100, max_depth=None, min_samples_split=1, random_state=0, n_jobs=-1)
scores = cross_val_score(clf, inputs, targets, n_jobs=-1, cv=lkf)
print scores
print scores.mean()
def run_Bagging(num_estimator=10, num_iter=5, include_mirror=False, do_cv=False, reload=False):
if not reload:
train_inputs, train_targets, valid_inputs, valid_targets = load_data(include_mirror)
else:
train_inputs, train_targets, valid_inputs, valid_targets, test_inputs, test_targets = reload_data_with_test_normalized()
# myClassifier = LinearSVC()
# myClassifier = RidgeClassifier()
myClassifier = Perceptron(n_iter=num_iter)
# myClassifier = SGDClassifier(loss='perceptron',n_iter=num_iter)
# myClassifier = OneVsRestClassifier(LinearSVC(random_state=0))
    # The bagging wrapper is needed by the cross-validation branch below
    clf = BaggingClassifier(base_estimator=myClassifier, n_estimators=num_estimator, n_jobs=-1)
if do_cv:
# Do cross validation
scores = cross_val_score(clf, train_inputs, train_targets)
return scores.mean()
else:
# Do just one validation
# clf.fit(train_inputs, train_targets)
pred = myClassifier.fit(train_inputs, train_targets).predict(valid_inputs)
# pred = clf.predict(valid_inputs)
score = (pred == (valid_targets)).mean()
return score
return
def run_Bagging_LabelKFold(num_estimator=10, num_iter=5, include_mirror=False, reload=False, classifier='Perceptron'):
ZCAMatrix = np.load('ZCAMatrix.npy')
if not reload:
inputs, targets, identities = load_data_with_identity(True)
inputs = inputs.reshape(inputs.shape[0], 1, 32,32) # For CNN model
inputs = preprocess_images(inputs)
inputs = inputs.reshape(inputs.shape[0],inputs.shape[1]*inputs.shape[2]*inputs.shape[3])
inputs = np.dot(inputs,ZCAMatrix)
else:
inputs, targets, identities = reload_data_with_identity_normalized()
if classifier == 'Perceptron':
myClassifier = Perceptron(n_iter=num_iter)
elif classifier == 'DecisionTree':
myClassifier = DecisionTreeClassifier()
elif classifier == 'LinearSVC':
myClassifier = LinearSVC()
elif classifier == 'RidgeClassifier':
myClassifier = RidgeClassifier()
else:
print "Classifier not recognized. Aborting..."
return
# myClassifier = SGDClassifier(loss='perceptron',n_iter=num_iter)
# myClassifier = OneVsRestClassifier(LinearSVC(random_state=0))
clf = BaggingClassifier(base_estimator=myClassifier, n_estimators=num_estimator)
lkf = LabelKFold(identities, n_folds=10)
print "Starting cross validation testing on %s bagging with %d estimators" % (classifier, num_estimator)
scores = cross_val_score(clf, inputs, targets, cv=lkf)
print scores
print scores.mean()
return scores
def run_Bagging_testset(num_estimator=100, num_iter=25, include_mirror=True):
inputs, targets, identities = load_data_with_identity(include_mirror)
x_test = load_public_test()
myClassifier = Perceptron(n_iter=num_iter)
clf = BaggingClassifier(base_estimator=myClassifier, n_estimators=num_estimator, n_jobs=-1)
clf.fit(inputs, targets)
# Predict on the training data
train_pred = clf.predict(inputs)
print classification_report(targets, train_pred)
print "Done learning, now predicting"
pred = clf.predict(x_test)
print pred
print "Saving the output test prediction"
save_output_csv("Perceptron_Bagging_test_predictions.csv", pred)
return
def run_Bagging_NumEstimator_Experiment(classifier='Perceptron'):
val_avg_score_list = np.zeros(9)
val_max_score_list = np.zeros(9)
val_scores_list = []
num_estimator_list = np.array([1,2,3, 5, 10, 25, 50, 75, 100])
for i in xrange(num_estimator_list.shape[0]):
num_estimator = num_estimator_list[i]
print "Number of num_estimator: ", num_estimator
score = run_Bagging_LabelKFold(num_estimator=num_estimator, num_iter=10, include_mirror=True, classifier=classifier)
print "Average Validation score: ", score
val_avg_score_list[i] = score.mean()
val_max_score_list[i] = score.max()
val_scores_list.append(score)
print "Val_avg_score_list: "
print val_avg_score_list
print "Val_max_score_list: "
print val_max_score_list
print "All scores:"
print val_scores_list
print "num_estimator_list: "
print num_estimator_list
# Plot the data
plt.figure()
plt.plot(num_estimator_list, val_avg_score_list, label='Avg Validation Accuracy (10 fold)')
plt.plot(num_estimator_list, val_max_score_list, label='Max Validation Accuracy (10 fold)')
plt.legend(loc=4)
    plt.title('%s Bagging Validation Accuracy vs Number of Estimators' % (classifier))
plt.xlabel('Number of Estimators')
plt.ylabel('Accuracy')
plt.savefig('%s_Bagging_ValAcc_vs_NumEstimator.png' % classifier)
plt.show()
return
def pca_SVM(normalized_intensity=False, ratio=0.25):
if not normalized_intensity:
# Perform PCA on the unlabeled data (Not include the mirror)
images = load_unlabeled_data()
start = time.time()
pca = PCA(n_components=images.shape[1]*ratio)
unlabeled_pca = pca.fit_transform(images)
elasped = time.time() - start
print "Done doing PCA fit with ratio %f" % (ratio)
print "It took %f seconds" % elasped
# Now do Kernel PCA on the unlabeled_pca
# kpca = KernelPCA(kernel="rbf", gamma=15)
# start = time.time()
# unlabeled_kpca = kpca.fit(unlabeled_pca)
# unlabeled_kpca = kpca.fit(images[0:6000])
# elasped = time.time() - start
# print "Done Kernel PCA fit"
# print "It took %f seconds" % elasped
# # Perform SVM on the PCA transformed data
# train_inputs, train_targets, valid_inputs, valid_targets, test_inputs, test_targets = load_data_with_test(True)
# train_inputs = pca.transform(train_inputs)
# valid_inputs = pca.transform(valid_inputs)
# test_inputs = pca.transform(test_inputs)
# Train one vs one SVM's
clf = SVC()
# clf.fit(train_inputs, train_targets)
# val_pred = clf.predict(valid_inputs)
# print valid_targets
# print val_pred
# print accuracy_score(valid_targets, val_pred)
# print(classification_report(valid_targets, val_pred))
# test_pred = clf.predict(test_inputs)
# print test_targets
# print test_pred
# print accuracy_score(test_targets, test_pred)
# print(classification_report(test_targets, test_pred))
inputs, targets, identities = load_data_with_identity(True)
# inputs = kpca.transform(inputs)
inputs = pca.transform(inputs)
print "Dimension of inputs:", inputs.shape
lkf = LabelKFold(identities, n_folds=3)
# for train_index, test_index in lkf:
# print("TRAIN:", train_index, "TEST:", test_index)
# X_train, X_test = inputs[train_index], inputs[test_index]
# y_train, y_test = targets[train_index], targets[test_index]
    # Now do the legit cross validation:
scores = cross_val_score(clf, inputs, targets, cv=lkf, n_jobs=-1)
print scores.mean()
return
if __name__ == '__main__':
#print "Running classification algorithms with original training data set:"
#start = time.time()
#run_AdaBoost(num_estimator=500, include_mirror=True, do_cv=True)
#elasped = time.time() - start
#print "Elasped time: ", elasped
# # run_ExtremeRandFor()
# run_RandFor()
# start = time.time()
# run_Bagging()
# elasped = time.time() - start
# print "Elasped time: ", elasped
# print "Running classification algorithms with original training data set and mirrorred data set:"
# # run_AdaBoost(True)
# # run_ExtremeRandFor(True)
# # run_RandFor(True)
# start = time.time()
# run_Bagging(True)
# elasped = time.time() - start
# print "Elasped time: ", elasped
#for num_estimator in [100]: #[10, 25, 50]:
# for num_iter in [25]: #[5, 10, 25, 50]:
# # print "Original Set, num_estimator: %d, num_iter: %d, accuracy: %f" % (num_estimator, num_iter, run_Bagging_LabelKFold(num_estimator, num_iter, False, False))
# print "Original + Mirrored Set, num_estimator: %d, num_iter: %d, accuracy: %f" % (num_estimator, num_iter, run_Bagging_LabelKFold(num_estimator, num_iter, True, False))
# pca_SVM()
run_Bagging_NumEstimator_Experiment(classifier='Perceptron')
|
bsd-3-clause
|
imaculate/scikit-learn
|
examples/ensemble/plot_gradient_boosting_quantile.py
|
392
|
2114
|
"""
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
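# Note (added): with loss='quantile', the regressor estimates the alpha-th
# conditional quantile, so fitting once with alpha=0.95 and once with
# alpha=1 - 0.95 = 0.05 brackets a 90% prediction interval, while loss='ls'
# gives the usual conditional-mean prediction.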
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval given by
# the 5th and 95th percentile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
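# Aside (not part of the original example): a quick empirical check of the
# interval on the training points; roughly 90% of the noisy observations
# should fall between the lower and upper quantile predictions.
y_lo_train = clf.set_params(loss='quantile', alpha=0.05).fit(X, y).predict(X)
y_hi_train = clf.set_params(alpha=0.95).fit(X, y).predict(X)
coverage = np.mean((y >= y_lo_train) & (y <= y_hi_train))
print("Empirical coverage on the training data: %.2f" % coverage)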
|
bsd-3-clause
|
thientu/scikit-learn
|
sklearn/neighbors/tests/test_neighbors.py
|
76
|
45197
|
from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
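# (A callable such as _weight_func can be passed as the ``weights`` parameter
# of the nearest-neighbor estimators, e.g.
# neighbors.KNeighborsClassifier(weights=_weight_func), alongside the built-in
# 'uniform' and 'distance' options.)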
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
                elif False:
                    # Disabled check (dead branch kept for reference):
                    # outlier labeling itself is exercised in
                    # test_radius_neighbors_classifier_outlier_labeling below.
                    assert_array_equal(np.array([1, outlier_label]),
                                       clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since the user is expected
        # to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    # Test radius neighbors classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
    # Sanity checks on the iris dataset:
    # a 1-NN classifier should memorise the training data exactly, and
    # larger neighborhoods should still give high training accuracy.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if issubclass(cls, (neighbors.KNeighborsClassifier,
                            neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
    # Raise an error when wrong parameters are supplied.
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors and related methods when the query is not the
    # training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors and related methods when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
|
bsd-3-clause
|
pauliacomi/pyGAPS
|
src/pygaps/core/pointisotherm.py
|
1
|
46778
|
"""
This module contains the main class that describes an isotherm through discrete points.
"""
import logging
logger = logging.getLogger('pygaps')
import textwrap
import numpy
import pandas
from ..graphing.isotherm_graphs import plot_iso
from ..utilities.converter_mode import c_loading
from ..utilities.converter_mode import c_material
from ..utilities.converter_mode import c_pressure
from ..utilities.exceptions import CalculationError
from ..utilities.exceptions import ParameterError
from ..utilities.isotherm_interpolator import IsothermInterpolator
from .baseisotherm import BaseIsotherm
class PointIsotherm(BaseIsotherm):
"""
Class which contains the points from an adsorption isotherm.
This class is designed to be a complete description of a discrete isotherm.
It extends the Isotherm class, which contains all the description of the
isotherm parameters, but also holds the datapoints recorded during an experiment
or simulation.
The minimum arguments required to instantiate the class, besides those required for
the parent Isotherm, are data, specified either as pressure and loading arrays or
as isotherm_data (a pandas dataframe containing the discrete points), as well as
string keys for the columns of the dataframe which have the loading and the pressure data.
Parameters
----------
pressure : list
Create an isotherm directly from an array. Values for pressure.
If the ``isotherm_data`` dataframe is specified, these values are ignored.
loading : list
Create an isotherm directly from an array. Values for loading.
If the ``isotherm_data`` dataframe is specified, these values are ignored.
isotherm_data : DataFrame
Pure-component adsorption isotherm data.
pressure_key : str
The title of the pressure data in the DataFrame provided.
loading_key : str
The title of the loading data in the DataFrame provided.
other_keys : iterable
Other pandas DataFrame columns which contain data to be stored.
    branch : ['guess', 'ads', 'des', iterable], optional
        The branch of the isotherm. The code will automatically attempt to
        guess if there's an adsorption and a desorption branch.
        The user can instead tell the framework that all points are
        part of an adsorption ('ads') or desorption ('des') curve.
        Alternatively, an iterable can be passed which specifies, for each
        data point, whether it is an adsorption point (False) or a
        desorption point (True), e.g. [False, False, True, True, ...];
        the same information can also be supplied as a 'branch' column of
        the isotherm_data DataFrame.
material : str
Name of the material on which the isotherm is measured.
adsorbate : str
Isotherm adsorbate.
temperature : float
Isotherm temperature.
Other Parameters
----------------
material_basis : str, optional
        Whether the adsorption is read in terms of either 'per volume',
'per molar amount' or 'per mass' of material.
material_unit : str, optional
Unit in which the material basis is expressed.
loading_basis : str, optional
        Whether the adsorbed material is read in terms of either 'volume',
'molar' or 'mass'.
loading_unit : str, optional
Unit in which the loading basis is expressed.
pressure_mode : str, optional
The pressure mode, either 'absolute' pressures or 'relative' in
the form of p/p0.
pressure_unit : str, optional
Unit of pressure.
Notes
-----
This class assumes that the datapoints do not contain noise.
Detection of adsorption/desorption branches will not work if
data is noisy.
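    Examples
    --------
    A minimal sketch of direct instantiation from arrays; the material,
    adsorbate and numeric values below are illustrative only.
    >>> import pygaps
    >>> iso = pygaps.PointIsotherm(
    ...     pressure=[0.1, 0.5, 1.0, 2.0],
    ...     loading=[0.5, 2.0, 3.5, 4.1],
    ...     material='sample-1',
    ...     adsorbate='nitrogen',
    ...     temperature=77,
    ... )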
"""
_reserved_params = [
'data_raw',
'l_interpolator',
'p_interpolator',
'loading_key',
'pressure_key',
'other_keys',
]
##########################################################
# Instantiation and classmethods
def __init__(
self,
pressure=None,
loading=None,
isotherm_data=None,
pressure_key=None,
loading_key=None,
other_keys=None,
branch='guess',
**isotherm_parameters
):
"""
        Instantiation is done by passing the discrete data as a pandas
        DataFrame, the column keys as strings, as well as the parameters
        required by the parent class.
"""
# Checks
if isotherm_data is not None:
if None in [pressure_key, loading_key]:
raise ParameterError(
"Pass loading_key and pressure_key, the names of the loading and"
" pressure columns in the DataFrame, to the constructor."
)
# Save column names
# Name of column in the dataframe that contains adsorbed amount.
self.loading_key = loading_key
# Name of column in the dataframe that contains pressure.
self.pressure_key = pressure_key
            # List of columns in the dataframe that contain other data.
if other_keys:
self.other_keys = other_keys
else:
self.other_keys = []
# Pandas DataFrame that stores the data.
columns = [self.pressure_key, self.loading_key
] + sorted(self.other_keys)
if not all([a in isotherm_data.columns for a in columns]):
raise ParameterError(
"Could not find specified columns "
f"({[a for a in columns if a not in isotherm_data.columns]})"
" in the adsorption DataFrame."
)
if 'branch' in isotherm_data.columns:
columns.append('branch')
self.data_raw = isotherm_data.reindex(columns=columns)
elif pressure is not None or loading is not None:
if pressure is None or loading is None:
raise ParameterError(
"If you've chosen to pass loading and pressure directly as"
" arrays, make sure both are specified!"
)
if len(pressure) != len(loading):
raise ParameterError(
"Pressure and loading arrays are not equal!"
)
if other_keys:
raise ParameterError(
"Cannot specify other isotherm components in this mode."
" Use the ``isotherm_data`` method."
)
# Standard column names
self.pressure_key = 'pressure'
self.loading_key = 'loading'
self.other_keys = []
# DataFrame creation
self.data_raw = pandas.DataFrame({
self.pressure_key: pressure,
self.loading_key: loading
})
else:
raise ParameterError(
"Pass either the isotherm data in a pandas.DataFrame as ``isotherm_data``"
" or directly ``pressure`` and ``loading`` as arrays."
)
# Run base class constructor
BaseIsotherm.__init__(self, **isotherm_parameters)
# Deal with the isotherm branches
if 'branch' in self.data_raw.columns:
pass
elif isinstance(branch, str):
if branch == 'guess':
# Split the data in adsorption/desorption
self.data_raw.loc[:, 'branch'] = self._splitdata(
self.data_raw, self.pressure_key
)
elif branch == 'ads':
self.data_raw.loc[:, 'branch'] = False
elif branch == 'des':
self.data_raw.loc[:, 'branch'] = True
else:
                raise ParameterError(
                    "Isotherm branch parameter must be 'guess', 'ads' or 'des'"
" or an array of booleans."
)
else:
try:
self.data_raw['branch'] = branch
except Exception as e_info:
raise ParameterError(e_info)
# The internal interpolator for loading given pressure.
self.l_interpolator = None
# The internal interpolator for pressure given loading.
self.p_interpolator = None
@classmethod
def from_isotherm(
cls,
isotherm,
pressure=None,
loading=None,
isotherm_data=None,
pressure_key=None,
loading_key=None,
other_keys=None
):
"""
Construct a point isotherm using a parent isotherm as the template for
all the parameters.
Parameters
----------
isotherm : Isotherm
An instance of the Isotherm parent class.
pressure : list
Create an isotherm directly from an array. Values for pressure.
If the ``isotherm_data`` dataframe is specified, these values are ignored.
loading : list
Create an isotherm directly from an array. Values for loading.
If the ``isotherm_data`` dataframe is specified, these values are ignored.
isotherm_data : DataFrame
Pure-component adsorption isotherm data.
loading_key : str
Column of the pandas DataFrame where the loading is stored.
pressure_key : str
Column of the pandas DataFrame where the pressure is stored.
"""
# get isotherm parameters as a dictionary
iso_params = isotherm.to_dict()
# add pointisotherm values to dict
iso_params['pressure'] = pressure
iso_params['loading'] = loading
iso_params['isotherm_data'] = isotherm_data
iso_params['pressure_key'] = pressure_key
iso_params['loading_key'] = loading_key
iso_params['other_keys'] = other_keys
return cls(**iso_params)
@classmethod
def from_modelisotherm(cls, modelisotherm, pressure_points=None):
"""
        Construct a PointIsotherm from a ModelIsotherm class.
This class method allows for the model to be converted into
a list of points calculated by using the model in the isotherm.
Parameters
----------
modelisotherm : ModelIsotherm
The isotherm containing the model.
pressure_points : None or List or PointIsotherm
How the pressure points should be chosen for the resulting PointIsotherm.
- If ``None``, the PointIsotherm returned has a fixed number of
equidistant points
- If an array, the PointIsotherm returned has points at each of the
values of the array
- If a PointIsotherm is passed, the values will be calculated at each
of the pressure points in the passed isotherm. This is useful for
comparing a model overlap with the real isotherm.
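        Examples
        --------
        A short sketch; ``model_iso`` stands for an existing ModelIsotherm
        and is illustrative only.
        >>> point_iso = PointIsotherm.from_modelisotherm(
        ...     model_iso, pressure_points=[0.1, 0.5, 1.0, 2.0]
        ... )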
"""
if pressure_points is None:
pressure = modelisotherm.pressure()
elif isinstance(pressure_points, PointIsotherm):
pressure = pressure_points.pressure(branch=modelisotherm.branch)
else:
pressure = pressure_points
# TODO: in case the model isotherm calculates pressure from loading
# this is not ideal
return PointIsotherm(
isotherm_data=pandas.DataFrame({
'pressure':
pressure,
'loading':
modelisotherm.loading_at(pressure)
}),
loading_key='loading',
pressure_key='pressure',
**modelisotherm.to_dict()
)
##########################################################
# Conversion functions
def convert(
self,
pressure_mode: str = None,
pressure_unit: str = None,
loading_basis: str = None,
loading_unit: str = None,
material_basis: str = None,
material_unit: str = None,
verbose: bool = False,
):
"""
Convenience function for permanently converting any isotherm
mode/basis/units.
Parameters
----------
        pressure_mode : {'absolute', 'relative', 'relative%'}
            The pressure mode to which the isotherm should be converted.
        pressure_unit : str
            The unit to which the internal pressure should be converted.
            Only makes sense if converting to absolute pressure.
        loading_basis : {'mass', 'molar', 'volume', 'percent', 'fraction'}
            The basis to which the isotherm loading should be converted.
        loading_unit : str
            The unit to which the internal loading should be converted.
        material_basis : {'mass', 'molar', 'volume'}
            The basis to which the material amount should be converted.
        material_unit : str
            The unit to which the material amount should be converted.
verbose : bool
Print out steps taken.
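        Examples
        --------
        A hedged sketch; the units and modes below are illustrative and
        require the relevant adsorbate/material properties to be available.
        >>> isotherm.convert(loading_basis='molar', loading_unit='mmol')
        >>> isotherm.convert(pressure_mode='relative')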
"""
if pressure_mode or pressure_unit:
self.convert_pressure(
mode_to=pressure_mode,
unit_to=pressure_unit,
verbose=verbose,
)
if material_basis or material_unit:
self.convert_material(
basis_to=material_basis,
unit_to=material_unit,
verbose=verbose,
)
if loading_basis or loading_unit:
self.convert_loading(
basis_to=loading_basis,
unit_to=loading_unit,
verbose=verbose,
)
def convert_pressure(
self,
mode_to: str = None,
unit_to: str = None,
verbose: bool = False,
):
"""
Convert isotherm pressure from one unit to another
and the pressure mode from absolute to relative.
        Only applicable in the case of isotherms taken below the critical
        point of the adsorbate.
Parameters
----------
mode_to : {'absolute', 'relative', 'relative%'}
The mode in which the isotherm should be converted.
unit_to : str
            The unit to which the internal pressure should be converted.
Only makes sense if converting to absolute pressure.
verbose : bool
Print out steps taken.
"""
if not mode_to:
mode_to = self.pressure_mode
if mode_to == self.pressure_mode and unit_to == self.pressure_unit:
if verbose:
logger.info("Mode and units are the same, no changes made.")
return
self.data_raw[self.pressure_key] = c_pressure(
self.data_raw[self.pressure_key],
mode_from=self.pressure_mode,
mode_to=mode_to,
unit_from=self.pressure_unit,
unit_to=unit_to,
adsorbate=self.adsorbate,
temp=self.temperature
)
if mode_to != self.pressure_mode:
self.pressure_mode = mode_to
if unit_to != self.pressure_unit and mode_to == 'absolute':
self.pressure_unit = unit_to
else:
self.pressure_unit = None
# Reset interpolators
self.l_interpolator = None
self.p_interpolator = None
if verbose:
logger.info(
f"Changed pressure to mode '{mode_to}', unit '{unit_to}'."
)
def convert_loading(
self,
basis_to: str = None,
unit_to: str = None,
verbose: bool = False,
):
"""
Convert isotherm loading from one unit to another
and the basis of the isotherm loading to be
either 'mass', 'molar' or 'percent'/'fraction'.
Parameters
----------
basis_to : {'mass', 'molar', 'volume', 'percent', 'fraction'}
The basis in which the isotherm should be converted.
unit_to : str
            The unit to which the internal loading should be converted.
verbose : bool
Print out steps taken.
"""
if not basis_to:
basis_to = self.loading_basis
if basis_to == self.loading_basis and unit_to == self.loading_unit:
if verbose:
logger.info("Basis and units are the same, no changes made.")
return
if self.loading_basis in ['percent', 'fraction']:
if basis_to == self.loading_basis and unit_to != self.loading_unit:
if verbose:
logger.info("There are no loading units in this mode.")
return
self.data_raw[self.loading_key] = c_loading(
self.data_raw[self.loading_key],
basis_from=self.loading_basis,
basis_to=basis_to,
unit_from=self.loading_unit,
unit_to=unit_to,
adsorbate=self.adsorbate,
temp=self.temperature,
basis_material=self.material_basis,
unit_material=self.material_unit,
)
if basis_to != self.loading_basis:
self.loading_basis = basis_to
if unit_to != self.loading_unit and basis_to not in [
'percent', 'fraction'
]:
self.loading_unit = unit_to
else:
self.loading_unit = None
# Reset interpolators
self.l_interpolator = None
self.p_interpolator = None
if verbose:
logger.info(
f"Changed loading to basis '{basis_to}', unit '{unit_to}'."
)
def convert_material(
self,
basis_to: str = None,
unit_to: str = None,
verbose: bool = False
):
"""
Convert the material of the isotherm from one unit to another and the
basis of the isotherm loading to be either 'per mass' or 'per volume' or
'per mole' of material.
Only applicable to materials that have been loaded in memory with a
'density' or 'molar mass' property respectively.
Parameters
----------
        basis_to : {'mass', 'molar', 'volume'}
            The basis to which the material amount should be converted.
        unit_to : str
            The unit to which the material amount should be converted.
verbose : bool
Print out steps taken.
"""
if not basis_to:
basis_to = self.material_basis
if basis_to == self.material_basis and unit_to == self.material_unit:
if verbose:
logger.info("Basis and units are the same, no changes made.")
return
if (
self.loading_basis in ['percent', 'fraction']
and basis_to == self.material_basis
and unit_to != self.material_unit
):
# We "virtually" change the unit without any conversion
self.material_unit = unit_to
if verbose:
logger.info("There are no material units in this mode.")
return
self.data_raw[self.loading_key] = c_material(
self.data_raw[self.loading_key],
basis_from=self.material_basis,
basis_to=basis_to,
unit_from=self.material_unit,
unit_to=unit_to,
material=self.material
)
# A special case is when conversion is performed from
# a "fractional" basis to another "fractional" basis.
# Here, the loading must be simultaneously converted.
# e.g.: wt% = g/g -> cm3/cm3 = vol%
if self.loading_basis in ['percent', 'fraction']:
self.data_raw[self.loading_key] = c_loading(
self.data_raw[self.loading_key],
basis_from=self.material_basis,
basis_to=basis_to,
unit_from=self.material_unit,
unit_to=unit_to,
adsorbate=self.adsorbate,
temp=self.temperature,
)
if verbose:
logger.info(
f"Changed loading to basis '{basis_to}', unit '{unit_to}'."
)
if unit_to != self.material_unit:
self.material_unit = unit_to
if basis_to != self.material_basis:
self.material_basis = basis_to
# Reset interpolators
self.l_interpolator = None
self.p_interpolator = None
if verbose:
logger.info(
f"Changed material to basis '{basis_to}', unit '{unit_to}'."
)
###########################################################
# Info functions
def print_info(self, **plot_iso_args):
"""
Print a short summary of all the isotherm parameters and a graph.
Parameters
----------
show : bool, optional
Specifies if the graph is shown automatically or not.
Other Parameters
----------------
plot_iso_args : dict
options to be passed to pygaps.plot_iso()
Returns
-------
axes : matplotlib.axes.Axes or numpy.ndarray of them
"""
print(self)
return self.plot(**plot_iso_args)
def plot(self, **plot_iso_args):
"""
Plot the isotherm using pygaps.plot_iso().
Parameters
----------
show : bool, optional
Specifies if the graph is shown automatically or not.
Other Parameters
----------------
plot_iso_args : dict
options to be passed to pygaps.plot_iso()
Returns
-------
axes : matplotlib.axes.Axes or numpy.ndarray of them
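        Examples
        --------
        A minimal sketch:
        >>> ax = isotherm.plot()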
"""
plot_dict = dict(
y2_data=self.other_keys[0] if self.other_keys else None,
material_basis=self.material_basis,
material_unit=self.material_unit,
loading_basis=self.loading_basis,
loading_unit=self.loading_unit,
pressure_unit=self.pressure_unit,
pressure_mode=self.pressure_mode,
fig_title=self.material,
lgd_keys=['branch'],
)
plot_dict.update(plot_iso_args)
return plot_iso(self, **plot_dict)
##########################################################
# Functions that return part of the isotherm data
def data(self, branch=None):
"""
Return underlying isotherm data.
Parameters
----------
branch : {None, 'ads', 'des'}
The branch of the isotherm to return. If ``None``, returns entire
dataset.
Returns
-------
DataFrame
The pandas DataFrame containing all isotherm data.
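        Examples
        --------
        A short sketch:
        >>> isotherm.data(branch='ads').head()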
"""
if branch is None:
return self.data_raw
elif branch == 'ads':
return self.data_raw.loc[~self.data_raw['branch']]
elif branch == 'des':
return self.data_raw.loc[self.data_raw['branch']]
raise ParameterError('Bad branch specification.')
def pressure(
self,
branch=None,
pressure_unit=None,
pressure_mode=None,
limits=None,
indexed=False
):
"""
Return pressure points as an array.
Parameters
----------
branch : {None, 'ads', 'des'}
The branch of the pressure to return. If ``None``, returns entire
dataset.
pressure_unit : str, optional
Unit in which the pressure should be returned. If ``None``
it defaults to which pressure unit the isotherm is currently in.
pressure_mode : {None, 'absolute', 'relative', 'relative%'}
The mode in which to return the pressure, if possible. If ``None``,
returns mode the isotherm is currently in.
limits : [float, float], optional
Minimum and maximum pressure limits.
Put None or -+np.inf for no limit.
indexed : bool, optional
            If set to True, the function returns an indexed
            pandas.Series instead of an array.
Returns
-------
array or Series
The pressure slice corresponding to the parameters passed.
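        Examples
        --------
        A short sketch; the unit is illustrative.
        >>> isotherm.pressure(branch='ads', pressure_unit='Pa')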
"""
ret = self.data(branch=branch).loc[:, self.pressure_key]
if not ret.empty:
# Convert if needed
if pressure_mode or pressure_unit:
# If pressure mode not given, try current
if not pressure_mode:
pressure_mode = self.pressure_mode
# If pressure unit not given, try current
if not pressure_unit:
pressure_unit = self.pressure_unit
ret = c_pressure(
ret,
mode_from=self.pressure_mode,
mode_to=pressure_mode,
unit_from=self.pressure_unit,
unit_to=pressure_unit,
adsorbate=self.adsorbate,
temp=self.temperature
)
# Select required points
if limits:
ret = ret.loc[ret.between(
-numpy.inf if limits[0] is None else limits[0],
numpy.inf if limits[1] is None else limits[1]
)]
if indexed:
return ret
return ret.values
def loading(
self,
branch=None,
loading_unit=None,
loading_basis=None,
material_unit=None,
material_basis=None,
limits=None,
indexed=False
):
"""
Return loading points as an array.
Parameters
----------
branch : {None, 'ads', 'des'}
The branch of the loading to return. If ``None``, returns entire
dataset.
loading_unit : str, optional
Unit in which the loading should be returned. If ``None``
it defaults to which loading unit the isotherm is currently in.
loading_basis : {None, 'mass', 'volume', 'molar'}
The basis on which to return the loading, if possible. If ``None``,
returns on the basis the isotherm is currently in.
material_unit : str, optional
            Unit in which the material should be returned. If ``None``
            it defaults to the material unit the isotherm is currently in.
material_basis : {None, 'mass', 'volume', 'molar'}
The basis on which to return the material, if possible. If ``None``,
returns on the basis the isotherm is currently in.
limits : [float, float], optional
Minimum and maximum loading limits.
Put None or -+np.inf for no limit.
indexed : bool, optional
            If set to True, the function returns an indexed
            pandas.Series instead of an array.
Returns
-------
Array or Series
The loading slice corresponding to the parameters passed.
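        Examples
        --------
        A short sketch; basis and unit are illustrative.
        >>> isotherm.loading(branch='des', loading_basis='molar',
        ...                  loading_unit='mmol')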
"""
ret = self.data(branch=branch).loc[:, self.loading_key]
if not ret.empty:
# Convert if needed
# First adsorbent is converted
if material_basis or material_unit:
if not material_basis:
material_basis = self.material_basis
ret = c_material(
ret,
basis_from=self.material_basis,
basis_to=material_basis,
unit_from=self.material_unit,
unit_to=material_unit,
material=self.material
)
# Then loading
if loading_basis or loading_unit:
if not loading_basis:
loading_basis = self.loading_basis
# These must be specified
# in the case of fractional conversions
if not material_basis:
material_basis = self.material_basis
if not material_unit:
material_unit = self.material_unit
ret = c_loading(
ret,
basis_from=self.loading_basis,
basis_to=loading_basis,
unit_from=self.loading_unit,
unit_to=loading_unit,
adsorbate=self.adsorbate,
temp=self.temperature,
basis_material=material_basis,
unit_material=material_unit,
)
# Select required points
if limits:
ret = ret.loc[ret.between(
-numpy.inf if limits[0] is None else limits[0],
numpy.inf if limits[1] is None else limits[1]
)]
if indexed:
return ret
return ret.values
def other_data(
self,
key,
branch=None,
limits=None,
indexed=False,
):
"""
Return supplementary data points as an array.
Parameters
----------
key : str
Key in the isotherm DataFrame containing the data to select.
branch : {None, 'ads', 'des'}
The branch of the data to return. If ``None``, returns entire
dataset.
limits : [float, float], optional
Minimum and maximum data limits.
Put None or -+np.inf for no limit.
indexed : bool, optional
            If set to True, the function returns an indexed
            pandas.Series instead of an array.
Returns
-------
array or Series
The data slice corresponding to the parameters passed.
"""
if key in self.other_keys:
ret = self.data(branch=branch).loc[:, key]
if not ret.empty:
# Select required points
if limits:
ret = ret.loc[ret.between(
-numpy.inf if limits[0] is None else limits[0],
numpy.inf if limits[1] is None else limits[1]
)]
if indexed:
return ret
return ret.values
raise ParameterError(f"Isotherm does not contain any {key} data.")
def has_branch(self, branch):
"""
        Check if the isotherm has a specific branch.
Parameters
----------
branch : {None, 'ads', 'des'}
The branch of the data to check for.
Returns
-------
bool
Whether the data exists or not.
"""
return not self.data(branch=branch).empty
##########################################################
# Functions that interpolate values of the isotherm data
def pressure_at(
self,
loading,
branch='ads',
interpolation_type='linear',
interp_fill=None,
pressure_unit=None,
pressure_mode=None,
loading_unit=None,
loading_basis=None,
material_unit=None,
material_basis=None,
):
"""
Interpolate isotherm to compute pressure at any loading given.
Parameters
----------
loading : float
Loading at which to compute pressure.
branch : {'ads', 'des'}
            The branch of the isotherm to use for the calculation.
            Defaults to adsorption.
interpolation_type : str
The type of scipy.interp1d used: `linear`, `nearest`, `zero`,
`slinear`, `quadratic`, `cubic`. It defaults to `linear`.
interp_fill : array-like or (array-like, array_like) or “extrapolate”, optional
Parameter to determine what to do outside data bounds.
Passed to the scipy.interpolate.interp1d function as ``fill_value``.
If blank, interpolation will not predict outside the bounds of data.
pressure_unit : str
Unit the pressure is returned in. If ``None``, it defaults to
internal isotherm units.
pressure_mode : str
The mode the pressure is returned in. If ``None``, it defaults to
internal isotherm mode.
loading_unit : str
Unit the loading is specified in. If ``None``, it defaults to
internal isotherm units.
loading_basis : {None, 'mass', 'volume'}
The basis the loading is specified in. If ``None``,
assumes the basis the isotherm is currently in.
material_unit : str, optional
            Unit in which the material is passed in. If ``None``,
            it defaults to the material unit the isotherm is currently in.
material_basis : str
The basis the loading is passed in. If ``None``, it defaults to
internal isotherm basis.
Returns
-------
float
Predicted pressure at loading specified.
"""
# Convert to numpy array just in case
loading = numpy.asarray(loading)
# Check if interpolator is applicable
if (
self.p_interpolator is None
or self.p_interpolator.interp_branch != branch
or self.p_interpolator.interp_kind != interpolation_type
or self.p_interpolator.interp_fill != interp_fill
):
self.p_interpolator = IsothermInterpolator(
self.loading(branch=branch),
self.pressure(branch=branch),
interp_branch=branch,
interp_kind=interpolation_type,
interp_fill=interp_fill
)
# Ensure loading is in correct units and basis for the internal model
if material_basis or material_unit:
if not material_basis:
material_basis = self.material_basis
if not material_unit:
raise ParameterError(
"Must specify an material unit if the input is in another basis."
)
loading = c_material(
loading,
basis_from=material_basis,
basis_to=self.material_basis,
unit_from=material_unit,
unit_to=self.material_unit,
material=self.material
)
if loading_basis or loading_unit:
if not loading_basis:
loading_basis = self.loading_basis
if not loading_unit:
raise ParameterError(
"Must specify a loading unit if the input is in another basis."
)
loading = c_loading(
loading,
basis_from=loading_basis,
basis_to=self.loading_basis,
unit_from=loading_unit,
unit_to=self.loading_unit,
adsorbate=self.adsorbate,
temp=self.temperature,
basis_material=self.material_basis,
unit_material=self.material_unit,
)
# Interpolate using the internal interpolator
pressure = self.p_interpolator(loading)
# Ensure pressure is in correct units and mode requested
if pressure_mode or pressure_unit:
if not pressure_mode:
pressure_mode = self.pressure_mode
pressure = c_pressure(
pressure,
mode_from=self.pressure_mode,
mode_to=pressure_mode,
unit_from=self.pressure_unit,
unit_to=pressure_unit,
adsorbate=self.adsorbate,
temp=self.temperature
)
return pressure
def loading_at(
self,
pressure,
branch='ads',
interpolation_type='linear',
interp_fill=None,
pressure_unit=None,
pressure_mode=None,
loading_unit=None,
loading_basis=None,
material_unit=None,
material_basis=None,
):
"""
Interpolate isotherm to compute loading at any pressure given.
Parameters
----------
pressure : float or array
Pressure at which to compute loading.
        branch : {'ads', 'des'}
The branch the interpolation takes into account.
interpolation_type : str
            The type of scipy.interpolate.interp1d used: `linear`, `nearest`, `zero`,
            `slinear`, `quadratic`, `cubic`. It defaults to `linear`.
        interp_fill : array-like or (array-like, array-like) or "extrapolate", optional
            Parameter to determine what to do outside data bounds.
            Passed to the scipy.interpolate.interp1d function as ``fill_value``.
            If not specified, interpolation will not predict outside the bounds of the data.
pressure_unit : str
Unit the pressure is specified in. If ``None``, it defaults to
internal isotherm units.
pressure_mode : str
The mode the pressure is passed in. If ``None``, it defaults to
internal isotherm mode.
loading_unit : str, optional
            Unit in which the loading should be returned. If ``None``,
            it defaults to the loading unit the isotherm is currently in.
loading_basis : {None, 'mass', 'volume', 'molar'}
The basis on which to return the loading, if possible. If ``None``,
returns on the basis the isotherm is currently in.
material_unit : str, optional
            Material unit in which the data should be returned. If ``None``,
            it defaults to the material unit the isotherm is currently in.
material_basis : {None, 'mass', 'volume', 'molar'}
Material basis on which to return the data, if possible. If ``None``,
returns on the basis the isotherm is currently in.
Returns
-------
float or array
Predicted loading at pressure P.
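        For example, assuming ``isotherm`` is a PointIsotherm instance,
        ``isotherm.loading_at(1.5)`` returns the interpolated loading at a
        pressure of 1.5, expressed in the isotherm's internal units.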
"""
# Convert to a numpy array just in case
pressure = numpy.asarray(pressure)
# Check if interpolator is applicable
if (
self.l_interpolator is None
or self.l_interpolator.interp_branch != branch
or self.l_interpolator.interp_kind != interpolation_type
or self.l_interpolator.interp_fill != interp_fill
):
self.l_interpolator = IsothermInterpolator(
self.pressure(branch=branch),
self.loading(branch=branch),
interp_branch=branch,
interp_kind=interpolation_type,
interp_fill=interp_fill
)
# Ensure pressure is in correct units and mode for the internal model
if pressure_mode or pressure_unit:
if not pressure_mode:
pressure_mode = self.pressure_mode
if pressure_mode == 'absolute' and not pressure_unit:
raise ParameterError(
"Must specify a pressure unit if the input is in an absolute mode."
)
pressure = c_pressure(
pressure,
mode_from=pressure_mode,
mode_to=self.pressure_mode,
unit_from=pressure_unit,
unit_to=self.pressure_unit,
adsorbate=self.adsorbate,
temp=self.temperature
)
# Interpolate using the internal interpolator
loading = self.l_interpolator(pressure)
# Ensure loading is in correct units and basis requested
if material_basis or material_unit:
if not material_basis:
material_basis = self.material_basis
loading = c_material(
loading,
basis_from=self.material_basis,
basis_to=material_basis,
unit_from=self.material_unit,
unit_to=material_unit,
material=self.material
)
if loading_basis or loading_unit:
if not loading_basis:
loading_basis = self.loading_basis
loading = c_loading(
loading,
basis_from=self.loading_basis,
basis_to=loading_basis,
unit_from=self.loading_unit,
unit_to=loading_unit,
adsorbate=self.adsorbate,
temp=self.temperature,
basis_material=self.material_basis,
unit_material=self.material_unit,
)
return loading
def spreading_pressure_at(
self,
pressure,
branch='ads',
pressure_unit=None,
pressure_mode=None,
loading_unit=None,
loading_basis=None,
material_unit=None,
material_basis=None,
interp_fill=None
):
r"""
Calculate reduced spreading pressure at a bulk adsorbate pressure P.
Use numerical quadrature on isotherm data points to compute the reduced
spreading pressure via the integral:
.. math::
\Pi(p) = \int_0^p \frac{q(\hat{p})}{ \hat{p}} d\hat{p}.
In this integral, the isotherm :math:`q(\hat{p})` is represented by a
linear interpolation of the data.
For in-detail explanations, check reference [#]_.
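        Between two consecutive data points the isotherm is taken as linear,
        :math:`q(\hat{p}) = m\hat{p} + b`, so each segment contributes
        .. math::
            \int_{p_i}^{p_{i+1}} \frac{m\hat{p} + b}{\hat{p}} d\hat{p}
            = m (p_{i+1} - p_i) + b \ln\frac{p_{i+1}}{p_i}
        to the total, while below the first data point the isotherm is
        approximated by Henry's law (see the implementation below).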
Parameters
----------
pressure : float
            Pressure at which to compute the reduced spreading pressure,
            in the specified pressure units and mode.
branch : {'ads', 'des'}
            The isotherm branch used for the calculation. Defaults to adsorption.
        pressure_unit : str
            Unit the ``pressure`` argument (and the isotherm data used in the
            quadrature) is expressed in. If ``None``, it defaults to internal
            isotherm units.
        pressure_mode : str
            The mode the pressure is expressed in. If ``None``, it defaults to
            internal isotherm mode.
        loading_unit : str
            Unit the loading data is retrieved in, which also sets the unit of
            the returned spreading pressure. If ``None``, it defaults to
            internal isotherm units.
        loading_basis : {None, 'mass', 'volume', 'molar'}
            The basis the loading data is retrieved in. If ``None``, it
            defaults to the internal isotherm basis.
        material_unit : str, optional
            Unit in which the material is referenced. If ``None``, it defaults
            to the material unit the isotherm is currently in.
        material_basis : str
            The basis the material is referenced in. If ``None``, it defaults
            to the internal isotherm basis.
        interp_fill : array-like or (array-like, array-like) or "extrapolate", optional
            Parameter to determine what to do outside data bounds.
            Passed to the scipy.interpolate.interp1d function as ``fill_value``.
            If not specified, interpolation will not predict outside the bounds of the data.
Returns
-------
float
Spreading pressure, :math:`\Pi`.
References
----------
.. [#] C. Simon, B. Smit, M. Haranczyk. pyIAST: Ideal Adsorbed Solution
Theory (IAST) Python Package. Computer Physics Communications.
"""
# Get all data points
pressures = self.pressure(
branch=branch,
pressure_unit=pressure_unit,
pressure_mode=pressure_mode
)
loadings = self.loading(
branch=branch,
loading_unit=loading_unit,
loading_basis=loading_basis,
material_unit=material_unit,
material_basis=material_basis
)
# throw exception if interpolating outside the range.
        if (self.l_interpolator is not None and self.l_interpolator.interp_fill is None) and \
                (pressure > pressures.max() or pressure < pressures.min()):
raise CalculationError(
textwrap.dedent(
f"""
To compute the spreading pressure at this bulk adsorbate pressure,
we would need to extrapolate the isotherm since this pressure ({pressure:.3f} {self.pressure_unit})
                    is outside the range of your pure-component isotherm data
                    (maximum pressure: {pressures.max()} {self.pressure_unit}).
At present, the PointIsotherm class is set to throw an exception
when this occurs, as we do not have data outside this pressure range
to characterize the isotherm at higher pressures.
Option 1: fit an analytical model to extrapolate the isotherm
                    Option 2: pass an `interp_fill` to the spreading pressure function of the
PointIsotherm object. Then, that PointIsotherm will
assume that the uptake beyond {pressures.max()} {self.pressure_unit} is given by
`interp_fill`. This is reasonable if your isotherm data exhibits
a plateau at the highest pressures.
Option 3: Go back to the lab or computer to collect isotherm data
at higher pressures. (Extrapolation can be dangerous!)
"""
)
)
# approximate loading up to first pressure point with Henry's law
# loading = henry_const * P
# henry_const is the initial slope in the adsorption isotherm
henry_const = loadings[0] / pressures[0]
# get how many of the points are less than pressure P
n_points = numpy.sum(pressures < pressure)
if n_points == 0:
# if this pressure is between 0 and first pressure point...
# \int_0^P henry_const P /P dP = henry_const * P ...
return henry_const * pressure
# P > first pressure point
area = loadings[0] # area of first segment \int_0^P_1 n(P)/P dP
# get area between P_1 and P_k, where P_k < P < P_{k+1}
for i in range(n_points - 1):
# linear interpolation of isotherm data
slope = (loadings[i + 1] -
loadings[i]) / (pressures[i + 1] - pressures[i])
intercept = loadings[i] - slope * pressures[i]
# add area of this segment
area += slope * (pressures[i + 1] - pressures[i]) + intercept * \
numpy.log(pressures[i + 1] / pressures[i])
# finally, area of last segment
slope = (
self.loading_at(
pressure,
branch=branch,
pressure_unit=pressure_unit,
pressure_mode=pressure_mode,
loading_unit=loading_unit,
loading_basis=loading_basis,
material_unit=material_unit,
material_basis=material_basis,
interp_fill=interp_fill
) - loadings[n_points - 1]
) / (pressure - pressures[n_points - 1])
intercept = loadings[n_points - 1] - \
slope * pressures[n_points - 1]
area += slope * (pressure - pressures[n_points - 1]) + intercept * \
numpy.log(pressure / pressures[n_points - 1])
return area
| mit |
| fzalkow/scikit-learn | sklearn/linear_model/coordinate_descent.py | 37 | 74167 |
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``,
        the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False, ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
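        # The low-level solvers minimise an objective of the form
        #     0.5 * ||y - Xw||^2_2 + l1_reg * ||w||_1 + 0.5 * l2_reg * ||w||^2_2,
        # so the user-facing alpha (defined per sample) is rescaled by n_samples.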
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
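    For example, a penalty of ``0.7 * L1 + 0.3 * L2`` (``a = 0.7``, ``b = 0.3``)
    corresponds to ``alpha = 1.0`` and ``l1_ratio = 0.7``.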
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2.
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
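    # Undo the column scaling applied during fitting and recover the
    # intercepts so that test-set predictions are on the original data scale.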
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
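            # Lasso-type models have no l1_ratio attribute; their path is a
            # pure L1 path, equivalent to a single l1_ratio of 1.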
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
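        # mean_mse has shape (n_l1_ratio, n_alphas): the test MSE for each
        # candidate (l1_ratio, alpha) pair, averaged over the CV folds.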
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
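# --- Illustrative sketch (not part of the library) ---
# The Notes in the fit docstring above recommend passing X as a
# Fortran-contiguous array so the coordinate descent solver does not have to
# convert (and copy) it internally.  A minimal usage sketch with random,
# purely illustrative data; nothing here is executed on import.
if __name__ == "__main__":
    import numpy as np
    from sklearn.linear_model import MultiTaskElasticNet

    rng = np.random.RandomState(0)
    X_demo = np.asfortranarray(rng.randn(50, 10))  # column-major, no copy in fit
    W_true = np.zeros((10, 3))
    W_true[:3] = rng.randn(3, 3)                   # only 3 informative features
    Y_demo = X_demo.dot(W_true)

    enet = MultiTaskElasticNet(alpha=0.1, l1_ratio=0.5).fit(X_demo, Y_demo)
    print(enet.coef_.shape)                        # (n_tasks, n_features) == (3, 10)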
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
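# --- Illustrative sketch (not part of the library) ---
# A minimal, hedged usage example of the cross-validated estimator defined
# above: MultiTaskLassoCV builds its own alpha grid and picks alpha_ by
# K-fold cross-validation; mse_path_ stores the per-fold test errors.  The
# data below is random and only meant to show which attributes are read.
if __name__ == "__main__":
    import numpy as np
    from sklearn.linear_model import MultiTaskLassoCV

    rng = np.random.RandomState(42)
    X_demo = rng.randn(60, 8)
    W_true = np.zeros((8, 2))
    W_true[:2] = rng.randn(2, 2)                  # shared sparsity across tasks
    Y_demo = X_demo.dot(W_true) + 0.01 * rng.randn(60, 2)

    mt_cv = MultiTaskLassoCV(n_alphas=50, cv=3).fit(X_demo, Y_demo)
    print(mt_cv.alpha_)                           # penalty chosen by cross-validation
    print(mt_cv.mse_path_.shape)                  # (n_alphas, n_folds) == (50, 3)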
|
bsd-3-clause
|
jwi078/incubator-airflow
|
airflow/hooks/base_hook.py
|
5
|
2004
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
Abstract base class for hooks. Hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook and PigHook return
objects that can handle the connection and interaction with specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def get_connections(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
session.expunge_all()
session.close()
return db
@classmethod
def get_connection(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
else:
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
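# --- Illustrative sketch (not part of Airflow) ---
# The class docstring above describes hooks as thin interfaces to external
# systems.  The hypothetical subclass below shows the usual pattern: resolve
# connection metadata through BaseHook.get_connection() and implement
# get_conn() on top of it.  The class name and the 'my_service_default'
# conn_id are made up for illustration.
class _ExampleServiceHook(BaseHook):
    """Hypothetical hook sketching the common subclassing pattern."""
    def __init__(self, my_conn_id='my_service_default'):
        self.my_conn_id = my_conn_id

    def get_conn(self):
        # Resolved from the AIRFLOW_CONN_* environment variable or the
        # metadata database, as implemented in get_connection() above.
        conn = self.get_connection(self.my_conn_id)
        # A real hook would build and return a client for conn.host here;
        # returning the Connection object keeps the sketch dependency-free.
        return conn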
|
apache-2.0
|
ssh0/growing-string
|
triangular_lattice/mass.py
|
1
|
3095
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2017-01-21
import numpy as np
import random
import matplotlib.pyplot as plt
from tqdm import tqdm
import argparse
from growing_string import Main
from optimize import Optimize_powerlaw
import save_data
def mass_for_beta_one(beta, frames_list, N_r=100, num_of_strings=100):
frames = np.max(frames_list)
center_sample = int(np.min(frames_list) / 2)
L = (frames + 1) * 2
def calc_mass_in_r(self, i, s):
N = len(s.vec) + 1
if N - 3 not in frames_list:
return None
pos = list(s.pos.T)
x, y = self.lattice_X[pos], self.lattice_Y[pos]
X, Y = np.average(x), np.average(y)
R = np.sqrt(np.sum((x - X) ** 2 + (y - Y) ** 2) / float(N))
dist = np.sqrt((x - X) ** 2 + (y - Y) ** 2)
r = np.logspace(1, np.log2(max(dist)), num=N_r, base=2.)
centers_index = sorted(random.sample(range(N), center_sample))
M = []
for _r in r:
res = []
for c in centers_index:
index_x, index_y = s.pos[c]
dist = np.sqrt((x - self.lattice_X[index_x, index_y]) ** 2
+ (y - self.lattice_Y[index_x, index_y]) ** 2)
res.append(len(np.where(dist < _r)[0]))
M.append(np.average(res))
return np.array([r, M]).T
main = Main(Lx=L, Ly=L, plot=False,
frames=frames,
beta=beta,
strings=[{'id': 1, 'x': L/4, 'y': L/2, 'vec': [0, 4]}],
post_function=calc_mass_in_r)
_M = np.array([m for m in main.post_func_res if m is not None])
Ms = {frames_list[i]: _M[i] for i in range(len(frames_list))}
for s in tqdm(range(num_of_strings - 1)):
main = Main(Lx=L, Ly=L, plot=False,
frames=frames,
beta=beta,
strings=[{'id': 1, 'x': L/4, 'y': L/2, 'vec': [0, 4]}],
post_function=calc_mass_in_r)
_M = np.array([m for m in main.post_func_res if m is not None])
# print _M.shape
for i, frames in enumerate(frames_list):
Ms[frames] = np.vstack((Ms[frames], _M[i]))
for frames in frames_list:
r, M = Ms[frames].T
sorted_index = np.argsort(r)
r, M = r[sorted_index], M[sorted_index]
save_data.save("./results/data/mass_in_r/beta=%2.2f_frames=%d_" % (beta, frames),
num_of_strings=num_of_strings,
N_r=N_r, beta=beta, L=L, frames=frames, r=r, M=M)
if __name__ == '__main__':
# frames_list = np.linspace(200, 600, num=3, dtype=np.int)
frames_list = np.linspace(200, 2000, num=10, dtype=np.int)
parser = argparse.ArgumentParser()
parser.add_argument('beta', type=float, nargs=1,
help='parameter beta')
args = parser.parse_args()
beta = args.beta[0]
# beta = 0.
# mass_for_beta_one(beta, frames_list, N_r=4, num_of_strings=3)
mass_for_beta_one(beta, frames_list, N_r=100, num_of_strings=100)
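# --- Illustrative sketch (assumptions noted) ---
# The saved (r, M) pairs are expected to follow M(r) ~ r^D over an
# intermediate range of r, so the fractal dimension D is the slope of
# log M versus log r.  The script imports Optimize_powerlaw for this kind of
# fit, but since its interface is not shown here, the helper below uses
# np.polyfit as a stand-in; the fitting window is arbitrary and would need
# tuning against the actual data.
def estimate_fractal_dimension(r, M, fit_slice=slice(10, -10)):
    """Least-squares slope of log M against log r on the chosen window."""
    log_r, log_M = np.log(r[fit_slice]), np.log(M[fit_slice])
    D, _intercept = np.polyfit(log_r, log_M, 1)  # degree-1 fit: [slope, intercept]
    return D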
|
mit
|
hitszxp/scikit-learn
|
sklearn/linear_model/omp.py
|
6
|
29556
|
"""Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False, return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path
)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
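# --- Illustrative sketch (not part of the library) ---
# A minimal, hedged example of the function above: the columns of X are
# normalized to unit norm (as the docstring assumes), y is generated from a
# 3-sparse coefficient vector, and OMP is asked for exactly 3 atoms.  The
# data is random and purely illustrative; nothing runs on import.
if __name__ == "__main__":
    import numpy as np
    from sklearn.linear_model import orthogonal_mp

    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 30)
    X_demo /= np.sqrt(np.sum(X_demo ** 2, axis=0))  # unit-norm columns
    gamma_true = np.zeros(30)
    gamma_true[[2, 11, 23]] = [1.5, -2.0, 0.8]      # 3-sparse signal
    y_demo = np.dot(X_demo, gamma_true)

    gamma_hat = orthogonal_mp(X_demo, y_demo, n_nonzero_coefs=3)
    print(np.flatnonzero(gamma_hat))                # expected support: [ 2 11 23]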
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the rows of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path
)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
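# --- Illustrative sketch (not part of the library) ---
# The Gram variant above solves the same problems starting from the
# precomputed quantities G = X.T * X and Xy = X.T * y, which is also what
# orthogonal_mp does internally when precompute is True.  Random data,
# purely illustrative; nothing runs on import.
if __name__ == "__main__":
    import numpy as np
    from sklearn.linear_model import orthogonal_mp, orthogonal_mp_gram

    rng = np.random.RandomState(1)
    X_demo = rng.randn(80, 20)
    X_demo /= np.sqrt(np.sum(X_demo ** 2, axis=0))  # unit-norm columns
    w_true = np.zeros(20)
    w_true[[1, 7, 12, 18]] = [1.0, -1.0, 2.0, 0.5]  # 4-sparse signal
    y_demo = np.dot(X_demo, w_true)

    G_demo = np.dot(X_demo.T, X_demo)
    Xy_demo = np.dot(X_demo.T, y_demo)
    coef_gram = orthogonal_mp_gram(G_demo, Xy_demo, n_nonzero_coefs=4)
    coef_full = orthogonal_mp(X_demo, y_demo, n_nonzero_coefs=4)
    print(np.allclose(coef_gram, coef_full))        # expected: True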
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Mathching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X = check_array(X)
y = np.asarray(y)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Mathching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
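# --- Illustrative sketch (not part of the library) ---
# A minimal, hedged usage example of the cross-validated estimator above:
# the number of non-zero coefficients is chosen by cross-validation and
# exposed as n_nonzero_coefs_.  Random data, purely illustrative.
if __name__ == "__main__":
    import numpy as np
    from sklearn.linear_model import OrthogonalMatchingPursuitCV

    rng = np.random.RandomState(2)
    X_demo = rng.randn(120, 40)
    w_true = np.zeros(40)
    w_true[[3, 15, 27]] = [2.0, -1.5, 1.0]      # 3-sparse signal
    y_demo = np.dot(X_demo, w_true) + 0.05 * rng.randn(120)

    omp_cv = OrthogonalMatchingPursuitCV(cv=5).fit(X_demo, y_demo)
    print(omp_cv.n_nonzero_coefs_)              # typically 3 for this data
    print(np.flatnonzero(omp_cv.coef_))         # typically [ 3 15 27]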
|
bsd-3-clause
|
kwailamchan/programming-languages
|
python/stock_insights/explorer/date_tables.py
|
3
|
1074
|
import pandas as pd
def first_date_table(data, date_col, subset_col, outfile):
"""Output a csv table by keeping the records of the first date only
inputs:
- data: it must be pandas.Dataframe()
- date_col: the column of the Date, i.e. ['Date']
- subset_col: the column of the subset/ID, i.e. 'Name'
- outfile: the path of the csv table, i.e. "./first_dates.csv"
"""
first_dates = data.sort_values(by=date_col).drop_duplicates(subset=subset_col, keep='first')
first_dates.to_csv(outfile)
def last_date_table(data, date_col, subset_col, outfile):
"""Output a csv table by keeping the records of the first date only
inputs:
- data: it must be pandas.Dataframe()
- date_col: the column of the Date, i.e. ['Date']
- subset_col: the column of the subset/ID, i.e. 'Name'
- outfile: the path of the csv table, i.e. "./last_dates.csv"
"""
last_dates = data.sort_values(by=date_col).drop_duplicates(subset=subset_col, keep='last')
last_dates.to_csv(outfile)
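# --- Illustrative sketch ---
# A minimal usage example for the two helpers above on a tiny hand-made
# DataFrame; the column names and output paths are arbitrary.
if __name__ == "__main__":
    stocks = pd.DataFrame({
        'Name': ['AAA', 'AAA', 'BBB', 'BBB'],
        'Date': ['2016-01-04', '2016-01-05', '2016-01-04', '2016-01-06'],
        'Close': [10.0, 10.5, 20.0, 19.5],
    })
    # one output row per Name: its earliest / latest Date record
    first_date_table(stocks, date_col=['Date'], subset_col='Name',
                     outfile="./first_dates.csv")
    last_date_table(stocks, date_col=['Date'], subset_col='Name',
                    outfile="./last_dates.csv")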
|
mit
|
weinbe58/QuSpin
|
docs/downloads/51839bd4f6d13d9b99c7584c13625513/example9.py
|
3
|
5112
|
from __future__ import print_function, division
import sys,os
# line 4 and line 5 below are for development purposes and can be removed
qspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,qspin_path)
#####################################################################
# example 9 #
# In this script we demonstrate how to use QuSpin's #
# general basis class to construct user-defined symmetry sectors. #
# We study thermalisation in the 2D transverse-field Ising model #
# with periodic boundary conditions. #
#####################################################################
from quspin.operators import hamiltonian, exp_op # operators
from quspin.basis import spin_basis_1d, spin_basis_general # spin basis constructor
from quspin.tools.measurements import obs_vs_time # calculating dynamics
from quspin.tools.Floquet import Floquet_t_vec # period-spaced time vector
import numpy as np # general math functions
import matplotlib.pyplot as plt # plotting library
#
###### define model parameters ######
L_1d = 16 # length of chain for spin 1/2
Lx, Ly = 4, 4 # linear dimension of spin 1 2d lattice
N_2d = Lx*Ly # number of sites for spin 1
Omega = 2.0 # drive frequency
A = 2.0 # drive amplitude
#
###### setting up user-defined symmetry transformations for 2d lattice ######
s = np.arange(N_2d) # sites [0,1,2,....]
x = s%Lx # x positions for sites
y = s//Lx # y positions for sites
T_x = (x+1)%Lx + Lx*y # translation along x-direction
T_y = x +Lx*((y+1)%Ly) # translation along y-direction
P_x = x + Lx*(Ly-y-1) # reflection about x-axis
P_y = (Lx-x-1) + Lx*y # reflection about y-axis
Z = -(s+1) # spin inversion
#
###### setting up bases ######
basis_1d = spin_basis_1d(L_1d,kblock=0,pblock=1,zblock=1) # 1d - basis
basis_2d = spin_basis_general(N_2d,kxblock=(T_x,0),kyblock=(T_y,0),
pxblock=(P_x,0),pyblock=(P_y,0),zblock=(Z,0)) # 2d - basis
# print information about the basis
print("Size of 1D H-space: {Ns:d}".format(Ns=basis_1d.Ns))
print("Size of 2D H-space: {Ns:d}".format(Ns=basis_2d.Ns))
#
###### setting up operators in hamiltonian ######
# setting up site-coupling lists
Jzz_1d=[[-1.0,i,(i+1)%L_1d] for i in range(L_1d)]
hx_1d =[[-1.0,i] for i in range(L_1d)]
#
Jzz_2d=[[-1.0,i,T_x[i]] for i in range(N_2d)]+[[-1.0,i,T_y[i]] for i in range(N_2d)]
hx_2d =[[-1.0,i] for i in range(N_2d)]
# setting up hamiltonians
# 1d
Hzz_1d=hamiltonian([["zz",Jzz_1d]],[],basis=basis_1d,dtype=np.float64)
Hx_1d =hamiltonian([["x",hx_1d]],[],basis=basis_1d,dtype=np.float64)
# 2d
Hzz_2d=hamiltonian([["zz",Jzz_2d]],[],basis=basis_2d,dtype=np.float64)
Hx_2d =hamiltonian([["x",hx_2d]],[],basis=basis_2d,dtype=np.float64)
#
###### calculate initial states ######
# calculating bandwidth for non-driven hamiltonian
[E_1d_min],psi_1d = Hzz_1d.eigsh(k=1,which="SA")
[E_2d_min],psi_2d = Hzz_2d.eigsh(k=1,which="SA")
# setting up initial states
psi0_1d = psi_1d.ravel()
psi0_2d = psi_2d.ravel()
#
###### time evolution ######
# stroboscopic time vector
nT = 200 # number of periods to evolve to
t=Floquet_t_vec(Omega,nT,len_T=1) # t.vals=t, t.i=initial time, t.T=drive period
# creating generators of time evolution using exp_op class
U1_1d = exp_op(Hzz_1d+A*Hx_1d,a=-1j*t.T/4)
U2_1d = exp_op(Hzz_1d-A*Hx_1d,a=-1j*t.T/2)
U1_2d = exp_op(Hzz_2d+A*Hx_2d,a=-1j*t.T/4)
U2_2d = exp_op(Hzz_2d-A*Hx_2d,a=-1j*t.T/2)
# user-defined generator for stroboscopic dynamics
def evolve_gen(psi0,nT,*U_list):
yield psi0
for i in range(nT): # loop over number of periods
for U in U_list: # loop over unitaries
psi0 = U.dot(psi0)
yield psi0
# get generator objects for time-evolved states
psi_1d_t = evolve_gen(psi0_1d,nT,U1_1d,U2_1d,U1_1d)
psi_2d_t = evolve_gen(psi0_2d,nT,U1_2d,U2_2d,U1_2d)
#
###### compute expectation values of observables ######
# measure Hzz as a function of time
Obs_1d_t = obs_vs_time(psi_1d_t,t.vals,dict(E=Hzz_1d),return_state=True)
Obs_2d_t = obs_vs_time(psi_2d_t,t.vals,dict(E=Hzz_2d),return_state=True)
# calculating the entanglement entropy density
Sent_time_1d = basis_1d.ent_entropy(Obs_1d_t["psi_t"],sub_sys_A=range(L_1d//2))["Sent_A"]
Sent_time_2d = basis_2d.ent_entropy(Obs_2d_t["psi_t"],sub_sys_A=range(N_2d//2))["Sent_A"]
# Page-value estimate of the entanglement entropy density (used for normalisation below)
s_p_1d = np.log(2)-2.0**(-L_1d//2)/L_1d
s_p_2d = np.log(2)-2.0**(-N_2d//2)/N_2d
#
###### plotting results ######
plt.plot(t.strobo.inds,(Obs_1d_t["E"]-E_1d_min)/(-E_1d_min),marker='.',markersize=5,label="$S=1/2$")
plt.plot(t.strobo.inds,(Obs_2d_t["E"]-E_2d_min)/(-E_2d_min),marker='.',markersize=5,label="$S=1$")
plt.grid()
plt.ylabel("$Q(t)$",fontsize=20)
plt.xlabel("$t/T$",fontsize=20)
plt.savefig("TFIM_Q.pdf")
plt.figure()
plt.plot(t.strobo.inds,Sent_time_1d/s_p_1d,marker='.',markersize=5,label="$1d$")
plt.plot(t.strobo.inds,Sent_time_2d/s_p_2d,marker='.',markersize=5,label="$2d$")
plt.grid()
plt.ylabel("$s_{\mathrm{ent}}(t)/s_\mathrm{Page}$",fontsize=20)
plt.xlabel("$t/T$",fontsize=20)
plt.legend(loc=0,fontsize=16)
plt.tight_layout()
plt.savefig("TFIM_S.pdf")
#plt.show()
plt.close()
|
bsd-3-clause
|
mm14kwn/2015-12-14-Portsmouth-students
|
ScientificPython/L03-matplotlib/Exercise/solutions/customised_subplots.py
|
2
|
3008
|
#!/usr/bin/env python
#
# Demonstration of customised plotting on multiple subplots
#
# This script plots a column of 3 plots, a pie chart and 2
# vertically stacked histograms
#
# ARCHER, 2015
# Uncomment the following two lines to save an image
# without needing an available display
# import matplotlib
# matplotlib.use("Agg")
# import pyplot, numpy as usual
import matplotlib.pyplot as plt
import numpy as np
## Generate random data for the histograms
# h1 = abs(np.random.randn(100));
# h2 = abs(np.random.randn(100));
## Save data in files
# np.savetxt('histogram1.dat',h1);
# np.savetxt('histogram2.dat',h2);
# Create subplot grid with handles for each subplot
(fig, ax) = plt.subplots(3,1);
# Adjust the figure size
fig.set_size_inches(4,8);
# Set the figure title with suptitle
# Alternatively set the title of the topmost
# plot:
# plt.sca(ax[0]); plt.title('Customise subplots',loc='center');
#
fig.suptitle('Customised subplots');
# Set the current plotting area to the topmost plot, plot 1
plt.sca(ax[0]);
# Pie chart parameters
pie_labels = 'A', 'B', 'C', 'D'
pie_sizes = [15, 30, 45, 10]
pie_colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
pie_radius = 1.1 # size of pie chart
pie_angle = 90 # orientation of pie chart
plt.axis('equal') # have both x, y axis equal
# Plot the pie chart using random slice sizes (note: the pie_sizes list
# defined above is not used in this call)
# The slices will be ordered and plotted counter-clockwise
plt.pie(np.random.random(4), labels=pie_labels, colors=pie_colors,
autopct='%1.1f%%', shadow=True, startangle=pie_angle, radius=pie_radius);
# Load the histogram data
h1 = np.loadtxt('histogram1.dat');
h2 = np.loadtxt('histogram2.dat');
# Set the number of bins
bins = 50
# Set the current axis to the middle plot, plot 2
plt.sca(ax[1]);
# Plot the histogram
plt.hist(h1, bins, color='m',label='hist 1');
plt.ylabel('Plot 2 y-axis');
plt.legend();
# Set the x-axis tick marks
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='on', # ticks along the bottom edge are on
top='on', # ticks along the top edge are on
labelbottom='off') # tick labels along the bottom are off
# Set the y-axis ticks so that values will not
# overlap with y-axis of bottom plot
yt2=[2,4,6,8];
yt2L=['2','4','6','8'];
plt.yticks(yt2, yt2L);
# Set the current axis to the bottom plot, plot 3
plt.sca(ax[2]);
plt.hist(h2, bins, color='g', label='hist 2');
plt.ylabel('Plot 3 y-axis');
plt.xlabel('Plot 3 x-axis');
plt.legend();
# Set the y-axis tick marks, adjusting for any overlap
# with plot 2
yt3=[0,2,4,6,8];
yt3L=['0','2','4','6','8'];
plt.yticks(yt3, yt3L);
# Want tick marks on the x-axis of the bottom plot
xt3=np.arange(0,4);
xt3L=['0.','1.','2.','3.'];
# Finally remove (though not completely) the space between the
# two histograms so they appear to share the x-axis
plt.subplots_adjust(hspace=0.001);
# Save figure as a PNG image
plt.savefig('customised_subplots.png')
## Show figure
# plt.show()
|
gpl-2.0
|
18padx08/PPTex
|
PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/external/importtools.py
|
85
|
7294
|
"""Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
                    # Otherwise we don't know what this is. Hopefully
                    # it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
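# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module).
# It shows the guarded-import pattern import_module is built for: the call
# returns the module object on success and None when the package is missing
# or too old. The __main__ guard keeps the sketch from running when
# importtools is imported normally; numpy is used purely as an example
# dependency, mirroring the docstring example above.
if __name__ == '__main__':
    numpy = import_module('numpy', min_module_version='1.5',
                          warn_old_version=False)
    if numpy is None:
        print('numpy >= 1.5 is not available; falling back to pure Python')
    else:
        print('using numpy %s' % numpy.__version__)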
|
mit
|
jaidevd/scikit-learn
|
examples/manifold/plot_mds.py
|
88
|
2731
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
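# Illustrative addition (not part of the original example): both fitted MDS
# estimators expose the final value of their stress objective via the
# ``stress_`` attribute. The two numbers are not directly comparable between
# the metric and non-metric variants, but each one is a quick sanity check
# that the optimisation above converged to something reasonable.
print("Metric MDS stress: %.2f" % mds.stress_)
print("Non-metric MDS stress: %.2f" % nmds.stress_)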
|
bsd-3-clause
|
victor-prado/broker-manager
|
environment/lib/python3.5/site-packages/pandas/tseries/tests/test_timedeltas.py
|
7
|
78075
|
# pylint: disable-msg=E1101,W0612
from __future__ import division
from datetime import timedelta, time
import nose
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, Timedelta,
TimedeltaIndex, isnull, date_range,
timedelta_range, Int64Index)
from pandas.compat import range
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal, assert_index_equal)
from pandas.tseries.offsets import Day, Second
import pandas.util.testing as tm
from numpy.random import randn
from pandas import _np_version_under1p8
iNaT = tslib.iNaT
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_get_loc_nat(self):
tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00'])
self.assertEqual(tidx.get_loc(pd.NaT), 1)
self.assertEqual(tidx.get_loc(None), 1)
self.assertEqual(tidx.get_loc(float('nan')), 1)
self.assertEqual(tidx.get_loc(np.nan), 1)
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertFalse((v in td))
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertTrue((v in td))
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
        # currently invalid as it has a - on the hhmmss part (only allowed on
        # the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
        # this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_compare_timedelta_series(self):
        # regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
self.assert_numpy_array_equal(result, expected)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
td = Timedelta('1 day')
for l, r in [(td, 'a'), ('a', td)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
self.assertFalse(l == r)
self.assertTrue(l != r)
def test_fields(self):
def check(value):
            # check that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5), unit='D')
result = timedelta_range('0 days', periods=5, freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11), unit='D')
result = timedelta_range('0 days', '10 days', freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()
result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',
freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)
result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50), unit='T') * 30
result = timedelta_range('0 days', freq='30T', periods=50)
tm.assert_index_equal(result, expected)
# GH 11776
arr = np.arange(10).reshape(2, 5)
df = pd.DataFrame(np.arange(10).reshape(2, 5))
for arg in (arr, df):
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_timedelta(arg)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_timedelta(arg, errors=errors)
# issue10583
df = pd.DataFrame(np.random.normal(size=(10, 4)))
df.index = pd.timedelta_range(start='0s', periods=10, freq='s')
expected = df.loc[pd.Timedelta('0s'):, :]
result = df.loc['0s':, :]
assert_frame_equal(expected, result)
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000, 'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000, 'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000, 'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000, 'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10, 'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100, 'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000, 'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000, 'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000, 'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000, 'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1, 's')))
self.assertEqual(ct('06:00:01'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.0'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.01'), conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
self.assertEqual(ct('- 1days, 00:00:01'),
conv(-d1 + np.timedelta64(1, 's')))
self.assertEqual(ct('1days, 06:00:01'), conv(
d1 + np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), tslib.iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), tslib.iNaT)
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(to_timedelta('1 days 06:05:01.00003', box=False),
conv(d1 + np.timedelta64(6 * 3600 +
5 * 60 + 1, 's') +
np.timedelta64(30, 'us')))
self.assertEqual(to_timedelta('15.5us', box=False),
conv(np.timedelta64(15500, 'ns')))
# empty string
result = to_timedelta('', box=False)
self.assertEqual(result.astype('int64'), tslib.iNaT)
result = to_timedelta(['', ''])
self.assertTrue(isnull(result).all())
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, 's')]))
expected = pd.Index(np.array([np.timedelta64(1, 's')]))
tm.assert_index_equal(result, expected)
# ints
result = np.timedelta64(0, 'ns')
expected = to_timedelta(0, box=False)
self.assertEqual(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d', '1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([np.timedelta64(0, 'ns'), np.timedelta64(
10, 's').astype('m8[ns]')])
expected = to_timedelta([0, 10], unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
# arrays of various dtypes
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='s')
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='m')
expected = TimedeltaIndex([np.timedelta64(1, 'm')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='h')
expected = TimedeltaIndex([np.timedelta64(1, 'h')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 'D')] * 5)
tm.assert_index_equal(result, expected)
# Test with lists as input when box=false
expected = np.array(np.arange(3) * 1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array(
[0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_to_timedelta_invalid(self):
# bad value for errors parameter
msg = "errors must be one of"
tm.assertRaisesRegexp(ValueError, msg, to_timedelta,
['foo'], errors='never')
# these will error
self.assertRaises(ValueError, lambda: to_timedelta([1, 2], unit='foo'))
self.assertRaises(ValueError, lambda: to_timedelta(1, unit='foo'))
# time not supported ATM
self.assertRaises(ValueError, lambda: to_timedelta(time(second=1)))
self.assertTrue(to_timedelta(
time(second=1), errors='coerce') is pd.NaT)
self.assertRaises(ValueError, lambda: to_timedelta(['foo', 'bar']))
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(['foo', 'bar'], errors='coerce'))
tm.assert_index_equal(TimedeltaIndex(['1 day', pd.NaT, '1 min']),
to_timedelta(['1 day', 'bar', '1 min'],
errors='coerce'))
# gh-13613: these should not error because errors='ignore'
invalid_data = 'apple'
self.assertEqual(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
invalid_data = ['apple', '1 days']
tm.assert_numpy_array_equal(
np.array(invalid_data, dtype=object),
to_timedelta(invalid_data, errors='ignore'))
invalid_data = pd.Index(['apple', '1 days'])
tm.assert_index_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
invalid_data = Series(['apple', '1 days'])
tm.assert_series_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, 's')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
        # the computation is converted to float, so there might be some loss
        # of precision
self.assertTrue(np.allclose(result.value / 1000, expected.value /
1000))
# sum
self.assertRaises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
self.assertRaises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'),
timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
actual = pd.to_timedelta(pd.NaT)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
self.assertEqual(result, expected)
result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1,
milliseconds=1, microseconds=1, nanoseconds=1)
expected = Timedelta(694861001001001)
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
self.assertEqual(result, expected)
self.assertRaises(TypeError, lambda: Timedelta(nanoseconds='abc'))
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
# TODO: unused?
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = self.round_trip_pickle(v)
self.assertEqual(v, v_p)
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
self.assertEqual(hash(v), hash(td))
d = {td: 2}
self.assertEqual(d[v], 2)
tds = timedelta_range('1 second', periods=20)
self.assertTrue(all(hash(td) == hash(td.to_pytimedelta()) for td in
tds))
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
self.assertNotEqual(hash(ns_td), hash(ns_td.to_pytimedelta()))
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
self.assertTrue(min_td.value == np.iinfo(np.int64).min + 1)
self.assertTrue(max_td.value == np.iinfo(np.int64).max)
        # One step below the lower limit gives NaT; going further raises OverflowError
self.assertIsInstance(min_td - Timedelta(1, 'ns'),
pd.tslib.NaTType)
with tm.assertRaises(OverflowError):
min_td - Timedelta(2, 'ns')
with tm.assertRaises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
self.assertIsInstance(td, pd.tslib.NaTType)
with tm.assertRaises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with tm.assertRaises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
class TestTimedeltaIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range('1 days', '10 days')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_pickle(self):
rng = timedelta_range('1 days', periods=10)
rng_p = self.round_trip_pickle(rng)
tm.assert_index_equal(rng, rng_p)
def test_hash_error(self):
index = timedelta_range('1 days', periods=10)
with tm.assertRaisesRegexp(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_append_join_nondatetimeindex(self):
rng = timedelta_range('1 days', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assertIsInstance(result[0], Timedelta)
# it works
rng.join(idx, how='outer')
def test_append_numpy_bug_1681(self):
td = timedelta_range('1 days', '10 days', freq='2D')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': td}, index=td)
str(c)
result = a.append(c)
self.assertTrue((result['B'] == td).all())
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
self.assert_numpy_array_equal(rng.days, np.array(
[1, 1], dtype='int64'))
self.assert_numpy_array_equal(
rng.seconds,
np.array([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],
dtype='int64'))
self.assert_numpy_array_equal(rng.microseconds, np.array(
[100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))
self.assert_numpy_array_equal(rng.nanoseconds, np.array(
[456, 456], dtype='int64'))
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
tm.assert_series_equal(s.dt.seconds, Series(
[10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))
def test_total_seconds(self):
# GH 10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), np.array(expt))
# test Series
s = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with nat
s[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with both nat
s = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(s.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
self.assertFalse(result.iloc[0].isnull().all())
self.assertTrue(result.iloc[1].isnull().all())
def test_constructor(self):
expected = TimedeltaIndex(['1 days', '1 days 00:00:05', '2 days',
'2 days 00:00:02', '0 days 00:00:03'])
result = TimedeltaIndex(['1 days', '1 days, 00:00:05', np.timedelta64(
2, 'D'), timedelta(days=2, seconds=2), pd.offsets.Second(3)])
tm.assert_index_equal(result, expected)
# unicode
result = TimedeltaIndex([u'1 days', '1 days, 00:00:05', np.timedelta64(
2, 'D'), timedelta(days=2, seconds=2), pd.offsets.Second(3)])
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01',
'0 days 00:00:02'])
tm.assert_index_equal(TimedeltaIndex(range(3), unit='s'), expected)
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:05',
'0 days 00:00:09'])
tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit='s'), expected)
expected = TimedeltaIndex(
['0 days 00:00:00.400', '0 days 00:00:00.450',
'0 days 00:00:01.200'])
tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'),
expected)
def test_constructor_coverage(self):
rng = timedelta_range('1 days', periods=10.5)
exp = timedelta_range('1 days', periods=10)
self.assert_index_equal(rng, exp)
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
periods='foo', freq='D')
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
end='10 days')
self.assertRaises(ValueError, TimedeltaIndex, '1 days')
# generator expression
gen = (timedelta(i) for i in range(10))
result = TimedeltaIndex(gen)
expected = TimedeltaIndex([timedelta(i) for i in range(10)])
self.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['1 days', '2 days', '3 days'])
result = TimedeltaIndex(strings)
expected = to_timedelta([1, 2, 3], unit='d')
self.assert_index_equal(result, expected)
from_ints = TimedeltaIndex(expected.asi8)
self.assert_index_equal(from_ints, expected)
# non-conforming freq
self.assertRaises(ValueError, TimedeltaIndex,
['1 days', '2 days', '4 days'], freq='D')
self.assertRaises(ValueError, TimedeltaIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = TimedeltaIndex(start='1 days', periods=1, freq='D', name='TEST')
self.assertEqual(idx.name, 'TEST')
# GH10025
idx2 = TimedeltaIndex(idx, name='something else')
self.assertEqual(idx2.name, 'something else')
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range('20130101', periods=4)) - \
Series(date_range('20121201', periods=4))
td[2] += timedelta(minutes=5, seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1, 'D')
expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan
])
assert_series_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Series([31, 31, 31, np.nan])
assert_series_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_series_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_series_equal(result, expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1, 'D')
expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Index([31, 31, 31, np.nan])
assert_index_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_index_equal(result, expected)
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
exp = np.array([True, True, True] + [False] * 7)
self.assert_numpy_array_equal(result, exp)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
if _np_version_under1p8:
            # cannot test array because np.datetime64('nat') returns today's date
cases = [(tdidx1, tdidx2)]
else:
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check that pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
self.assert_numpy_array_equal(result, expected)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_map(self):
rng = timedelta_range('1 day', periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = np.array([f(x) for x in rng], dtype=np.int64)
self.assert_numpy_array_equal(result, exp)
def test_misc_coverage(self):
rng = timedelta_range('1 day', periods=5)
result = rng.groupby(rng.days)
tm.assertIsInstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(['3d', '1d', '2d'])
self.assertFalse(idx.equals(list(idx)))
non_td = Index(list('abc'))
self.assertFalse(idx.equals(list(non_td)))
def test_union(self):
i1 = timedelta_range('1day', periods=5)
i2 = timedelta_range('3day', periods=5)
result = i1.union(i2)
expected = timedelta_range('1day', periods=7)
self.assert_index_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = TimedeltaIndex(start='1 day', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_coverage(self):
idx = TimedeltaIndex(['3d', '1d', '2d'])
ordered = TimedeltaIndex(idx.sort_values(), freq='infer')
result = ordered.union(idx)
self.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
self.assert_index_equal(result, ordered)
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = timedelta_range('1 day', periods=4, freq='3H')
rng_b = timedelta_range('1 day', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(['1 day 15:19:49.695000'])
right = TimedeltaIndex(['2 day 13:04:21.322000',
'1 day 15:27:24.873000',
'1 day 15:31:05.350000'])
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assert_index_equal(result, exp)
def test_intersection_bug_1708(self):
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
self.assertEqual(len(result), 0)
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range('1 day 01:00:00', periods=3, freq='h')
tm.assert_index_equal(result, expected)
def test_get_duplicates(self):
idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day',
'4day'])
result = idx.get_duplicates()
ex = TimedeltaIndex(['2 day', '3day'])
self.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = TimedeltaIndex(['1 day 00:00:05', '1 day 00:00:01',
'1 day 00:00:02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_sort_values(self):
idx = TimedeltaIndex(['4d', '1d', '2d'])
ordered = idx.sort_values()
self.assertTrue(ordered.is_monotonic)
ordered = idx.sort_values(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.sort_values(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer,
np.array([1, 2, 0]),
check_dtype=False)
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer,
np.array([0, 2, 1]),
check_dtype=False)
def test_insert(self):
idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx')
self.assert_index_equal(result, exp)
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
self.assertNotIsInstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02',
'1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01',
'1day 00:00:02', '1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
        # preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D',
name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D',
name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(
['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
        # preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D',
name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d',
'7 d', '8 d', '9 d', '10d'],
freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
result = idx.delete(slice(n[0], n[-1] + 1))
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_take(self):
tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']
idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx')
expected = TimedeltaIndex(tds, freq=None, name='idx')
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
self.assert_index_equal(taken, expected)
tm.assertIsInstance(taken, TimedeltaIndex)
self.assertIsNone(taken.freq)
self.assertEqual(taken.name, expected.name)
def test_take_fill_value(self):
# GH 12631
idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'],
name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'],
name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with tm.assertRaises(IndexError):
idx.take(np.array([1, -5]))
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='td')
str(df)
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
tm.assert_index_equal(cols, joined)
def test_slice_keeps_name(self):
# GH4226
dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = timedelta_range('1 day', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
tm.assert_index_equal(index, joined)
def test_factorize(self):
idx1 = TimedeltaIndex(['1 day', '1 day', '2 day', '2 day', '3 day',
'3 day'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = TimedeltaIndex(['1 day', '2 day', '3 day'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = timedelta_range('1 day', periods=4, freq='s')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assert_index_equal(idx, idx3)
class TestSlicing(tm.TestCase):
def test_partial_slice(self):
rng = timedelta_range('1 day 10:11:12', freq='h', periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['5 day':'6 day']
expected = s.iloc[86:134]
assert_series_equal(result, expected)
result = s['5 day':]
expected = s.iloc[86:]
assert_series_equal(result, expected)
result = s[:'6 day']
expected = s.iloc[:134]
assert_series_equal(result, expected)
result = s['6 days, 23:11:12']
self.assertEqual(result, s.iloc[133])
self.assertRaises(KeyError, s.__getitem__, '50 days')
def test_partial_slice_high_reso(self):
# higher reso
rng = timedelta_range('1 day 10:11:12', freq='us', periods=2000)
s = Series(np.arange(len(rng)), index=rng)
result = s['1 day 10:11:12':]
expected = s.iloc[0:]
assert_series_equal(result, expected)
result = s['1 day 10:11:12.001':]
expected = s.iloc[1000:]
assert_series_equal(result, expected)
result = s['1 days, 10:11:12.001001']
self.assertEqual(result, s.iloc[1001])
def test_slice_with_negative_step(self):
ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Timedelta(hours=7)::-1], SLC[7::-1])
assert_slices_equivalent(SLC['7 hours'::-1], SLC[7::-1])
assert_slices_equivalent(SLC[:Timedelta(hours=7):-1], SLC[:6:-1])
assert_slices_equivalent(SLC[:'7 hours':-1], SLC[:6:-1])
assert_slices_equivalent(SLC['15 hours':'7 hours':-1], SLC[15:6:-1])
        assert_slices_equivalent(SLC[Timedelta(hours=15):Timedelta(hours=7):-1],
                                 SLC[15:6:-1])
assert_slices_equivalent(SLC['15 hours':Timedelta(hours=7):-1],
SLC[15:6:-1])
assert_slices_equivalent(SLC[Timedelta(hours=15):'7 hours':-1],
SLC[15:6:-1])
assert_slices_equivalent(SLC['7 hours':'15 hours':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4D')
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2D')
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
msg = "Overflow in int64 addition"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
SirEdvin/Pandas-Pipe
|
pandaspipe/pipeline.py
|
1
|
10315
|
# -*- coding:utf-8 -*-
import abc
import sys
import inspect
import types
import itertools
import networkx as nx
from pandaspipe.util import patch_list, isSubset
from pandaspipe.base import PipelineEntity
import logging
_log = logging.getLogger(__name__)
_log.addHandler(logging.StreamHandler(stream=sys.stdout))
class Pipeline:
def __init__(self, name='Undefined Pipeline', env=None):
"""(Pipeline, str) -> NoneType
Creating the contents of the Pipeline Object
"""
if env is None:
env = {}
self._entities = []
self.name = name
self.env = env
self.graph = None
def process(self, channels=('root',), ignore_outlet_node=False, output_channels=()):
"""(Pipeline, pandas.DataFrame, str) -> type(df_map)
*Description*
:param ignore_outlet_node:
"""
start_nodes = [self._get_start_node(channel) for channel in channels]
active_dfs = {}
active_nodes = []
acomplete_nodes = self.graph.nodes()
complete_nodes = []
active_nodes.extend(start_nodes)
while len(active_nodes) > 0:
next_nodes = []
processed = False
for active_node in active_nodes:
pred_nodes = self.graph.pred.get(active_node).keys()
                dependencies = active_node.external_dependencies
                if (len(pred_nodes) == 0 or isSubset(complete_nodes, pred_nodes)) and isSubset(active_dfs.keys(), dependencies):
_log.info('Call entity %s' % active_node)
processed = True
# Process
parameters = [active_dfs[channel] for channel in active_node.input_channels]
if active_node.type in ('node', 'bignode'):
external_dependencies = {}
if active_node.external_dependencies:
for external_dependency in active_node.external_dependencies:
external_dependencies[external_dependency] = active_dfs[external_dependency]
self.env['ext_dep'] = external_dependencies
result = active_node(*parameters)
active_nodes.remove(active_node)
complete_nodes.append(active_node)
acomplete_nodes.remove(active_node)
# Update active dataframes
if len(active_node.output_channels) == 1:
active_dfs[active_node.output_channels[0]] = result
elif len(active_node.output_channels) > 1:
active_dfs.update(result)
# Add next nodes
for node in self.graph.succ.get(active_node).keys():
if node not in active_nodes and node not in next_nodes:
next_nodes.append(node)
if not processed:
_log.error('Infinite cycle detected!')
return None
active_nodes.extend(next_nodes)
# Clear useless dfs
# Check if required by next node
for channel in active_dfs.keys():
if channel not in output_channels and len(
[active_node for active_node in active_nodes if channel in active_node.input_channels]) == 0:
# Check if required by external dependencies
required = reduce(lambda x, y: x or y, [channel in node.external_dependencies for node in acomplete_nodes], False)
if not required:
active_dfs.pop(channel)
if len(active_dfs.keys()) == 1:
return active_dfs.values()[0]
return active_dfs
def append(self, cls, channel=None, output_channel=None, construct_arguments=()):
"""(Pipeline, classobj, str, str) -> NoneType
*Description*
:param construct_arguments:
:param cls:
:param channel:
:param output_channel:
"""
self(channel, output_channel, construct_arguments=construct_arguments)(cls)
def build_process_graph(self):
builder = GraphBuilder(self._entities)
return builder.build()
def _check_graph(self):
if self.graph is None:
self.graph = self.build_process_graph()
def _get_start_node(self, channel):
self._check_graph()
nodes = filter(lambda x: channel in x.output_channels and x.type == 'source', self.graph.nodes())
if len(nodes) > 0:
return nodes[0]
        raise Exception('You can\'t use a channel that has no source node')
def _process_entity(self, cls, channel, outchannel, construct_arguments, priority):
"""(Pipeline, type(cls), type(channel), type(outchannel),
type(entity_map)) -> type(cls)
*Description*
"""
obj = cls(*construct_arguments)
obj.env = self.env
if priority:
obj.priority = priority
obj.register(self)
self._entities.append(obj)
if channel is None and len(obj.input_channels) == 0 and len(obj.output_channels) == 0:
channel = 'root'
if channel:
if outchannel is None:
outchannel = channel
if obj.type == 'node':
obj.input_channels = channel[:1] if isinstance(channel, list) else [channel]
obj.output_channels = outchannel[:1] if isinstance(outchannel, list) else [outchannel]
elif obj.type == 'bignode':
patch_list(obj.input_channels, channel)
patch_list(obj.output_channels, outchannel)
elif obj.type == 'source':
obj.input_channels = []
patch_list(obj.output_channels, outchannel)
elif obj.type == 'outlet':
patch_list(obj.input_channels, channel)
obj.output_channels = []
else:
                raise Exception('Unknown entity type: %s' % obj.type)
return cls
def __call__(self, channel=None, outchannel=None, construct_arguments=(), priority=None):
"""(Pipeline, str, str) ->
type(process_function)
*Description*
"""
def process_function(cls):
"""(type(cls)) ->
type(self._process_entity(cls, channel, outchannel, self._filters))
*Description*
:param cls:
"""
cls_mro = inspect.getmro(cls)
if PipelineEntity in cls_mro:
self._process_entity(cls, channel, outchannel, construct_arguments, priority)
return cls
if inspect.isclass(channel) or isinstance(channel, abc.ABCMeta):
cls = channel
channel = None
return process_function(cls)
return process_function
class GraphBuilder:
def __init__(self, entities):
self.entities = entities
self.channel_io_nodes = {}
self.graph = nx.DiGraph()
pass
def build(self):
self.graph.add_nodes_from(self.entities)
self._build_inchannel_connections()
self._build_multichannel_connections()
self._validate_external_dependencies()
return self.graph
def _build_inchannel_connections(self):
all_channels = set(
itertools.chain(*map(lambda x: set(itertools.chain(x.input_channels, x.output_channels)), self.entities)))
for channel in all_channels:
# Process simple nodes
channel_nodes = filter(lambda x: x.type == 'node'
and channel in x.input_channels and channel in x.output_channels,
self.entities)
channel_nodes.sort(key=lambda x: (x.priority, x.__class__.__name__))
self.channel_io_nodes[channel] = {}
if len(channel_nodes) > 0:
self.channel_io_nodes[channel]['input'] = channel_nodes[0]
self.channel_io_nodes[channel]['output'] = channel_nodes[-1]
# noinspection PyCompatibility
for i in xrange(0, len(channel_nodes) - 1):
self.graph.add_edge(channel_nodes[i], channel_nodes[i + 1])
# Process outlet and source
input_nodes = filter(lambda x: x.type == 'source' and channel in x.output_channels, self.entities)
            assert len(input_nodes) in (0, 1), 'You can\'t use more than one source node for a channel'
if len(input_nodes) > 0:
if len(channel_nodes) > 0:
self.graph.add_edge(input_nodes[0], self.channel_io_nodes[channel]['input'])
else:
self.graph.add_node(input_nodes[0])
self.channel_io_nodes[channel]['output'] = input_nodes[0]
output_nodes = filter(lambda x: x.type == 'outlet' and channel in x.input_channels, self.entities)
self.graph.add_nodes_from(output_nodes)
if len(output_nodes) > 0:
self.channel_io_nodes[channel]['outlets'] = output_nodes
if len(channel_nodes) > 0:
for output_node in output_nodes:
self.graph.add_edge(self.channel_io_nodes[channel]['output'], output_node)
pass
def _build_multichannel_connections(self):
for node in filter(lambda x: x.type in ('bignode', 'node') and x.input_channels != x.output_channels,
self.entities):
for input_channel in node.input_channels:
self.graph.add_edge(self.channel_io_nodes[input_channel]['output'], node)
for output_channel in node.output_channels:
channel_info = self.channel_io_nodes[output_channel]
if not channel_info.get('input') and not channel_info.get('outlets'):
                    raise Exception('Channel %s has neither input nodes nor outlets' % output_channel)
if channel_info.get('input'):
self.graph.add_edge(node, channel_info['input'])
if channel_info.get('outlets'):
for outlet in channel_info.get('outlets'):
self.graph.add_edge(node, outlet)
def _validate_external_dependencies(self):
pass
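# ---------------------------------------------------------------------------
# Usage sketch (illustrative comments only, not part of the original module).
# `CsvSource` and `PrintOutlet` are hypothetical PipelineEntity subclasses
# (type 'source' and 'outlet' respectively); they only show how registration
# and processing fit together:
#
#     pipeline = Pipeline(name='example')
#     pipeline.append(CsvSource, channel='root')     # source feeding 'root'
#     pipeline.append(PrintOutlet, channel='root')   # outlet consuming 'root'
#     result = pipeline.process(channels=('root',))
#
# The same registration can also be done decorator-style with
# `@pipeline('root')` on the entity class, since Pipeline.__call__ returns a
# class decorator.
# ---------------------------------------------------------------------------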
|
apache-2.0
|
hotpxl/nebuchadnezzar
|
sentiment/twitter/test_learning_rate.py
|
4
|
4298
|
"""
Comparing AdaGrad, AdaDelta and a constant learning rate in gradient descent on a saddle point function (here x[0]**2 - x[1]**2)
Reference:
1. comparison of several learning rate update schemes: http://ml.memect.com/archive/2014-12-12/short.html#3786866375172817
2. Saddle point, http://en.wikipedia.org/wiki/Saddle_point
"""
import numpy as np
import theano
import theano.tensor as T
rho = 0.95
epsilon = 0.00001
gamma = 0.1
const_lr = 0.01
init_x = [0.1, 0.1]
x = theano.shared(
np.array(init_x, dtype = theano.config.floatX),
borrow = True,
name = "x"
)
tolerance = 0.01
params = [x]
param_shapes = [(2,)]
# cost = 0.5 * (x[0]-2) ** 2 + (x[1]-2) ** 2
cost = x[0] ** 2 - x[1] ** 2
param_grads = [T.grad(cost, param) for param in params]
def make_func(x, cost, updates, init_x):
x.set_value(init_x)
f = theano.function(
inputs = [],
outputs = [x, cost],
updates = updates
)
return f
def simulate(f, n_epoch_max = 100):
epoch = 0
used_epochs = 0
xs = []
print "##################"
while epoch < n_epoch_max:
x_val, cost_val = f()
xs.append(x_val)
        # if abs(cost_val) < tolerance:
# break
epoch += 1
used_epochs += 1
return xs, used_epochs
###############
# ADADELTA #
###############
print "Using AdaDelta with rho = %f and epsilon = %f" %(rho, epsilon)
egs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Eg:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
exs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Ex:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
new_egs = [
rho * eg + (1 - rho) * g ** 2
for eg, g in zip(egs, param_grads)
]
delta_x = [
-(T.sqrt(ex + epsilon) / T.sqrt(new_eg + epsilon)) * g
for new_eg, ex, g in zip(new_egs, exs, param_grads)
]
new_exs = [
rho * ex + (1 - rho) * (dx ** 2)
for ex, dx in zip(exs, delta_x)
]
egs_updates = zip(egs, new_egs)
exs_updates = zip(exs, new_exs)
param_updates = [
(p, p + dx)
for dx, g, p in zip(delta_x, param_grads, params)
]
updates = egs_updates + exs_updates + param_updates
f = make_func(x, cost, updates, init_x)
adadelta_xs, adadelta_epochs = simulate(f)
##############
# ADAGRAD #
##############
print "Using AdaGrad with gamma = %f and epsilon = %f" %(gamma, epsilon)
grad_hists = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "grad_hist:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
new_grad_hists = [
g_hist + g ** 2
for g_hist, g in zip(grad_hists, param_grads)
]
param_updates = [
(param, param - theano.printing.Print("lr")(gamma * epsilon / (T.sqrt(g_hist) + epsilon)) * param_grad)
for param, param_grad in zip(params, param_grads)
]
grad_hist_update = zip(grad_hists, new_grad_hists)
updates = grad_hist_update + param_updates
f = make_func(x, cost, updates, init_x)
adagrad_xs, adagrad_epochs = simulate(f)
###############
# constant lr #
###############
print "Usin constant learning rate %f" %(const_lr)
updates = [
(param, param - const_lr * param_grad)
for param, param_grad in zip(params, param_grads)
]
f = make_func(x, cost, updates, init_x)
const_lr_xs, const_lr_epochs = simulate(f)
from matplotlib import pyplot as plt
def myplot(data, style, title, plot_number, total):
plt.subplot(1,total,plot_number)
x, y = zip(*data)
    plt.plot(x, y, style)
plt.title(title)
plt.xlim([-10, 10]); plt.ylim([-10, 10])
myplot(adadelta_xs,
'ro-',
"AdaDelta(%d epochs)" %(adadelta_epochs),
1, 3)
myplot(adagrad_xs,
'ro-',
"AdaGrad(%d epochs)" %(adagrad_epochs),
2, 3)
myplot(const_lr_xs,
'ro-',
"ConstLR(%d epochs)" %(const_lr_epochs),
3, 3)
plt.show()
|
mit
|
shoyer/xarray
|
xarray/tests/test_computation.py
|
1
|
40215
|
import functools
import operator
import pickle
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
import xarray as xr
from xarray.core.computation import (
_UFuncSignature,
apply_ufunc,
broadcast_compat_data,
collect_dict_values,
join_dict_keys,
ordered_set_intersection,
ordered_set_union,
result_name,
unified_dim_sizes,
)
from . import has_dask, raises_regex, requires_dask
def assert_identical(a, b):
if hasattr(a, "identical"):
msg = f"not identical:\n{a!r}\n{b!r}"
assert a.identical(b), msg
else:
assert_array_equal(a, b)
def test_signature_properties():
sig = _UFuncSignature([["x"], ["x", "y"]], [["z"]])
assert sig.input_core_dims == (("x",), ("x", "y"))
assert sig.output_core_dims == (("z",),)
assert sig.all_input_core_dims == frozenset(["x", "y"])
assert sig.all_output_core_dims == frozenset(["z"])
assert sig.num_inputs == 2
assert sig.num_outputs == 1
assert str(sig) == "(x),(x,y)->(z)"
assert sig.to_gufunc_string() == "(dim0),(dim0,dim1)->(dim2)"
# dimension names matter
assert _UFuncSignature([["x"]]) != _UFuncSignature([["y"]])
def test_result_name():
class Named:
def __init__(self, name=None):
self.name = name
assert result_name([1, 2]) is None
assert result_name([Named()]) is None
assert result_name([Named("foo"), 2]) == "foo"
assert result_name([Named("foo"), Named("bar")]) is None
assert result_name([Named("foo"), Named()]) is None
def test_ordered_set_union():
assert list(ordered_set_union([[1, 2]])) == [1, 2]
assert list(ordered_set_union([[1, 2], [2, 1]])) == [1, 2]
assert list(ordered_set_union([[0], [1, 2], [1, 3]])) == [0, 1, 2, 3]
def test_ordered_set_intersection():
assert list(ordered_set_intersection([[1, 2]])) == [1, 2]
assert list(ordered_set_intersection([[1, 2], [2, 1]])) == [1, 2]
assert list(ordered_set_intersection([[1, 2], [1, 3]])) == [1]
assert list(ordered_set_intersection([[1, 2], [2]])) == [2]
def test_join_dict_keys():
dicts = [dict.fromkeys(keys) for keys in [["x", "y"], ["y", "z"]]]
assert list(join_dict_keys(dicts, "left")) == ["x", "y"]
assert list(join_dict_keys(dicts, "right")) == ["y", "z"]
assert list(join_dict_keys(dicts, "inner")) == ["y"]
assert list(join_dict_keys(dicts, "outer")) == ["x", "y", "z"]
with pytest.raises(ValueError):
join_dict_keys(dicts, "exact")
with pytest.raises(KeyError):
join_dict_keys(dicts, "foobar")
def test_collect_dict_values():
dicts = [{"x": 1, "y": 2, "z": 3}, {"z": 4}, 5]
expected = [[1, 0, 5], [2, 0, 5], [3, 4, 5]]
collected = collect_dict_values(dicts, ["x", "y", "z"], fill_value=0)
assert collected == expected
def identity(x):
return x
def test_apply_identity():
array = np.arange(10)
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
apply_identity = functools.partial(apply_ufunc, identity)
assert_identical(array, apply_identity(array))
assert_identical(variable, apply_identity(variable))
assert_identical(data_array, apply_identity(data_array))
assert_identical(data_array, apply_identity(data_array.groupby("x")))
assert_identical(dataset, apply_identity(dataset))
assert_identical(dataset, apply_identity(dataset.groupby("x")))
def add(a, b):
return apply_ufunc(operator.add, a, b)
def test_apply_two_inputs():
array = np.array([1, 2, 3])
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
zero_array = np.zeros_like(array)
zero_variable = xr.Variable("x", zero_array)
zero_data_array = xr.DataArray(zero_variable, [("x", -array)])
zero_dataset = xr.Dataset({"y": zero_variable}, {"x": -array})
assert_identical(array, add(array, zero_array))
assert_identical(array, add(zero_array, array))
assert_identical(variable, add(variable, zero_array))
assert_identical(variable, add(variable, zero_variable))
assert_identical(variable, add(zero_array, variable))
assert_identical(variable, add(zero_variable, variable))
assert_identical(data_array, add(data_array, zero_array))
assert_identical(data_array, add(data_array, zero_variable))
assert_identical(data_array, add(data_array, zero_data_array))
assert_identical(data_array, add(zero_array, data_array))
assert_identical(data_array, add(zero_variable, data_array))
assert_identical(data_array, add(zero_data_array, data_array))
assert_identical(dataset, add(dataset, zero_array))
assert_identical(dataset, add(dataset, zero_variable))
assert_identical(dataset, add(dataset, zero_data_array))
assert_identical(dataset, add(dataset, zero_dataset))
assert_identical(dataset, add(zero_array, dataset))
assert_identical(dataset, add(zero_variable, dataset))
assert_identical(dataset, add(zero_data_array, dataset))
assert_identical(dataset, add(zero_dataset, dataset))
assert_identical(data_array, add(data_array.groupby("x"), zero_data_array))
assert_identical(data_array, add(zero_data_array, data_array.groupby("x")))
assert_identical(dataset, add(data_array.groupby("x"), zero_dataset))
assert_identical(dataset, add(zero_dataset, data_array.groupby("x")))
assert_identical(dataset, add(dataset.groupby("x"), zero_data_array))
assert_identical(dataset, add(dataset.groupby("x"), zero_dataset))
assert_identical(dataset, add(zero_data_array, dataset.groupby("x")))
assert_identical(dataset, add(zero_dataset, dataset.groupby("x")))
def test_apply_1d_and_0d():
array = np.array([1, 2, 3])
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
zero_array = 0
zero_variable = xr.Variable((), zero_array)
zero_data_array = xr.DataArray(zero_variable)
zero_dataset = xr.Dataset({"y": zero_variable})
assert_identical(array, add(array, zero_array))
assert_identical(array, add(zero_array, array))
assert_identical(variable, add(variable, zero_array))
assert_identical(variable, add(variable, zero_variable))
assert_identical(variable, add(zero_array, variable))
assert_identical(variable, add(zero_variable, variable))
assert_identical(data_array, add(data_array, zero_array))
assert_identical(data_array, add(data_array, zero_variable))
assert_identical(data_array, add(data_array, zero_data_array))
assert_identical(data_array, add(zero_array, data_array))
assert_identical(data_array, add(zero_variable, data_array))
assert_identical(data_array, add(zero_data_array, data_array))
assert_identical(dataset, add(dataset, zero_array))
assert_identical(dataset, add(dataset, zero_variable))
assert_identical(dataset, add(dataset, zero_data_array))
assert_identical(dataset, add(dataset, zero_dataset))
assert_identical(dataset, add(zero_array, dataset))
assert_identical(dataset, add(zero_variable, dataset))
assert_identical(dataset, add(zero_data_array, dataset))
assert_identical(dataset, add(zero_dataset, dataset))
assert_identical(data_array, add(data_array.groupby("x"), zero_data_array))
assert_identical(data_array, add(zero_data_array, data_array.groupby("x")))
assert_identical(dataset, add(data_array.groupby("x"), zero_dataset))
assert_identical(dataset, add(zero_dataset, data_array.groupby("x")))
assert_identical(dataset, add(dataset.groupby("x"), zero_data_array))
assert_identical(dataset, add(dataset.groupby("x"), zero_dataset))
assert_identical(dataset, add(zero_data_array, dataset.groupby("x")))
assert_identical(dataset, add(zero_dataset, dataset.groupby("x")))
def test_apply_two_outputs():
array = np.arange(5)
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
def twice(obj):
def func(x):
return (x, x)
return apply_ufunc(func, obj, output_core_dims=[[], []])
out0, out1 = twice(array)
assert_identical(out0, array)
assert_identical(out1, array)
out0, out1 = twice(variable)
assert_identical(out0, variable)
assert_identical(out1, variable)
out0, out1 = twice(data_array)
assert_identical(out0, data_array)
assert_identical(out1, data_array)
out0, out1 = twice(dataset)
assert_identical(out0, dataset)
assert_identical(out1, dataset)
out0, out1 = twice(data_array.groupby("x"))
assert_identical(out0, data_array)
assert_identical(out1, data_array)
out0, out1 = twice(dataset.groupby("x"))
assert_identical(out0, dataset)
assert_identical(out1, dataset)
def test_apply_input_core_dimension():
def first_element(obj, dim):
def func(x):
return x[..., 0]
return apply_ufunc(func, obj, input_core_dims=[[dim]])
array = np.array([[1, 2], [3, 4]])
variable = xr.Variable(["x", "y"], array)
data_array = xr.DataArray(variable, {"x": ["a", "b"], "y": [-1, -2]})
dataset = xr.Dataset({"data": data_array})
expected_variable_x = xr.Variable(["y"], [1, 2])
expected_data_array_x = xr.DataArray(expected_variable_x, {"y": [-1, -2]})
expected_dataset_x = xr.Dataset({"data": expected_data_array_x})
expected_variable_y = xr.Variable(["x"], [1, 3])
expected_data_array_y = xr.DataArray(expected_variable_y, {"x": ["a", "b"]})
expected_dataset_y = xr.Dataset({"data": expected_data_array_y})
assert_identical(expected_variable_x, first_element(variable, "x"))
assert_identical(expected_variable_y, first_element(variable, "y"))
assert_identical(expected_data_array_x, first_element(data_array, "x"))
assert_identical(expected_data_array_y, first_element(data_array, "y"))
assert_identical(expected_dataset_x, first_element(dataset, "x"))
assert_identical(expected_dataset_y, first_element(dataset, "y"))
assert_identical(expected_data_array_x, first_element(data_array.groupby("y"), "x"))
assert_identical(expected_dataset_x, first_element(dataset.groupby("y"), "x"))
def multiply(*args):
val = args[0]
for arg in args[1:]:
val = val * arg
return val
# regression test for GH:2341
with pytest.raises(ValueError):
apply_ufunc(
multiply,
data_array,
data_array["y"].values,
input_core_dims=[["y"]],
output_core_dims=[["y"]],
)
expected = xr.DataArray(
multiply(data_array, data_array["y"]), dims=["x", "y"], coords=data_array.coords
)
actual = apply_ufunc(
multiply,
data_array,
data_array["y"].values,
input_core_dims=[["y"], []],
output_core_dims=[["y"]],
)
assert_identical(expected, actual)
def test_apply_output_core_dimension():
def stack_negative(obj):
def func(x):
return np.stack([x, -x], axis=-1)
result = apply_ufunc(func, obj, output_core_dims=[["sign"]])
if isinstance(result, (xr.Dataset, xr.DataArray)):
result.coords["sign"] = [1, -1]
return result
array = np.array([[1, 2], [3, 4]])
variable = xr.Variable(["x", "y"], array)
data_array = xr.DataArray(variable, {"x": ["a", "b"], "y": [-1, -2]})
dataset = xr.Dataset({"data": data_array})
stacked_array = np.array([[[1, -1], [2, -2]], [[3, -3], [4, -4]]])
stacked_variable = xr.Variable(["x", "y", "sign"], stacked_array)
stacked_coords = {"x": ["a", "b"], "y": [-1, -2], "sign": [1, -1]}
stacked_data_array = xr.DataArray(stacked_variable, stacked_coords)
stacked_dataset = xr.Dataset({"data": stacked_data_array})
assert_identical(stacked_array, stack_negative(array))
assert_identical(stacked_variable, stack_negative(variable))
assert_identical(stacked_data_array, stack_negative(data_array))
assert_identical(stacked_dataset, stack_negative(dataset))
assert_identical(stacked_data_array, stack_negative(data_array.groupby("x")))
assert_identical(stacked_dataset, stack_negative(dataset.groupby("x")))
def original_and_stack_negative(obj):
def func(x):
return (x, np.stack([x, -x], axis=-1))
result = apply_ufunc(func, obj, output_core_dims=[[], ["sign"]])
if isinstance(result[1], (xr.Dataset, xr.DataArray)):
result[1].coords["sign"] = [1, -1]
return result
out0, out1 = original_and_stack_negative(array)
assert_identical(array, out0)
assert_identical(stacked_array, out1)
out0, out1 = original_and_stack_negative(variable)
assert_identical(variable, out0)
assert_identical(stacked_variable, out1)
out0, out1 = original_and_stack_negative(data_array)
assert_identical(data_array, out0)
assert_identical(stacked_data_array, out1)
out0, out1 = original_and_stack_negative(dataset)
assert_identical(dataset, out0)
assert_identical(stacked_dataset, out1)
out0, out1 = original_and_stack_negative(data_array.groupby("x"))
assert_identical(data_array, out0)
assert_identical(stacked_data_array, out1)
out0, out1 = original_and_stack_negative(dataset.groupby("x"))
assert_identical(dataset, out0)
assert_identical(stacked_dataset, out1)
def test_apply_exclude():
def concatenate(objects, dim="x"):
def func(*x):
return np.concatenate(x, axis=-1)
result = apply_ufunc(
func,
*objects,
input_core_dims=[[dim]] * len(objects),
output_core_dims=[[dim]],
exclude_dims={dim},
)
if isinstance(result, (xr.Dataset, xr.DataArray)):
# note: this will fail if dim is not a coordinate on any input
new_coord = np.concatenate([obj.coords[dim] for obj in objects])
result.coords[dim] = new_coord
return result
arrays = [np.array([1]), np.array([2, 3])]
variables = [xr.Variable("x", a) for a in arrays]
data_arrays = [
xr.DataArray(v, {"x": c, "y": ("x", range(len(c)))})
for v, c in zip(variables, [["a"], ["b", "c"]])
]
datasets = [xr.Dataset({"data": data_array}) for data_array in data_arrays]
expected_array = np.array([1, 2, 3])
expected_variable = xr.Variable("x", expected_array)
expected_data_array = xr.DataArray(expected_variable, [("x", list("abc"))])
expected_dataset = xr.Dataset({"data": expected_data_array})
assert_identical(expected_array, concatenate(arrays))
assert_identical(expected_variable, concatenate(variables))
assert_identical(expected_data_array, concatenate(data_arrays))
assert_identical(expected_dataset, concatenate(datasets))
# must also be a core dimension
with pytest.raises(ValueError):
apply_ufunc(identity, variables[0], exclude_dims={"x"})
def test_apply_groupby_add():
array = np.arange(5)
variable = xr.Variable("x", array)
coords = {"x": -array, "y": ("x", [0, 0, 1, 1, 2])}
data_array = xr.DataArray(variable, coords, dims="x")
dataset = xr.Dataset({"z": variable}, coords)
other_variable = xr.Variable("y", [0, 10])
other_data_array = xr.DataArray(other_variable, dims="y")
other_dataset = xr.Dataset({"z": other_variable})
expected_variable = xr.Variable("x", [0, 1, 12, 13, np.nan])
expected_data_array = xr.DataArray(expected_variable, coords, dims="x")
expected_dataset = xr.Dataset({"z": expected_variable}, coords)
assert_identical(
expected_data_array, add(data_array.groupby("y"), other_data_array)
)
assert_identical(expected_dataset, add(data_array.groupby("y"), other_dataset))
assert_identical(expected_dataset, add(dataset.groupby("y"), other_data_array))
assert_identical(expected_dataset, add(dataset.groupby("y"), other_dataset))
# cannot be performed with xarray.Variable objects that share a dimension
with pytest.raises(ValueError):
add(data_array.groupby("y"), other_variable)
    # can only be performed if they are all grouped the same way (identical groups)
with pytest.raises(ValueError):
add(data_array.groupby("y"), data_array[:4].groupby("y"))
with pytest.raises(ValueError):
add(data_array.groupby("y"), data_array[1:].groupby("y"))
with pytest.raises(ValueError):
add(data_array.groupby("y"), other_data_array.groupby("y"))
with pytest.raises(ValueError):
add(data_array.groupby("y"), data_array.groupby("x"))
def test_unified_dim_sizes():
assert unified_dim_sizes([xr.Variable((), 0)]) == {}
assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1])]) == {"x": 1}
assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("y", [1, 2])]) == {
"x": 1,
"y": 2,
}
assert unified_dim_sizes(
[xr.Variable(("x", "z"), [[1]]), xr.Variable(("y", "z"), [[1, 2], [3, 4]])],
exclude_dims={"z"},
) == {"x": 1, "y": 2}
# duplicate dimensions
with pytest.raises(ValueError):
unified_dim_sizes([xr.Variable(("x", "x"), [[1]])])
# mismatched lengths
with pytest.raises(ValueError):
unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1, 2])])
def test_broadcast_compat_data_1d():
data = np.arange(5)
var = xr.Variable("x", data)
assert_identical(data, broadcast_compat_data(var, ("x",), ()))
assert_identical(data, broadcast_compat_data(var, (), ("x",)))
assert_identical(data[:], broadcast_compat_data(var, ("w",), ("x",)))
assert_identical(data[:, None], broadcast_compat_data(var, ("w", "x", "y"), ()))
with pytest.raises(ValueError):
broadcast_compat_data(var, ("x",), ("w",))
with pytest.raises(ValueError):
broadcast_compat_data(var, (), ())
def test_broadcast_compat_data_2d():
data = np.arange(12).reshape(3, 4)
var = xr.Variable(["x", "y"], data)
assert_identical(data, broadcast_compat_data(var, ("x", "y"), ()))
assert_identical(data, broadcast_compat_data(var, ("x",), ("y",)))
assert_identical(data, broadcast_compat_data(var, (), ("x", "y")))
assert_identical(data.T, broadcast_compat_data(var, ("y", "x"), ()))
assert_identical(data.T, broadcast_compat_data(var, ("y",), ("x",)))
assert_identical(data, broadcast_compat_data(var, ("w", "x"), ("y",)))
assert_identical(data, broadcast_compat_data(var, ("w",), ("x", "y")))
assert_identical(data.T, broadcast_compat_data(var, ("w",), ("y", "x")))
assert_identical(
data[:, :, None], broadcast_compat_data(var, ("w", "x", "y", "z"), ())
)
assert_identical(
data[None, :, :].T, broadcast_compat_data(var, ("w", "y", "x", "z"), ())
)
def test_keep_attrs():
def add(a, b, keep_attrs):
if keep_attrs:
return apply_ufunc(operator.add, a, b, keep_attrs=keep_attrs)
else:
return apply_ufunc(operator.add, a, b)
a = xr.DataArray([0, 1], [("x", [0, 1])])
a.attrs["attr"] = "da"
a["x"].attrs["attr"] = "da_coord"
b = xr.DataArray([1, 2], [("x", [0, 1])])
actual = add(a, b, keep_attrs=False)
assert not actual.attrs
actual = add(a, b, keep_attrs=True)
assert_identical(actual.attrs, a.attrs)
assert_identical(actual["x"].attrs, a["x"].attrs)
actual = add(a.variable, b.variable, keep_attrs=False)
assert not actual.attrs
actual = add(a.variable, b.variable, keep_attrs=True)
assert_identical(actual.attrs, a.attrs)
a = xr.Dataset({"x": [0, 1]})
a.attrs["attr"] = "ds"
a.x.attrs["attr"] = "da"
b = xr.Dataset({"x": [0, 1]})
actual = add(a, b, keep_attrs=False)
assert not actual.attrs
actual = add(a, b, keep_attrs=True)
assert_identical(actual.attrs, a.attrs)
assert_identical(actual.x.attrs, a.x.attrs)
def test_dataset_join():
ds0 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
ds1 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]})
# by default, cannot have different labels
with raises_regex(ValueError, "indexes .* are not equal"):
apply_ufunc(operator.add, ds0, ds1)
with raises_regex(TypeError, "must supply"):
apply_ufunc(operator.add, ds0, ds1, dataset_join="outer")
def add(a, b, join, dataset_join):
return apply_ufunc(
operator.add,
a,
b,
join=join,
dataset_join=dataset_join,
dataset_fill_value=np.nan,
)
actual = add(ds0, ds1, "outer", "inner")
expected = xr.Dataset({"a": ("x", [np.nan, 101, np.nan]), "x": [0, 1, 2]})
assert_identical(actual, expected)
actual = add(ds0, ds1, "outer", "outer")
assert_identical(actual, expected)
with raises_regex(ValueError, "data variable names"):
apply_ufunc(operator.add, ds0, xr.Dataset({"b": 1}))
ds2 = xr.Dataset({"b": ("x", [99, 3]), "x": [1, 2]})
actual = add(ds0, ds2, "outer", "inner")
expected = xr.Dataset({"x": [0, 1, 2]})
assert_identical(actual, expected)
# we used np.nan as the fill_value in add() above
actual = add(ds0, ds2, "outer", "outer")
expected = xr.Dataset(
{
"a": ("x", [np.nan, np.nan, np.nan]),
"b": ("x", [np.nan, np.nan, np.nan]),
"x": [0, 1, 2],
}
)
assert_identical(actual, expected)
@requires_dask
def test_apply_dask():
import dask.array as da
array = da.ones((2,), chunks=2)
variable = xr.Variable("x", array)
coords = xr.DataArray(variable).coords.variables
data_array = xr.DataArray(variable, dims=["x"], coords=coords)
dataset = xr.Dataset({"y": variable})
# encountered dask array, but did not set dask='allowed'
with pytest.raises(ValueError):
apply_ufunc(identity, array)
with pytest.raises(ValueError):
apply_ufunc(identity, variable)
with pytest.raises(ValueError):
apply_ufunc(identity, data_array)
with pytest.raises(ValueError):
apply_ufunc(identity, dataset)
# unknown setting for dask array handling
with pytest.raises(ValueError):
apply_ufunc(identity, array, dask="unknown")
def dask_safe_identity(x):
return apply_ufunc(identity, x, dask="allowed")
assert array is dask_safe_identity(array)
actual = dask_safe_identity(variable)
assert isinstance(actual.data, da.Array)
assert_identical(variable, actual)
actual = dask_safe_identity(data_array)
assert isinstance(actual.data, da.Array)
assert_identical(data_array, actual)
actual = dask_safe_identity(dataset)
assert isinstance(actual["y"].data, da.Array)
assert_identical(dataset, actual)
@requires_dask
def test_apply_dask_parallelized_one_arg():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=("x", "y"))
def parallel_identity(x):
return apply_ufunc(identity, x, dask="parallelized", output_dtypes=[x.dtype])
actual = parallel_identity(data_array)
assert isinstance(actual.data, da.Array)
assert actual.data.chunks == array.chunks
assert_identical(data_array, actual)
computed = data_array.compute()
actual = parallel_identity(computed)
assert_identical(computed, actual)
@requires_dask
def test_apply_dask_parallelized_two_args():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1), dtype=np.int64)
data_array = xr.DataArray(array, dims=("x", "y"))
data_array.name = None
def parallel_add(x, y):
return apply_ufunc(
operator.add, x, y, dask="parallelized", output_dtypes=[np.int64]
)
def check(x, y):
actual = parallel_add(x, y)
assert isinstance(actual.data, da.Array)
assert actual.data.chunks == array.chunks
assert_identical(data_array, actual)
    check(data_array, 0)
check(0, data_array)
check(data_array, xr.DataArray(0))
check(data_array, 0 * data_array)
check(data_array, 0 * data_array[0])
check(data_array[:, 0], 0 * data_array[0])
check(data_array, 0 * data_array.compute())
@requires_dask
def test_apply_dask_parallelized_errors():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=("x", "y"))
with pytest.raises(NotImplementedError):
apply_ufunc(
identity, data_array, output_core_dims=[["z"], ["z"]], dask="parallelized"
)
with raises_regex(ValueError, "dtypes"):
apply_ufunc(identity, data_array, dask="parallelized")
with raises_regex(TypeError, "list"):
apply_ufunc(identity, data_array, dask="parallelized", output_dtypes=float)
with raises_regex(ValueError, "must have the same length"):
apply_ufunc(
identity, data_array, dask="parallelized", output_dtypes=[float, float]
)
with raises_regex(ValueError, "output_sizes"):
apply_ufunc(
identity,
data_array,
output_core_dims=[["z"]],
output_dtypes=[float],
dask="parallelized",
)
with raises_regex(ValueError, "at least one input is an xarray object"):
apply_ufunc(identity, array, dask="parallelized")
with raises_regex(ValueError, "consists of multiple chunks"):
apply_ufunc(
identity,
data_array,
dask="parallelized",
output_dtypes=[float],
input_core_dims=[("y",)],
output_core_dims=[("y",)],
)
# it's currently impossible to silence these warnings from inside dask.array:
# https://github.com/dask/dask/issues/3245
@requires_dask
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
def test_apply_dask_multiple_inputs():
import dask.array as da
def covariance(x, y):
return (
(x - x.mean(axis=-1, keepdims=True)) * (y - y.mean(axis=-1, keepdims=True))
).mean(axis=-1)
rs = np.random.RandomState(42)
array1 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
array2 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
data_array_1 = xr.DataArray(array1, dims=("x", "z"))
data_array_2 = xr.DataArray(array2, dims=("y", "z"))
expected = apply_ufunc(
covariance,
data_array_1.compute(),
data_array_2.compute(),
input_core_dims=[["z"], ["z"]],
)
allowed = apply_ufunc(
covariance,
data_array_1,
data_array_2,
input_core_dims=[["z"], ["z"]],
dask="allowed",
)
assert isinstance(allowed.data, da.Array)
xr.testing.assert_allclose(expected, allowed.compute())
parallelized = apply_ufunc(
covariance,
data_array_1,
data_array_2,
input_core_dims=[["z"], ["z"]],
dask="parallelized",
output_dtypes=[float],
)
assert isinstance(parallelized.data, da.Array)
xr.testing.assert_allclose(expected, parallelized.compute())
@requires_dask
def test_apply_dask_new_output_dimension():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=("x", "y"))
def stack_negative(obj):
def func(x):
return np.stack([x, -x], axis=-1)
return apply_ufunc(
func,
obj,
output_core_dims=[["sign"]],
dask="parallelized",
output_dtypes=[obj.dtype],
output_sizes={"sign": 2},
)
expected = stack_negative(data_array.compute())
actual = stack_negative(data_array)
assert actual.dims == ("x", "y", "sign")
assert actual.shape == (2, 2, 2)
assert isinstance(actual.data, da.Array)
assert_identical(expected, actual)
def pandas_median(x):
return pd.Series(x).median()
def test_vectorize():
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
expected = xr.DataArray([1, 2], dims=["x"])
actual = apply_ufunc(
pandas_median, data_array, input_core_dims=[["y"]], vectorize=True
)
assert_identical(expected, actual)
@requires_dask
def test_vectorize_dask():
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
expected = xr.DataArray([1, 2], dims=["x"])
actual = apply_ufunc(
pandas_median,
data_array.chunk({"x": 1}),
input_core_dims=[["y"]],
vectorize=True,
dask="parallelized",
output_dtypes=[float],
)
assert_identical(expected, actual)
@requires_dask
def test_vectorize_dask_new_output_dims():
# regression test for GH3574
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y"))
func = lambda x: x[np.newaxis, ...]
expected = data_array.expand_dims("z")
actual = apply_ufunc(
func,
data_array.chunk({"x": 1}),
output_core_dims=[["z"]],
vectorize=True,
dask="parallelized",
output_dtypes=[float],
output_sizes={"z": 1},
).transpose(*expected.dims)
assert_identical(expected, actual)
def test_output_wrong_number():
variable = xr.Variable("x", np.arange(10))
def identity(x):
return x
def tuple3x(x):
return (x, x, x)
with raises_regex(ValueError, "number of outputs"):
apply_ufunc(identity, variable, output_core_dims=[(), ()])
with raises_regex(ValueError, "number of outputs"):
apply_ufunc(tuple3x, variable, output_core_dims=[(), ()])
def test_output_wrong_dims():
variable = xr.Variable("x", np.arange(10))
def add_dim(x):
return x[..., np.newaxis]
def remove_dim(x):
return x[..., 0]
with raises_regex(ValueError, "unexpected number of dimensions"):
apply_ufunc(add_dim, variable, output_core_dims=[("y", "z")])
with raises_regex(ValueError, "unexpected number of dimensions"):
apply_ufunc(add_dim, variable)
with raises_regex(ValueError, "unexpected number of dimensions"):
apply_ufunc(remove_dim, variable)
def test_output_wrong_dim_size():
array = np.arange(10)
variable = xr.Variable("x", array)
data_array = xr.DataArray(variable, [("x", -array)])
dataset = xr.Dataset({"y": variable}, {"x": -array})
def truncate(array):
return array[:5]
def apply_truncate_broadcast_invalid(obj):
return apply_ufunc(truncate, obj)
with raises_regex(ValueError, "size of dimension"):
apply_truncate_broadcast_invalid(variable)
with raises_regex(ValueError, "size of dimension"):
apply_truncate_broadcast_invalid(data_array)
with raises_regex(ValueError, "size of dimension"):
apply_truncate_broadcast_invalid(dataset)
def apply_truncate_x_x_invalid(obj):
return apply_ufunc(
truncate, obj, input_core_dims=[["x"]], output_core_dims=[["x"]]
)
with raises_regex(ValueError, "size of dimension"):
apply_truncate_x_x_invalid(variable)
with raises_regex(ValueError, "size of dimension"):
apply_truncate_x_x_invalid(data_array)
with raises_regex(ValueError, "size of dimension"):
apply_truncate_x_x_invalid(dataset)
def apply_truncate_x_z(obj):
return apply_ufunc(
truncate, obj, input_core_dims=[["x"]], output_core_dims=[["z"]]
)
assert_identical(xr.Variable("z", array[:5]), apply_truncate_x_z(variable))
assert_identical(
xr.DataArray(array[:5], dims=["z"]), apply_truncate_x_z(data_array)
)
assert_identical(xr.Dataset({"y": ("z", array[:5])}), apply_truncate_x_z(dataset))
def apply_truncate_x_x_valid(obj):
return apply_ufunc(
truncate,
obj,
input_core_dims=[["x"]],
output_core_dims=[["x"]],
exclude_dims={"x"},
)
assert_identical(xr.Variable("x", array[:5]), apply_truncate_x_x_valid(variable))
assert_identical(
xr.DataArray(array[:5], dims=["x"]), apply_truncate_x_x_valid(data_array)
)
assert_identical(
xr.Dataset({"y": ("x", array[:5])}), apply_truncate_x_x_valid(dataset)
)
@pytest.mark.parametrize("use_dask", [True, False])
def test_dot(use_dask):
if use_dask:
if not has_dask:
pytest.skip("test for dask.")
a = np.arange(30 * 4).reshape(30, 4)
b = np.arange(30 * 4 * 5).reshape(30, 4, 5)
c = np.arange(5 * 60).reshape(5, 60)
da_a = xr.DataArray(a, dims=["a", "b"], coords={"a": np.linspace(0, 1, 30)})
da_b = xr.DataArray(b, dims=["a", "b", "c"], coords={"a": np.linspace(0, 1, 30)})
da_c = xr.DataArray(c, dims=["c", "e"])
if use_dask:
da_a = da_a.chunk({"a": 3})
da_b = da_b.chunk({"a": 3})
da_c = da_c.chunk({"c": 3})
actual = xr.dot(da_a, da_b, dims=["a", "b"])
assert actual.dims == ("c",)
assert (actual.data == np.einsum("ij,ijk->k", a, b)).all()
assert isinstance(actual.variable.data, type(da_a.variable.data))
actual = xr.dot(da_a, da_b)
assert actual.dims == ("c",)
assert (actual.data == np.einsum("ij,ijk->k", a, b)).all()
assert isinstance(actual.variable.data, type(da_a.variable.data))
    # if only a single array is passed without a dims argument, just return
    # it as is
actual = xr.dot(da_a)
assert da_a.identical(actual)
# test for variable
actual = xr.dot(da_a.variable, da_b.variable)
assert actual.dims == ("c",)
assert (actual.data == np.einsum("ij,ijk->k", a, b)).all()
assert isinstance(actual.data, type(da_a.variable.data))
if use_dask:
da_a = da_a.chunk({"a": 3})
da_b = da_b.chunk({"a": 3})
actual = xr.dot(da_a, da_b, dims=["b"])
assert actual.dims == ("a", "c")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
assert isinstance(actual.variable.data, type(da_a.variable.data))
actual = xr.dot(da_a, da_b, dims=["b"])
assert actual.dims == ("a", "c")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
actual = xr.dot(da_a, da_b, dims="b")
assert actual.dims == ("a", "c")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
actual = xr.dot(da_a, da_b, dims="a")
assert actual.dims == ("b", "c")
assert (actual.data == np.einsum("ij,ijk->jk", a, b)).all()
actual = xr.dot(da_a, da_b, dims="c")
assert actual.dims == ("a", "b")
assert (actual.data == np.einsum("ij,ijk->ij", a, b)).all()
actual = xr.dot(da_a, da_b, da_c, dims=["a", "b"])
assert actual.dims == ("c", "e")
assert (actual.data == np.einsum("ij,ijk,kl->kl ", a, b, c)).all()
# should work with tuple
actual = xr.dot(da_a, da_b, dims=("c",))
assert actual.dims == ("a", "b")
assert (actual.data == np.einsum("ij,ijk->ij", a, b)).all()
# default dims
actual = xr.dot(da_a, da_b, da_c)
assert actual.dims == ("e",)
assert (actual.data == np.einsum("ij,ijk,kl->l ", a, b, c)).all()
# 1 array summation
actual = xr.dot(da_a, dims="a")
assert actual.dims == ("b",)
assert (actual.data == np.einsum("ij->j ", a)).all()
# empty dim
actual = xr.dot(da_a.sel(a=[]), da_a.sel(a=[]), dims="a")
assert actual.dims == ("b",)
assert (actual.data == np.zeros(actual.shape)).all()
# Ellipsis (...) sums over all dimensions
actual = xr.dot(da_a, da_b, dims=...)
assert actual.dims == ()
assert (actual.data == np.einsum("ij,ijk->", a, b)).all()
actual = xr.dot(da_a, da_b, da_c, dims=...)
assert actual.dims == ()
assert (actual.data == np.einsum("ij,ijk,kl-> ", a, b, c)).all()
actual = xr.dot(da_a, dims=...)
assert actual.dims == ()
assert (actual.data == np.einsum("ij-> ", a)).all()
actual = xr.dot(da_a.sel(a=[]), da_a.sel(a=[]), dims=...)
assert actual.dims == ()
assert (actual.data == np.zeros(actual.shape)).all()
# Invalid cases
if not use_dask:
with pytest.raises(TypeError):
xr.dot(da_a, dims="a", invalid=None)
with pytest.raises(TypeError):
xr.dot(da_a.to_dataset(name="da"), dims="a")
with pytest.raises(TypeError):
xr.dot(dims="a")
# einsum parameters
actual = xr.dot(da_a, da_b, dims=["b"], order="C")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
assert actual.values.flags["C_CONTIGUOUS"]
assert not actual.values.flags["F_CONTIGUOUS"]
actual = xr.dot(da_a, da_b, dims=["b"], order="F")
assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all()
# dask converts Fortran arrays to C order when merging the final array
if not use_dask:
assert not actual.values.flags["C_CONTIGUOUS"]
assert actual.values.flags["F_CONTIGUOUS"]
    # einsum has a constant string as its first parameter, which makes
# it hard to pass to xarray.apply_ufunc.
# make sure dot() uses functools.partial(einsum, subscripts), which
# can be pickled, and not a lambda, which can't.
pickle.loads(pickle.dumps(xr.dot(da_a)))
@pytest.mark.parametrize("use_dask", [True, False])
def test_dot_align_coords(use_dask):
# GH 3694
if use_dask:
if not has_dask:
pytest.skip("test for dask.")
a = np.arange(30 * 4).reshape(30, 4)
b = np.arange(30 * 4 * 5).reshape(30, 4, 5)
# use partially overlapping coords
coords_a = {"a": np.arange(30), "b": np.arange(4)}
coords_b = {"a": np.arange(5, 35), "b": np.arange(1, 5)}
da_a = xr.DataArray(a, dims=["a", "b"], coords=coords_a)
da_b = xr.DataArray(b, dims=["a", "b", "c"], coords=coords_b)
if use_dask:
da_a = da_a.chunk({"a": 3})
da_b = da_b.chunk({"a": 3})
# join="inner" is the default
actual = xr.dot(da_a, da_b)
# `dot` sums over the common dimensions of the arguments
expected = (da_a * da_b).sum(["a", "b"])
xr.testing.assert_allclose(expected, actual)
actual = xr.dot(da_a, da_b, dims=...)
expected = (da_a * da_b).sum()
xr.testing.assert_allclose(expected, actual)
with xr.set_options(arithmetic_join="exact"):
with raises_regex(ValueError, "indexes along dimension"):
xr.dot(da_a, da_b)
# NOTE: dot always uses `join="inner"` because `(a * b).sum()` yields the same for all
# join method (except "exact")
with xr.set_options(arithmetic_join="left"):
actual = xr.dot(da_a, da_b)
expected = (da_a * da_b).sum(["a", "b"])
xr.testing.assert_allclose(expected, actual)
with xr.set_options(arithmetic_join="right"):
actual = xr.dot(da_a, da_b)
expected = (da_a * da_b).sum(["a", "b"])
xr.testing.assert_allclose(expected, actual)
with xr.set_options(arithmetic_join="outer"):
actual = xr.dot(da_a, da_b)
expected = (da_a * da_b).sum(["a", "b"])
xr.testing.assert_allclose(expected, actual)
def test_where():
cond = xr.DataArray([True, False], dims="x")
actual = xr.where(cond, 1, 0)
expected = xr.DataArray([1, 0], dims="x")
assert_identical(expected, actual)
@pytest.mark.parametrize("use_dask", [True, False])
@pytest.mark.parametrize("use_datetime", [True, False])
def test_polyval(use_dask, use_datetime):
if use_dask and not has_dask:
pytest.skip("requires dask")
if use_datetime:
xcoord = xr.DataArray(
pd.date_range("2000-01-01", freq="D", periods=10), dims=("x",), name="x"
)
x = xr.core.missing.get_clean_interp_index(xcoord, "x")
else:
xcoord = x = np.arange(10)
da = xr.DataArray(
np.stack((1.0 + x + 2.0 * x ** 2, 1.0 + 2.0 * x + 3.0 * x ** 2)),
dims=("d", "x"),
coords={"x": xcoord, "d": [0, 1]},
)
coeffs = xr.DataArray(
[[2, 1, 1], [3, 2, 1]],
dims=("d", "degree"),
coords={"d": [0, 1], "degree": [2, 1, 0]},
)
if use_dask:
coeffs = coeffs.chunk({"d": 2})
da_pv = xr.polyval(da.x, coeffs)
xr.testing.assert_allclose(da, da_pv.T)
|
apache-2.0
|
trankmichael/scikit-learn
|
sklearn/utils/metaestimators.py
|
283
|
2353
|
"""Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
|
bsd-3-clause
|
espenhgn/elephant
|
elephant/current_source_density_src/icsd.py
|
9
|
35175
|
# -*- coding: utf-8 -*-
'''
py-iCSD toolbox!
Translation of the core functionality of the CSDplotter MATLAB package
to python.
The methods were originally developed by Klas H. Pettersen, as described in:
Klas H. Pettersen, Anna Devor, Istvan Ulbert, Anders M. Dale, Gaute T. Einevoll,
Current-source density estimation based on inversion of electrostatic forward
solution: Effects of finite extent of neuronal activity and conductivity
discontinuities, Journal of Neuroscience Methods, Volume 154, Issues 1-2,
30 June 2006, Pages 116-133, ISSN 0165-0270,
http://dx.doi.org/10.1016/j.jneumeth.2005.12.005.
(http://www.sciencedirect.com/science/article/pii/S0165027005004541)
The methods themselves are implemented as callable subclasses of the base
CSD class object, which sets some common attributes, provides a basic
function for calculating the iCSD, and supplies a generic spatial filter
implementation.
The raw and filtered CSD estimates are returned as Quantity arrays.
Requires a pylab environment to work, i.e. numpy+scipy+matplotlib, with the
addition of quantities (http://pythonhosted.org/quantities) and
neo (https://pythonhosted.org/neo).
Original implementation from CSDplotter-0.1.1
(http://software.incf.org/software/csdplotter) by Klas. H. Pettersen 2005.
Written by:
- [email protected], 2010,
- [email protected], 2015-2016
'''
import numpy as np
import scipy.integrate as si
import scipy.signal as ss
import quantities as pq
class CSD(object):
'''Base iCSD class'''
def __init__(self, lfp, f_type='gaussian', f_order=(3, 1)):
'''Initialize parent class iCSD
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps)
f_type : str
type of spatial filter, must be a scipy.signal filter design method
f_order : list
settings for spatial filter, arg passed to filter design function
'''
self.name = 'CSD estimate parent class'
self.lfp = lfp
self.f_matrix = np.eye(lfp.shape[0]) * pq.m**3 / pq.S
self.f_type = f_type
self.f_order = f_order
def get_csd(self, ):
'''
        Perform the CSD estimate from the LFP and forward matrix F, i.e. as
        CSD = F**-1 * LFP
Arguments
---------
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with the csd estimate
'''
csd = np.linalg.solve(self.f_matrix, self.lfp)
return csd * (self.f_matrix.units**-1 * self.lfp.units).simplified
def filter_csd(self, csd, filterfunction='convolve'):
'''
Spatial filtering of the CSD estimate, using an N-point filter
Arguments
---------
        csd : np.ndarray * quantity.Quantity
Array with the csd estimate
filterfunction : str
'filtfilt' or 'convolve'. Apply spatial filter using
scipy.signal.filtfilt or scipy.signal.convolve.
'''
if self.f_type == 'gaussian':
try:
assert(len(self.f_order) == 2)
            except AssertionError:
                raise AssertionError(
                    'filter order f_order must be a tuple of length 2')
else:
try:
assert(self.f_order > 0 and isinstance(self.f_order, int))
            except AssertionError:
                raise AssertionError('Filter order must be int > 0!')
try:
assert(filterfunction in ['filtfilt', 'convolve'])
        except AssertionError:
            raise AssertionError(
                "{} not equal to 'filtfilt' or 'convolve'".format(filterfunction))
if self.f_type == 'boxcar':
num = ss.boxcar(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'hamming':
num = ss.hamming(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'triangular':
num = ss.triang(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'gaussian':
num = ss.gaussian(self.f_order[0], self.f_order[1])
denom = np.array([num.sum()])
elif self.f_type == 'identity':
num = np.array([1.])
denom = np.array([1.])
        else:
            raise ValueError('%s is not a supported filter type!' % self.f_type)
num_string = '[ '
for i in num:
num_string = num_string + '%.3f ' % i
num_string = num_string + ']'
denom_string = '[ '
for i in denom:
denom_string = denom_string + '%.3f ' % i
denom_string = denom_string + ']'
print(('discrete filter coefficients: \nb = {}, \
\na = {}'.format(num_string, denom_string)))
if filterfunction == 'filtfilt':
return ss.filtfilt(num, denom, csd, axis=0) * csd.units
elif filterfunction == 'convolve':
csdf = csd / csd.units
for i in range(csdf.shape[1]):
csdf[:, i] = ss.convolve(csdf[:, i], num / denom.sum(), 'same')
return csdf * csd.units
class StandardCSD(CSD):
'''
Standard CSD method with and without Vaknin electrodes
'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initialize standard CSD method class with & without Vaknin electrodes.
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
            (# contacts, ) in units of m, must be monotonically increasing
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S/m
vaknin_el : bool
flag for using method of Vaknin to endpoint electrodes
Defaults to True
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
diff_diff_coord = np.diff(np.diff(coord_electrode)).magnitude
zeros_ddc = np.zeros_like(diff_diff_coord)
try:
assert(np.all(np.isclose(diff_diff_coord, zeros_ddc, atol=1e-12)))
except AssertionError as ae:
            print('coord_electrode not equidistantly spaced')
raise ae
if self.vaknin_el:
# extend lfps array by duplicating potential at endpoint contacts
if lfp.ndim == 1:
self.lfp = np.empty((lfp.shape[0] + 2, )) * lfp.units
else:
self.lfp = np.empty((lfp.shape[0] + 2, lfp.shape[1])) * lfp.units
self.lfp[0, ] = lfp[0, ]
self.lfp[1:-1, ] = lfp
self.lfp[-1, ] = lfp[-1, ]
else:
self.lfp = lfp
self.name = 'Standard CSD method'
self.coord_electrode = coord_electrode
self.f_inv_matrix = self.get_f_inv_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.vaknin_el = kwargs.pop('vaknin_el', True)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_inv_matrix(self):
'''Calculate the inverse F-matrix for the standard CSD method'''
h_val = abs(np.diff(self.coord_electrode)[0])
f_inv = -np.eye(self.lfp.shape[0])
        # Inner matrix elements are just the discrete Laplacian coefficients
for j in range(1, f_inv.shape[0] - 1):
f_inv[j, j - 1: j + 2] = np.array([1., -2., 1.])
return f_inv * -self.sigma / h_val
def get_csd(self):
'''
        Perform the iCSD calculation, i.e.: iCSD = F_inv * LFP
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with the csd estimate
'''
csd = np.dot(self.f_inv_matrix, self.lfp)[1:-1, ]
# `np.dot()` does not return correct units, so the units of `csd` must
# be assigned manually
csd_units = (self.f_inv_matrix.units * self.lfp.units).simplified
csd = csd.magnitude * csd_units
return csd
class DeltaiCSD(CSD):
'''
delta-iCSD method
'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initialize the delta-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float * quantity.Quantity
            diameter of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?!
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise ae
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
            print('values of coord_electrode not continuously increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
else:
assert(self.diam > 0 * self.diam.units)
except AssertionError as ae:
print('diam must be positive scalar or of same shape \
as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
self.name = 'delta-iCSD method'
self.coord_electrode = coord_electrode
# initialize F- and iCSD-matrices
self.f_matrix = np.empty((self.coord_electrode.size,
self.coord_electrode.size))
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate the F-matrix'''
f_matrix = np.empty((self.coord_electrode.size,
self.coord_electrode.size)) * self.coord_electrode.units
for j in range(self.coord_electrode.size):
for i in range(self.coord_electrode.size):
f_matrix[j, i] = ((np.sqrt((self.coord_electrode[j] -
self.coord_electrode[i])**2 +
(self.diam[j] / 2)**2) - abs(self.coord_electrode[j] -
self.coord_electrode[i])) +
(self.sigma - self.sigma_top) / (self.sigma +
self.sigma_top) *
(np.sqrt((self.coord_electrode[j] +
self.coord_electrode[i])**2 + (self.diam[j] / 2)**2)-
abs(self.coord_electrode[j] + self.coord_electrode[i])))
f_matrix /= (2 * self.sigma)
return f_matrix
class StepiCSD(CSD):
'''step-iCSD method'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initializing step-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float or np.ndarray * quantity.Quantity
diameter(s) of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
h : float or np.ndarray * quantity.Quantity
assumed thickness of the source cylinders at all or each contact
Defaults to np.ones(15) * 100E-6 * pq.m
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
tol : float
tolerance of numerical integration
Defaults 1e-6
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise ae
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
            print('values of coord_electrode not continuously increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
else:
assert(self.diam > 0 * self.diam.units)
except AssertionError as ae:
print('diam must be positive scalar or of same shape \
as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
try:
assert(self.h.size == 1 or self.h.size == coord_electrode.size)
if self.h.size == coord_electrode.size:
assert(np.all(self.h > 0 * self.h.units))
except AssertionError as ae:
print('h must be scalar or of same shape as coord_electrode')
raise ae
if self.h.size == 1:
self.h = np.ones(coord_electrode.size) * self.h
self.name = 'step-iCSD method'
self.coord_electrode = coord_electrode
# compute forward-solution matrix
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.h = kwargs.pop('h', np.ones(23) * 100E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.tol = kwargs.pop('tol', 1e-6)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate F-matrix for step iCSD method'''
el_len = self.coord_electrode.size
f_matrix = np.zeros((el_len, el_len))
for j in range(el_len):
for i in range(el_len):
lower_int = self.coord_electrode[i] - self.h[j] / 2
                if lower_int < 0:
                    # the source cylinder must not extend above the surface (z = 0)
                    lower_int = 0 * self.h[j].units
upper_int = self.coord_electrode[i] + self.h[j] / 2
# components of f_matrix object
f_cyl0 = si.quad(self._f_cylinder,
a=lower_int, b=upper_int,
args=(float(self.coord_electrode[j]),
float(self.diam[j]),
float(self.sigma)),
epsabs=self.tol)[0]
f_cyl1 = si.quad(self._f_cylinder, a=lower_int, b=upper_int,
args=(-float(self.coord_electrode[j]),
float(self.diam[j]), float(self.sigma)),
epsabs=self.tol)[0]
# method of images coefficient
mom = (self.sigma - self.sigma_top) / (self.sigma + self.sigma_top)
f_matrix[j, i] = f_cyl0 + mom * f_cyl1
        # assume si.quad discards the units
return f_matrix * self.h.units**2 / self.sigma.units
def _f_cylinder(self, zeta, z_val, diam, sigma):
        '''Potential of a cylindrical current source, used by get_f_matrix'''
f_cyl = 1. / (2. * sigma) * \
(np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
return f_cyl
class SplineiCSD(CSD):
'''spline iCSD method'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initializing spline-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float * quantity.Quantity
            diameter of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
tol : float
tolerance of numerical integration
Defaults 1e-6
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
num_steps : int
number of data points for the spatially upsampled LFP/CSD data
Defaults to 200
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?!
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
            print('values of coord_electrode not continuously increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
except AssertionError as ae:
print('diam must be scalar or of same shape as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
self.name = 'spline-iCSD method'
self.coord_electrode = coord_electrode
# compute stuff
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.tol = kwargs.pop('tol', 1e-6)
self.num_steps = kwargs.pop('num_steps', 200)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate the F-matrix for cubic spline iCSD method'''
el_len = self.coord_electrode.size
z_js = np.zeros(el_len + 1)
z_js[:-1] = np.array(self.coord_electrode)
z_js[-1] = z_js[-2] + float(np.diff(self.coord_electrode).mean())
        # Define integration matrices
f_mat0 = np.zeros((el_len, el_len + 1))
f_mat1 = np.zeros((el_len, el_len + 1))
f_mat2 = np.zeros((el_len, el_len + 1))
f_mat3 = np.zeros((el_len, el_len + 1))
# Calc. elements
for j in range(el_len):
for i in range(el_len):
f_mat0[j, i] = si.quad(self._f_mat0, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat1[j, i] = si.quad(self._f_mat1, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat2[j, i] = si.quad(self._f_mat2, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat3[j, i] = si.quad(self._f_mat3, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
# image technique if conductivity not constant:
if self.sigma != self.sigma_top:
f_mat0[j, i] = f_mat0[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat0, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1],
float(self.sigma), float(self.diam[j])), \
epsabs=self.tol)[0]
f_mat1[j, i] = f_mat1[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat1, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
f_mat2[j, i] = f_mat2[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat2, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
f_mat3[j, i] = f_mat3[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat3, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
e_mat0, e_mat1, e_mat2, e_mat3 = self._calc_e_matrices()
# Calculate the F-matrix
f_matrix = np.eye(el_len + 2)
f_matrix[1:-1, :] = np.dot(f_mat0, e_mat0) + \
np.dot(f_mat1, e_mat1) + \
np.dot(f_mat2, e_mat2) + \
np.dot(f_mat3, e_mat3)
return f_matrix * self.coord_electrode.units**2 / self.sigma.units
def get_csd(self):
'''
Calculate the iCSD using the spline iCSD method
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with csd estimate
'''
e_mat = self._calc_e_matrices()
el_len = self.coord_electrode.size
# padding the lfp with zeros on top/bottom
if self.lfp.ndim == 1:
cs_lfp = np.r_[[0], np.asarray(self.lfp), [0]].reshape(1, -1).T
csd = np.zeros(self.num_steps)
else:
cs_lfp = np.vstack((np.zeros(self.lfp.shape[1]),
np.asarray(self.lfp),
np.zeros(self.lfp.shape[1])))
csd = np.zeros((self.num_steps, self.lfp.shape[1]))
cs_lfp *= self.lfp.units
# CSD coefficients
csd_coeff = np.linalg.solve(self.f_matrix, cs_lfp)
# The cubic spline polynomial coefficients
a_mat0 = np.dot(e_mat[0], csd_coeff)
a_mat1 = np.dot(e_mat[1], csd_coeff)
a_mat2 = np.dot(e_mat[2], csd_coeff)
a_mat3 = np.dot(e_mat[3], csd_coeff)
        # Extend electrode coordinates at both ends by the minimum contact spacing
h = np.diff(self.coord_electrode).min()
z_js = np.zeros(el_len + 2)
z_js[0] = self.coord_electrode[0] - h
z_js[1: -1] = self.coord_electrode
z_js[-1] = self.coord_electrode[-1] + h
# create high res spatial grid
out_zs = np.linspace(z_js[1], z_js[-2], self.num_steps)
# Calculate iCSD estimate on grid from polynomial coefficients.
i = 0
for j in range(self.num_steps):
if out_zs[j] >= z_js[i + 1]:
i += 1
csd[j, ] = a_mat0[i, :] + a_mat1[i, :] * \
(out_zs[j] - z_js[i]) + \
a_mat2[i, :] * (out_zs[j] - z_js[i])**2 + \
a_mat3[i, :] * (out_zs[j] - z_js[i])**3
csd_unit = (self.f_matrix.units**-1 * self.lfp.units).simplified
return csd * csd_unit
def _f_mat0(self, zeta, z_val, sigma, diam):
'''0'th order potential function'''
return 1. / (2. * sigma) * \
(np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
def _f_mat1(self, zeta, z_val, zi_val, sigma, diam):
'''1'th order potential function'''
return (zeta - zi_val) * self._f_mat0(zeta, z_val, sigma, diam)
def _f_mat2(self, zeta, z_val, zi_val, sigma, diam):
'''2'nd order potential function'''
return (zeta - zi_val)**2 * self._f_mat0(zeta, z_val, sigma, diam)
def _f_mat3(self, zeta, z_val, zi_val, sigma, diam):
'''3'rd order potential function'''
return (zeta - zi_val)**3 * self._f_mat0(zeta, z_val, sigma, diam)
def _calc_k_matrix(self):
        '''Calculate the K-matrix used to calculate the E-matrices'''
el_len = self.coord_electrode.size
h = float(np.diff(self.coord_electrode).min())
c_jm1 = np.eye(el_len + 2, k=0) / h
c_jm1[0, 0] = 0
c_j0 = np.eye(el_len + 2) / h
c_j0[-1, -1] = 0
c_jall = c_j0
c_jall[0, 0] = 1
c_jall[-1, -1] = 1
tjp1 = np.eye(el_len + 2, k=1)
tjm1 = np.eye(el_len + 2, k=-1)
tj0 = np.eye(el_len + 2)
tj0[0, 0] = 0
tj0[-1, -1] = 0
# Defining K-matrix used to calculate e_mat1-3
return np.dot(np.linalg.inv(np.dot(c_jm1, tjm1) +
2 * np.dot(c_jm1, tj0) +
2 * c_jall +
np.dot(c_j0, tjp1)),
3 * (np.dot(np.dot(c_jm1, c_jm1), tj0) -
np.dot(np.dot(c_jm1, c_jm1), tjm1) +
np.dot(np.dot(c_j0, c_j0), tjp1) -
np.dot(np.dot(c_j0, c_j0), tj0)))
def _calc_e_matrices(self):
'''Calculate the E-matrices used by cubic spline iCSD method'''
el_len = self.coord_electrode.size
# expanding electrode grid
h = float(np.diff(self.coord_electrode).min())
# Define transformation matrices
c_mat3 = np.eye(el_len + 1) / h
# Get K-matrix
k_matrix = self._calc_k_matrix()
        # Define matrices for the C to A transformation:
tja = np.eye(el_len + 2)[:-1, ]
tjp1a = np.eye(el_len + 2, k=1)[:-1, ]
# Define spline coefficients
e_mat0 = tja
e_mat1 = np.dot(tja, k_matrix)
e_mat2 = 3 * np.dot(c_mat3**2, (tjp1a - tja)) - \
np.dot(np.dot(c_mat3, (tjp1a + 2 * tja)), k_matrix)
e_mat3 = 2 * np.dot(c_mat3**3, (tja - tjp1a)) + \
np.dot(np.dot(c_mat3**2, (tjp1a + tja)), k_matrix)
return e_mat0, e_mat1, e_mat2, e_mat3
if __name__ == '__main__':
from scipy.io import loadmat
import matplotlib.pyplot as plt
    # load the test data
    test_data = loadmat('test_data.mat')
    # prepare the lfp data for use by changing the units to SI and appending
    # quantities, along with electrode geometry, conductivities and assumed
    # source geometry
lfp_data = test_data['pot1'] * 1E-6 * pq.V # [uV] -> [V]
z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m # [m]
diam = 500E-6 * pq.m # [m]
h = 100E-6 * pq.m # [m]
sigma = 0.3 * pq.S / pq.m # [S/m] or [1/(ohm*m)]
sigma_top = 0.3 * pq.S / pq.m # [S/m] or [1/(ohm*m)]
# Input dictionaries for each method
delta_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam, # source diameter
'sigma' : sigma, # extracellular conductivity
'sigma_top' : sigma, # conductivity on top of cortex
'f_type' : 'gaussian', # gaussian filter
'f_order' : (3, 1), # 3-point filter, sigma = 1.
}
step_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam,
'h' : h, # source thickness
'sigma' : sigma,
'sigma_top' : sigma,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
spline_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam,
'sigma' : sigma,
'sigma_top' : sigma,
'num_steps' : 201, # Spatial CSD upsampling to N steps
'tol' : 1E-12,
'f_type' : 'gaussian',
'f_order' : (20, 5),
}
std_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'sigma' : sigma,
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
#Create the different CSD-method class instances. We use the class methods
#get_csd() and filter_csd() below to get the raw and spatially filtered
#versions of the current-source density estimates.
csd_dict = dict(
delta_icsd = DeltaiCSD(**delta_input),
step_icsd = StepiCSD(**step_input),
spline_icsd = SplineiCSD(**spline_input),
std_csd = StandardCSD(**std_input),
)
#plot
for method, csd_obj in list(csd_dict.items()):
fig, axes = plt.subplots(3,1, figsize=(8,8))
#plot LFP signal
ax = axes[0]
im = ax.imshow(np.array(lfp_data), origin='upper', vmin=-abs(lfp_data).max(), \
vmax=abs(lfp_data).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
cb = plt.colorbar(im, ax=ax)
cb.set_label('LFP (%s)' % lfp_data.dimensionality.string)
ax.set_xticklabels([])
ax.set_title('LFP')
ax.set_ylabel('ch #')
#plot raw csd estimate
csd = csd_obj.get_csd()
ax = axes[1]
im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
ax.set_title(csd_obj.name)
cb = plt.colorbar(im, ax=ax)
cb.set_label('CSD (%s)' % csd.dimensionality.string)
ax.set_xticklabels([])
ax.set_ylabel('ch #')
#plot spatially filtered csd estimate
ax = axes[2]
csd = csd_obj.filter_csd(csd)
im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
ax.set_title(csd_obj.name + ', filtered')
cb = plt.colorbar(im, ax=ax)
cb.set_label('CSD (%s)' % csd.dimensionality.string)
ax.set_ylabel('ch #')
ax.set_xlabel('timestep')
plt.show()
|
bsd-3-clause
|
littlezz/ESL-Model
|
esl_model/datasets/data_set.py
|
1
|
3913
|
import pandas as pd
from pandas import read_csv
from os.path import join, dirname
__all__ = ['ProstateDataSet', 'VowelDataSet', 'SAHeartDataSet', 'ZipCodeDataSet']
class BaseDataSet:
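    """Base loader: read the bundled csv file(s) and expose train/test splits."""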
data_path = ''
multi_data = False
def __init__(self, select_features=None):
self._train_x = None
self._train_y = None
self._test_x = None
self._test_y = None
self.feature_names = None
self.select_features = select_features
if self.multi_data:
self.df = map(self.read_data, self.data_path)
else:
self.df = self.read_data(self.data_path)
self._process_data()
@staticmethod
def read_data(path):
filename = join(dirname(__file__), path)
return read_csv(filename)
def _process_data(self):
raise NotImplementedError
def return_all(self, ret_feature_names=True):
"""
all data sets
:param ret_feature_names:
:return:
"""
ret = (self.train_x, self.train_y, self.test_x, self.test_y, self.feature_names)
return ret if ret_feature_names else ret[:-1]
def select_x(self, x: pd.DataFrame):
"""
        select specific columns by feature name or by column index
"""
if not self.feature_names or not self.select_features:
return x
if isinstance(self.select_features[0], int):
return x.iloc[:, self.select_features]
elif all(f in self.feature_names for f in self.select_features):
return x.loc[:, self.select_features]
else:
            raise KeyError('Features not found: {}'.format(self.select_features))
@property
def train_x(self):
return self.select_x(self._train_x).values
@property
def train_y(self):
return self._train_y.values
@property
def test_x(self):
return self.select_x(self._test_x).values
@property
def test_y(self):
return self._test_y.values
class ProstateDataSet(BaseDataSet):
data_path = 'data/prostate.csv'
def _process_data(self):
df = self.df
train = self.df[self.df.train == 'T'].iloc[:, :-1]
test = self.df[self.df.train == 'F'].iloc[:, :-1]
self._train_x, self._test_x = train.iloc[:, :-1], test.iloc[:, :-1]
self._train_y, self._test_y = train.iloc[:, -1], test.iloc[:, -1]
self.feature_names = list(df.columns[:-1])
class VowelDataSet(BaseDataSet):
data_path = ['data/vowel.train.csv', 'data/vowel.test.csv']
multi_data = True
def _process_data(self):
train, test = self.df
train = train.drop(train.columns[0], axis=1)
self._train_y = train.pop('y')
self._train_x = train
test = test.drop(test.columns[0], axis=1)
self._test_y = test.pop('y')
self._test_x = test
self.feature_names = list(train.columns)
class SAHeartDataSet(BaseDataSet):
"""
    There is no test data in this DataSet
"""
data_path = 'data/SAheart.data.csv'
def _process_data(self):
df = self.df
train = df.drop(df.columns[0], axis=1)
self._train_y = train.pop('chd')
train['famhist'] = pd.Categorical(train['famhist']).codes
self._train_x = train
self.feature_names = list(train.columns)
# empty test data set
self._test_x = self._test_y = pd.DataFrame()
class ZipCodeDataSet(BaseDataSet):
multi_data = True
data_path = ['data/zip.train.gz', 'data/zip.test.gz']
@staticmethod
def read_data(path):
filename = join(dirname(__file__), path)
return read_csv(filename, sep=' ', header=None)
def _process_data(self):
train, test = self.df
self._train_x = train.iloc[:, 1:257]
self._train_y = train.iloc[:, 0]
self._test_x = test.iloc[:, 1:257]
self._test_y = test.iloc[:, 0]
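if __name__ == '__main__':
    # Minimal usage sketch, assuming the bundled csv files under ``data/`` are
    # present next to this module; the call itself mirrors the public API above.
    train_x, train_y, test_x, test_y, names = ProstateDataSet().return_all()
    print('train:', train_x.shape, 'test:', test_x.shape)
    print('features:', names)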
|
mit
|
poryfly/scikit-learn
|
examples/bicluster/plot_spectral_coclustering.py
|
276
|
1736
|
"""
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
|
bsd-3-clause
|
wazeerzulfikar/scikit-learn
|
sklearn/datasets/mldata.py
|
32
|
8031
|
"""Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home
from ..utils import Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename.
Parameters
----------
dataname : str
Name of dataset
Returns
-------
fname : str
The converted dataname.
"""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
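# Example (illustrative): mldata_filename('Whistler Daily Snowfall (2010)')
# returns 'whistler-daily-snowfall-2010'.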
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
    to be transposed to match the `sklearn` standard
    Keyword arguments allow adapting these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname : str
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to an mldata.org URL.
target_name : optional, default: 'label'
Name or index of the column containing the target values.
data_name : optional, default: 'data'
Name or index of the column containing the data.
transpose_data : optional, default: True
If True, transpose the downloaded data array.
data_home : optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the scikit-learn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
    # if target or data names are indices, transform them into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to scikit-learn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by test runners to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
|
bsd-3-clause
|
rseubert/scikit-learn
|
sklearn/linear_model/tests/test_theil_sen.py
|
234
|
9928
|
"""
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
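    """Temporarily redirect stdout and stderr to os.devnull."""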
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
    # Check that the start value is an element of X and is the solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
|
bsd-3-clause
|
nelson-liu/scikit-learn
|
examples/calibration/plot_calibration_multiclass.py
|
95
|
6971
|
"""
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
|
bsd-3-clause
|
dongjoon-hyun/spark
|
python/pyspark/sql/pandas/group_ops.py
|
23
|
14683
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark.rdd import PythonEvalType
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame
class PandasGroupedOpsMixin(object):
"""
    Mix-in for pandas grouped operations. Currently, only :class:`GroupedData`
can use this class.
"""
def apply(self, udf):
"""
It is an alias of :meth:`pyspark.sql.GroupedData.applyInPandas`; however, it takes a
:meth:`pyspark.sql.functions.pandas_udf` whereas
:meth:`pyspark.sql.GroupedData.applyInPandas` takes a Python native function.
.. versionadded:: 2.3.0
Parameters
----------
udf : :func:`pyspark.sql.functions.pandas_udf`
a grouped map user-defined function returned by
:func:`pyspark.sql.functions.pandas_udf`.
Notes
-----
It is preferred to use :meth:`pyspark.sql.GroupedData.applyInPandas` over this
API. This API will be deprecated in the future releases.
Examples
--------
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
See Also
--------
pyspark.sql.functions.pandas_udf
"""
# Columns are special because hasattr always return True
if isinstance(udf, Column) or not hasattr(udf, 'func') \
or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
"GROUPED_MAP.")
warnings.warn(
"It is preferred to use 'applyInPandas' over this "
"API. This API will be deprecated in the future releases. See SPARK-28264 for "
"more details.", UserWarning)
return self.applyInPandas(udf.func, schema=udf.returnType)
def applyInPandas(self, func, schema):
"""
Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
        to the user-function and the returned `pandas.DataFrame` objects are combined into a
        :class:`DataFrame`.
The `schema` should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a Python native function that takes a `pandas.DataFrame`, and outputs a
`pandas.DataFrame`.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, ceil
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").applyInPandas(
... normalize, schema="id long, v double").show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can pass a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> def mean_func(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').applyInPandas(
... mean_func, schema="id long, v double").show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> def sum_func(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).applyInPandas(
... sum_func, schema="id long, `ceil(v / 2)` long, v double").show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
Notes
-----
This function requires a full shuffle. All the data of a group will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
This API is experimental.
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql import GroupedData
from pyspark.sql.functions import pandas_udf, PandasUDFType
assert isinstance(self, GroupedData)
udf = pandas_udf(
func, returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
df = self._df
udf_column = udf(*[df[col] for col in df.columns])
jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
def cogroup(self, other):
"""
Cogroups this group with another group so that we can run cogrouped operations.
.. versionadded:: 3.0.0
See :class:`PandasCogroupedOps` for the operations that can be run.
"""
from pyspark.sql import GroupedData
assert isinstance(self, GroupedData)
return PandasCogroupedOps(self, other)
class PandasCogroupedOps(object):
"""
A logical grouping of two :class:`GroupedData`,
created by :func:`GroupedData.cogroup`.
.. versionadded:: 3.0.0
Notes
-----
This API is experimental.
"""
def __init__(self, gd1, gd2):
self._gd1 = gd1
self._gd2 = gd2
self.sql_ctx = gd1.sql_ctx
def applyInPandas(self, func, schema):
"""
Applies a function to each cogroup using pandas and returns the result
as a `DataFrame`.
The function should take two `pandas.DataFrame`\\s and return another
`pandas.DataFrame`. For each side of the cogroup, all columns are passed together as a
        `pandas.DataFrame` to the user-function and the returned `pandas.DataFrame` objects are
        combined into a :class:`DataFrame`.
The `schema` should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a Python native function that takes two `pandas.DataFrame`\\s, and
outputs a `pandas.DataFrame`, or that takes one tuple (grouping keys) and two
pandas ``DataFrame``\\s, and outputs a pandas ``DataFrame``.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.functions import pandas_udf
>>> df1 = spark.createDataFrame(
... [(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
... ("time", "id", "v1"))
>>> df2 = spark.createDataFrame(
... [(20000101, 1, "x"), (20000101, 2, "y")],
... ("time", "id", "v2"))
>>> def asof_join(l, r):
... return pd.merge_asof(l, r, on="time", by="id")
>>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
... asof_join, schema="time int, id int, v1 double, v2 string"
... ).show() # doctest: +SKIP
+--------+---+---+---+
| time| id| v1| v2|
+--------+---+---+---+
|20000101| 1|1.0| x|
|20000102| 1|3.0| x|
|20000101| 2|2.0| y|
|20000102| 2|4.0| y|
+--------+---+---+---+
Alternatively, the user can define a function that takes three arguments. In this case,
the grouping key(s) will be passed as the first argument and the data will be passed as the
second and third arguments. The grouping key(s) will be passed as a tuple of numpy data
types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in as two
        `pandas.DataFrame` objects containing all columns from the original Spark DataFrames.
>>> def asof_join(k, l, r):
... if k == (1,):
... return pd.merge_asof(l, r, on="time", by="id")
... else:
... return pd.DataFrame(columns=['time', 'id', 'v1', 'v2'])
>>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
... asof_join, "time int, id int, v1 double, v2 string").show() # doctest: +SKIP
+--------+---+---+---+
| time| id| v1| v2|
+--------+---+---+---+
|20000101| 1|1.0| x|
|20000102| 1|3.0| x|
+--------+---+---+---+
Notes
-----
This function requires a full shuffle. All the data of a cogroup will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
This API is experimental.
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql.pandas.functions import pandas_udf
udf = pandas_udf(
func, returnType=schema, functionType=PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF)
all_cols = self._extract_cols(self._gd1) + self._extract_cols(self._gd2)
udf_column = udf(*all_cols)
jdf = self._gd1._jgd.flatMapCoGroupsInPandas(self._gd2._jgd, udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
@staticmethod
def _extract_cols(gd):
df = gd._df
return [df[col] for col in df.columns]
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.pandas.group_ops
globs = pyspark.sql.pandas.group_ops.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.pandas.group tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.pandas.group_ops, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
x-mengao/x-mengao.github.io
|
markdown_generator/talks.py
|
199
|
4000
|
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
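# The block below is an illustrative sketch only (hypothetical data, added for
# exposition; it is not part of the original generator): it builds a one-row frame
# with the required columns, i.e. the shape `talks.tsv` is expected to have once it
# is read in with `sep="\t"`. Writing it out with `to_csv(..., sep="\t")` would
# produce a valid sample TSV.
example_talks = pd.DataFrame([{
    "title": "An Example Talk",
    "type": "Talk",
    "url_slug": "example-talk",
    "venue": "Example Venue",
    "date": "2014-03-01",
    "location": "Example City, USA",
    "talk_url": "",
    "description": "A short description of the example talk.",
}])
# example_talks.to_csv("example_talks.tsv", sep="\t", index=False)  # uncomment to write a sample TSV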
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them look less readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then concatenates a big string (```md```) that contains the markdown for each entry. It does the YAML metadata first, then the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are written into the `_talks` directory, which sits one level up from the `markdown_generator` folder we are working in.
|
mit
|
lin-credible/scikit-learn
|
sklearn/tests/test_cross_validation.py
|
70
|
41943
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
    # When n is not an integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)
    # When n_folds is not an integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Check that the folds preserve the class proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with warnings.catch_warnings(record=True):
# deprecated sequence of sequence format
cv = cval.check_cv(3, X, y_seq_of_seqs, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
cv = cval.check_cv(3, X, y_indicator_matrix, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
|
bsd-3-clause
|
jbloomlab/dms_tools2
|
tests/test_batch_diffsel.py
|
1
|
3976
|
"""Tests ``dms2_batch_diffsel``.
Written by Jesse Bloom."""
import sys
import os
import unittest
import subprocess
import random
import numpy
import pandas
class test_batch_diffsel(unittest.TestCase):
"""Runs ``dms2_batch_diffsel`` on test data."""
def setUp(self):
"""Set up input data."""
self.testdir = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'./test_batch_diffsel_files/')
if not os.path.isdir(self.testdir):
os.mkdir(self.testdir)
self.indir = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'./diffsel_input_files/')
self.summaryprefix = 'summary'
self.names = ['c1', 'c3']
self.expected = {}
for name in self.names:
for seltype in ['mutdiffsel', 'sitediffsel']:
self.expected[(name, seltype)] = os.path.join(self.indir,
'./expected_output/',
'errors-{0}-mincounts0_{1}.csv'.format(name, seltype))
assert os.path.isfile(self.expected[(name, seltype)])
# define output files
self.outfiles = [self.summaryprefix + suffix for suffix in
['.log'] +
['_' + avgtype + diffseltype + '.pdf'
for avgtype in ['mean', 'median']
for diffseltype in ['maxdiffsel', 'minmaxdiffsel',
'positivediffsel', 'totaldiffsel']]
] + \
[self.summaryprefix + '_H17L19-' + suffix for suffix in
[seltype + 'diffselcorr.pdf' for seltype in
['absolutesite', 'maxmut', 'mut', 'positivesite']] +
[seltype + 'diffsel.csv' for seltype in
['meanmut', 'meansite', 'medianmut', 'mediansite']]]
self.outfiles = [os.path.join(self.testdir, f) for f in self.outfiles]
for f in self.outfiles:
if os.path.isfile(f):
os.remove(f)
def test_dms2_batch_diffsel(self):
"""Runs ``dms2_batch_diffsel`` on test data."""
batchfile = os.path.join(self.testdir, 'batch.csv')
df = pandas.DataFrame({
'group':['H17L19', 'H17L19'],
'name':self.names,
'sel':['L1_H17L19_{0}_r1counts.csv'.format(n)
for n in self.names],
'mock':['L1_mock_r1counts.csv'] * 2,
'err':['err_counts.csv'] * 2,
})
df.to_csv(batchfile, index=False)
cmds = [
'dms2_batch_diffsel',
'--batchfile', batchfile,
'--summaryprefix', self.summaryprefix,
'--outdir', self.testdir,
'--indir', self.indir,
'--pseudocount', '10',
]
sys.stderr.write('\nRunning the following command:\n{0}\n'.format(
' '.join(cmds)))
subprocess.check_call(cmds)
for f in self.outfiles:
self.assertTrue(os.path.isfile(f), "Failed to create {0}".format(f))
for name in self.names:
for seltype in ['mutdiffsel', 'sitediffsel']:
expected = pandas.read_csv(self.expected[(name, seltype)])
actual = pandas.read_csv(os.path.join(self.testdir,
'H17L19-{0}_{1}.csv'.format(name, seltype)))
self.assertTrue((expected.columns == actual.columns).all())
for col in expected.columns:
if pandas.api.types.is_numeric_dtype(actual[col]):
self.assertTrue(numpy.allclose(expected[col],
actual[col], equal_nan=True))
else:
self.assertTrue((expected[col] == actual[col]).all())
if __name__ == '__main__':
runner = unittest.TextTestRunner()
unittest.main(testRunner=runner)
|
gpl-3.0
|
perimosocordiae/scipy
|
scipy/stats/_discrete_distns.py
|
3
|
50628
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from functools import partial
from scipy import special
from scipy.special import entr, logsumexp, betaln, gammaln as gamln, zeta
from scipy._lib._util import _lazywhere, rng_integers
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _ncx2_pdf, _ncx2_cdf, get_distribution_names,
_check_shape)
import scipy.stats._boost as _boost
from .biasedurn import (_PyFishersNCHypergeometric,
_PyWalleniusNCHypergeometric,
_PyStochasticLib3)
class binom_gen(rv_discrete):
r"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is:
.. math::
f(k) = \binom{n}{k} p^k (1-p)^{n-k}
for :math:`k \in \{0, 1, \dots, n\}`, :math:`0 \leq p \leq 1`
`binom` takes :math:`n` and :math:`p` as shape parameters,
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure.
%(after_notes)s
%(example)s
See Also
--------
hypergeom, nbinom, nhypergeom
"""
def _rvs(self, n, p, size=None, random_state=None):
return random_state.binomial(n, p, size)
def _argcheck(self, n, p):
return (n >= 0) & (p >= 0) & (p <= 1)
def _get_support(self, n, p):
return self.a, n
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
# binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
return _boost._binom_pdf(x, n, p)
def _cdf(self, x, n, p):
k = floor(x)
return _boost._binom_cdf(k, n, p)
def _sf(self, x, n, p):
k = floor(x)
return _boost._binom_sf(k, n, p)
def _isf(self, x, n, p):
return _boost._binom_isf(x, n, p)
def _ppf(self, q, n, p):
return _boost._binom_ppf(q, n, p)
def _stats(self, n, p, moments='mv'):
mu = _boost._binom_mean(n, p)
var = _boost._binom_variance(n, p)
g1, g2 = None, None
if 's' in moments:
g1 = _boost._binom_skewness(n, p)
if 'k' in moments:
g2 = _boost._binom_kurtosis_excess(n, p)
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
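# Illustrative only (added for exposition; not part of the SciPy API): evaluate the
# pmf directly from the formula quoted in the `binom` docstring above, as a
# cross-check against ``binom.pmf`` for small inputs.
def _binom_pmf_from_definition(k, n, p):
    """Return C(n, k) * p**k * (1-p)**(n-k), the pmf formula given above."""
    return special.comb(n, k) * p**k * (1 - p)**(n - k)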
class bernoulli_gen(binom_gen):
r"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is:
.. math::
f(k) = \begin{cases}1-p &\text{if } k = 0\\
p &\text{if } k = 1\end{cases}
for :math:`k` in :math:`\{0, 1\}`, :math:`0 \leq p \leq 1`
`bernoulli` takes :math:`p` as shape parameter,
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure.
%(after_notes)s
%(example)s
"""
def _rvs(self, p, size=None, random_state=None):
return binom_gen._rvs(self, 1, p, size=size, random_state=random_state)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _get_support(self, p):
        # Overrides binom_gen._get_support.
return self.a, self.b
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
# bernoulli.pmf(k) = 1-p if k = 0
# = p if k = 1
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _isf(self, x, p):
return binom._isf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class betabinom_gen(rv_discrete):
r"""A beta-binomial discrete random variable.
%(before_notes)s
Notes
-----
The beta-binomial distribution is a binomial distribution with a
probability of success `p` that follows a beta distribution.
The probability mass function for `betabinom` is:
.. math::
f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)}
for :math:`k \in \{0, 1, \dots, n\}`, :math:`n \geq 0`, :math:`a > 0`,
:math:`b > 0`, where :math:`B(a, b)` is the beta function.
`betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.
References
----------
.. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
%(after_notes)s
.. versionadded:: 1.4.0
See Also
--------
beta, binom
%(example)s
"""
def _rvs(self, n, a, b, size=None, random_state=None):
p = random_state.beta(a, b, size)
return random_state.binomial(n, p, size)
def _get_support(self, n, a, b):
return 0, n
def _argcheck(self, n, a, b):
return (n >= 0) & (a > 0) & (b > 0)
def _logpmf(self, x, n, a, b):
k = floor(x)
combiln = -log(n + 1) - betaln(n - k + 1, k + 1)
return combiln + betaln(k + a, n - k + b) - betaln(a, b)
def _pmf(self, x, n, a, b):
return exp(self._logpmf(x, n, a, b))
def _stats(self, n, a, b, moments='mv'):
e_p = a / (a + b)
e_q = 1 - e_p
mu = n * e_p
var = n * (a + b + n) * e_p * e_q / (a + b + 1)
g1, g2 = None, None
if 's' in moments:
g1 = 1.0 / sqrt(var)
g1 *= (a + b + 2 * n) * (b - a)
g1 /= (a + b + 2) * (a + b)
if 'k' in moments:
g2 = a + b
g2 *= (a + b - 1 + 6 * n)
g2 += 3 * a * b * (n - 2)
g2 += 6 * n ** 2
g2 -= 3 * e_p * b * n * (6 - n)
g2 -= 18 * e_p * e_q * n ** 2
g2 *= (a + b) ** 2 * (1 + a + b)
g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n))
g2 -= 3
return mu, var, g1, g2
betabinom = betabinom_gen(name='betabinom')
class nbinom_gen(rv_discrete):
r"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
Negative binomial distribution describes a sequence of i.i.d. Bernoulli
trials, repeated until a predefined, non-random number of successes occurs.
The probability mass function of the number of failures for `nbinom` is:
.. math::
f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k
for :math:`k \ge 0`, :math:`0 < p \leq 1`
    `nbinom` takes :math:`n` and :math:`p` as shape parameters where
    :math:`n` is the number of successes, :math:`p` is the probability of a
    single success, and :math:`1-p` is the probability of a single failure.
Another common parameterization of the negative binomial distribution is
in terms of the mean number of failures :math:`\mu` to achieve :math:`n`
successes. The mean :math:`\mu` is related to the probability of success
as
.. math::
p = \frac{n}{n + \mu}
The number of successes :math:`n` may also be specified in terms of a
"dispersion", "heterogeneity", or "aggregation" parameter :math:`\alpha`,
which relates the mean :math:`\mu` to the variance :math:`\sigma^2`,
e.g. :math:`\sigma^2 = \mu + \alpha \mu^2`. Regardless of the convention
used for :math:`\alpha`,
.. math::
p &= \frac{\mu}{\sigma^2} \\
n &= \frac{\mu^2}{\sigma^2 - \mu}
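    A quick numerical check of this relationship, using illustrative values:
    >>> from scipy.stats import nbinom
    >>> import numpy as np
    >>> n, p = 5, 0.3
    >>> mu, sigma2 = nbinom.stats(n, p, moments='mv')
    >>> np.allclose([mu / sigma2, mu**2 / (sigma2 - mu)], [p, n])
    True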
%(after_notes)s
%(example)s
See Also
--------
hypergeom, binom, nhypergeom
"""
def _rvs(self, n, p, size=None, random_state=None):
return random_state.negative_binomial(n, p, size)
def _argcheck(self, n, p):
return (n > 0) & (p > 0) & (p <= 1)
def _pmf(self, x, n, p):
# nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
return _boost._nbinom_pdf(x, n, p)
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return _boost._nbinom_cdf(k, n, p)
def _logcdf(self, x, n, p):
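        # When the cdf exceeds 0.5, compute log(cdf) as log1p(-sf) through the
        # regularized incomplete beta function; this is more accurate than
        # taking the log of a cdf value close to 1.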
k = floor(x)
cdf = self._cdf(k, n, p)
cond = cdf > 0.5
def f1(k, n, p):
return np.log1p(-special.betainc(k + 1, n, 1 - p))
def f2(k, n, p):
return np.log(cdf)
with np.errstate(divide='ignore'):
return _lazywhere(cond, (x, n, p), f=f1, f2=f2)
def _sf(self, x, n, p):
k = floor(x)
return _boost._nbinom_sf(k, n, p)
def _isf(self, x, n, p):
return _boost._nbinom_isf(x, n, p)
def _ppf(self, q, n, p):
return _boost._nbinom_ppf(q, n, p)
def _stats(self, n, p):
        return (
_boost._nbinom_mean(n, p),
_boost._nbinom_variance(n, p),
_boost._nbinom_skewness(n, p),
_boost._nbinom_kurtosis_excess(n, p),
)
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
r"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is:
.. math::
f(k) = (1-p)^{k-1} p
for :math:`k \ge 1`, :math:`0 < p \leq 1`
`geom` takes :math:`p` as shape parameter,
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure.
%(after_notes)s
See Also
--------
planck
%(example)s
"""
def _rvs(self, p, size=None, random_state=None):
return random_state.geometric(p, size=size)
def _argcheck(self, p):
return (p <= 1) & (p > 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log1p(-q) / log1p(-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
r"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
`M` is the total number of objects, `n` is total number of Type I objects.
The random variate represents the number of Type I objects in `N` drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not
universally accepted. See the Examples for a clarification of the
definitions used here.
The probability mass function is defined as,
.. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}}
{\binom{M}{N}}
for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial
coefficients are defined as,
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
    methods directly. For example, to obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
See Also
--------
nhypergeom, binom, nbinom
"""
def _rvs(self, M, n, N, size=None, random_state=None):
return random_state.hypergeometric(n, M-n, N, size=size)
def _get_support(self, M, n, N):
return np.maximum(N-(M-n), 0), np.minimum(n, N)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) -
betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) -
betaln(tot+1, 1))
return result
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
        # therefore unpack all input args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
res = []
for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5):
                # Fewer terms to sum if we calculate log(1-cdf)
res.append(log1p(-exp(self.logcdf(quant, tot, good, draw))))
else:
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
def _logcdf(self, k, M, n, N):
res = []
for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5):
                # Fewer terms to sum if we calculate log(1-sf)
res.append(log1p(-exp(self.logsf(quant, tot, good, draw))))
else:
# Integration over probability mass function using logsumexp
k2 = np.arange(0, quant + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
class nhypergeom_gen(rv_discrete):
r"""A negative hypergeometric discrete random variable.
    Consider a box containing :math:`M` balls: :math:`n` red and
:math:`M-n` blue. We randomly sample balls from the box, one
at a time and *without* replacement, until we have picked :math:`r`
blue balls. `nhypergeom` is the distribution of the number of
red balls :math:`k` we have picked.
%(before_notes)s
Notes
-----
The symbols used to denote the shape parameters (`M`, `n`, and `r`) are not
universally accepted. See the Examples for a clarification of the
definitions used here.
The probability mass function is defined as,
.. math:: f(k; M, n, r) = \frac{{{k+r-1}\choose{k}}{{M-r-k}\choose{n-k}}}
{{M \choose n}}
for :math:`k \in [0, n]`, :math:`n \in [0, M]`, :math:`r \in [0, M-n]`,
and the binomial coefficient is:
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
It is equivalent to observing :math:`k` successes in :math:`k+r-1`
    samples, with the :math:`(k+r)`-th sample being a failure. The former
can be modelled as a hypergeometric distribution. The probability
of the latter is simply the number of failures remaining
:math:`M-n-(r-1)` divided by the size of the remaining population
:math:`M-(k+r-1)`. This relationship can be shown as:
.. math:: NHG(k;M,n,r) = HG(k;M,n,k+r-1)\frac{(M-n-(r-1))}{(M-(k+r-1))}
where :math:`NHG` is probability mass function (PMF) of the
negative hypergeometric distribution and :math:`HG` is the
PMF of the hypergeometric distribution.
%(after_notes)s
Examples
--------
>>> from scipy.stats import nhypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs.
Then if we want to know the probability of finding a given number
of dogs (successes) in a sample with exactly 12 animals that
aren't dogs (failures), we can initialize a frozen distribution
and plot the probability mass function:
>>> M, n, r = [20, 7, 12]
>>> rv = nhypergeom(M, n, r)
>>> x = np.arange(0, n+2)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group with given 12 failures')
>>> ax.set_ylabel('nhypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `nhypergeom`
    methods directly. For example, to obtain the probability mass
function, use:
>>> prb = nhypergeom.pmf(x, M, n, r)
And to generate random numbers:
>>> R = nhypergeom.rvs(M, n, r, size=10)
To verify the relationship between `hypergeom` and `nhypergeom`, use:
>>> from scipy.stats import hypergeom, nhypergeom
>>> M, n, r = 45, 13, 8
>>> k = 6
>>> nhypergeom.pmf(k, M, n, r)
0.06180776620271643
>>> hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1))
0.06180776620271644
See Also
--------
hypergeom, binom, nbinom
References
----------
.. [1] Negative Hypergeometric Distribution on Wikipedia
https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution
.. [2] Negative Hypergeometric Distribution from
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Negativehypergeometric.pdf
"""
def _get_support(self, M, n, r):
return 0, n
def _argcheck(self, M, n, r):
cond = (n >= 0) & (n <= M) & (r >= 0) & (r <= M-n)
return cond
def _logpmf(self, k, M, n, r):
cond = ((r == 0) & (k == 0))
result = _lazywhere(~cond, (k, M, n, r),
lambda k, M, n, r:
(-betaln(k+1, r) + betaln(k+r, 1) -
betaln(n-k+1, M-r-n+1) + betaln(M-r-k+1, 1) +
betaln(n+1, M-n+1) - betaln(M+1, 1)),
fillvalue=0.0)
return result
def _pmf(self, k, M, n, r):
# same as the following but numerically more precise
# return comb(k+r-1, k) * comb(M-r-k, n-k) / comb(M, n)
return exp(self._logpmf(k, M, n, r))
def _stats(self, M, n, r):
# Promote the datatype to at least float
# mu = rn / (M-n+1)
M, n, r = 1.*M, 1.*n, 1.*r
mu = r*n / (M-n+1)
var = r*(M+1)*n / ((M-n+1)*(M-n+2)) * (1 - r / (M-n+1))
# The skew and kurtosis are mathematically
# intractable so return `None`. See [2]_.
g1, g2 = None, None
return mu, var, g1, g2
nhypergeom = nhypergeom_gen(name='nhypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
r"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is:
.. math::
f(k) = - \frac{p^k}{k \log(1-p)}
for :math:`k \ge 1`, :math:`0 < p < 1`
`logser` takes :math:`p` as shape parameter,
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure.
%(after_notes)s
%(example)s
"""
def _rvs(self, p, size=None, random_state=None):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return random_state.logseries(p, size=size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
# logser.pmf(k) = - p**k / (k*log(1-p))
return -np.power(p, k) * 1.0 / k / special.log1p(-p)
def _stats(self, p):
r = special.log1p(-p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
r"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is:
.. math::
f(k) = \exp(-\mu) \frac{\mu^k}{k!}
for :math:`k \ge 0`.
`poisson` takes :math:`\mu \geq 0` as shape parameter.
When :math:`\mu = 0`, the ``pmf`` method
returns ``1.0`` at quantile :math:`k = 0`.
%(after_notes)s
%(example)s
"""
# Override rv_discrete._argcheck to allow mu=0.
def _argcheck(self, mu):
return mu >= 0
def _rvs(self, mu, size=None, random_state=None):
return random_state.poisson(mu, size)
def _logpmf(self, k, mu):
Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
return Pk
def _pmf(self, k, mu):
# poisson.pmf(k) = exp(-mu) * mu**k / k!
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
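        # special.pdtrik inverts the Poisson cdf in k for fixed mu; round the
        # result up, then step back one integer if the smaller value already
        # reaches the requested probability.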
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
mu_nonzero = tmp > 0
g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
r"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is:
.. math::
f(k) = (1-\exp(-\lambda)) \exp(-\lambda k)
for :math:`k \ge 0` and :math:`\lambda > 0`.
`planck` takes :math:`\lambda` as shape parameter. The Planck distribution
can be written as a geometric distribution (`geom`) with
:math:`p = 1 - \exp(-\lambda)` shifted by ``loc = -1``.
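    A quick numerical check of this relationship, using illustrative values:
    >>> from scipy.stats import planck, geom
    >>> import numpy as np
    >>> lambda_ = 0.7
    >>> k = np.arange(5)
    >>> np.allclose(planck.pmf(k, lambda_),
    ...             geom.pmf(k, 1 - np.exp(-lambda_), loc=-1))
    True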
%(after_notes)s
See Also
--------
geom
%(example)s
"""
def _argcheck(self, lambda_):
return lambda_ > 0
def _pmf(self, k, lambda_):
return -expm1(-lambda_)*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return -expm1(-lambda_*(k+1))
def _sf(self, x, lambda_):
return exp(self._logsf(x, lambda_))
def _logsf(self, x, lambda_):
k = floor(x)
return -lambda_*(k+1)
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(*(self._get_support(lambda_)))
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _rvs(self, lambda_, size=None, random_state=None):
# use relation to geometric distribution for sampling
p = -expm1(-lambda_)
return random_state.geometric(p, size=size) - 1.0
def _stats(self, lambda_):
mu = 1/expm1(lambda_)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
C = -expm1(-lambda_)
return lambda_*exp(-lambda_)/C - log(C)
planck = planck_gen(a=0, name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
r"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is:
.. math::
f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N))
for :math:`k = 0,..., N-1`.
`boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_, N):
return (lambda_ > 0) & (N > 0)
def _get_support(self, lambda_, N):
return self.a, N - 1
def _pmf(self, k, lambda_, N):
# boltzmann.pmf(k) =
# (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann', a=0,
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
r"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is:
.. math::
f(k) = \frac{1}{\texttt{high} - \texttt{low}}
for :math:`k \in \{\texttt{low}, \dots, \texttt{high} - 1\}`.
`randint` takes :math:`\texttt{low}` and :math:`\texttt{high}` as shape
parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
return (high > low)
def _get_support(self, low, high):
return low, high-1
def _pmf(self, k, low, high):
# randint.pmf(k) = 1./(high - low)
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high, size=None, random_state=None):
"""An array of *size* random integers >= ``low`` and < ``high``."""
if np.asarray(low).size == 1 and np.asarray(high).size == 1:
# no need to vectorize in that case
return rng_integers(random_state, low, high, size=size)
if size is not None:
# NumPy's RandomState.randint() doesn't broadcast its arguments.
# Use `broadcast_to()` to extend the shapes of low and high
# up to size. Then we can use the numpy.vectorize'd
# randint without needing to pass it a `size` argument.
low = np.broadcast_to(low, size)
high = np.broadcast_to(high, size)
randint = np.vectorize(partial(rng_integers, random_state),
otypes=[np.int_])
return randint(low, high)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
r"""A Zipf (Zeta) discrete random variable.
%(before_notes)s
See Also
--------
zipfian
Notes
-----
The probability mass function for `zipf` is:
.. math::
f(k, a) = \frac{1}{\zeta(a) k^a}
for :math:`k \ge 1`, :math:`a > 1`.
`zipf` takes :math:`a > 1` as shape parameter. :math:`\zeta` is the
Riemann zeta function (`scipy.special.zeta`)
The Zipf distribution is also known as the zeta distribution, which is
a special case of the Zipfian distribution (`zipfian`).
%(after_notes)s
References
----------
.. [1] "Zeta Distribution", Wikipedia,
https://en.wikipedia.org/wiki/Zeta_distribution
%(example)s
Confirm that `zipf` is the large `n` limit of `zipfian`.
>>> from scipy.stats import zipfian
>>> k = np.arange(11)
>>> np.allclose(zipf.pmf(k, a), zipfian.pmf(k, a, n=10000000))
True
"""
def _rvs(self, a, size=None, random_state=None):
return random_state.zipf(a, size=size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
# zipf.pmf(k, a) = 1/(zeta(a) * k**a)
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
def _gen_harmonic_gt1(n, a):
"""Generalized harmonic number, a > 1"""
# See https://en.wikipedia.org/wiki/Harmonic_number; search for "hurwitz"
return zeta(a, 1) - zeta(a, n+1)
def _gen_harmonic_leq1(n, a):
"""Generalized harmonic number, a <= 1"""
if not np.size(n):
return n
n_max = np.max(n) # loop starts at maximum of all n
out = np.zeros_like(a, dtype=float)
# add terms of harmonic series; starting from smallest to avoid roundoff
for i in np.arange(n_max, 0, -1, dtype=float):
mask = i <= n # don't add terms after nth
out[mask] += 1/i**a[mask]
return out
def _gen_harmonic(n, a):
"""Generalized harmonic number"""
n, a = np.broadcast_arrays(n, a)
return _lazywhere(a > 1, (n, a),
f=_gen_harmonic_gt1, f2=_gen_harmonic_leq1)
class zipfian_gen(rv_discrete):
r"""A Zipfian discrete random variable.
%(before_notes)s
See Also
--------
zipf
Notes
-----
The probability mass function for `zipfian` is:
.. math::
f(k, a, n) = \frac{1}{H_{n,a} k^a}
for :math:`k \in \{1, 2, \dots, n-1, n\}`, :math:`a \ge 0`,
:math:`n \in \{1, 2, 3, \dots\}`.
`zipfian` takes :math:`a` and :math:`n` as shape parameters.
:math:`H_{n,a}` is the :math:`n`:sup:`th` generalized harmonic
number of order :math:`a`.
The Zipfian distribution reduces to the Zipf (zeta) distribution as
:math:`n \rightarrow \infty`.
%(after_notes)s
References
----------
.. [1] "Zipf's Law", Wikipedia, https://en.wikipedia.org/wiki/Zipf's_law
.. [2] Larry Leemis, "Zipf Distribution", Univariate Distribution
Relationships. http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
%(example)s
Confirm that `zipfian` reduces to `zipf` for large `n`, `a > 1`.
>>> from scipy.stats import zipf
>>> k = np.arange(11)
>>> np.allclose(zipfian.pmf(k, a=3.5, n=10000000), zipf.pmf(k, a=3.5))
True
"""
def _argcheck(self, a, n):
# we need np.asarray here because moment (maybe others) don't convert
return (a >= 0) & (n > 0) & (n == np.asarray(n, dtype=int))
def _get_support(self, a, n):
return 1, n
def _pmf(self, k, a, n):
return 1.0 / _gen_harmonic(n, a) / k**a
def _cdf(self, k, a, n):
return _gen_harmonic(k, a) / _gen_harmonic(n, a)
def _sf(self, k, a, n):
        k = k + 1  # to match SciPy convention
# see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
return ((k**a*(_gen_harmonic(n, a) - _gen_harmonic(k, a)) + 1)
/ (k**a*_gen_harmonic(n, a)))
def _stats(self, a, n):
        # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
Hna = _gen_harmonic(n, a)
Hna1 = _gen_harmonic(n, a-1)
Hna2 = _gen_harmonic(n, a-2)
Hna3 = _gen_harmonic(n, a-3)
Hna4 = _gen_harmonic(n, a-4)
mu1 = Hna1/Hna
mu2n = (Hna2*Hna - Hna1**2)
mu2d = Hna**2
mu2 = mu2n / mu2d
g1 = (Hna3/Hna - 3*Hna1*Hna2/Hna**2 + 2*Hna1**3/Hna**3)/mu2**(3/2)
g2 = (Hna**3*Hna4 - 4*Hna**2*Hna1*Hna3 + 6*Hna*Hna1**2*Hna2
- 3*Hna1**4) / mu2n**2
g2 -= 3
return mu1, mu2, g1, g2
zipfian = zipfian_gen(a=1, name='zipfian', longname='A Zipfian')
class dlaplace_gen(rv_discrete):
r"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is:
.. math::
f(k) = \tanh(a/2) \exp(-a |k|)
for integers :math:`k` and :math:`a > 0`.
`dlaplace` takes :math:`a` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
# dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)),
log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
def _rvs(self, a, size=None, random_state=None):
# The discrete Laplace is equivalent to the two-sided geometric
# distribution with PMF:
# f(k) = (1 - alpha)/(1 + alpha) * alpha^abs(k)
# Reference:
# https://www.sciencedirect.com/science/
# article/abs/pii/S0378375804003519
# Furthermore, the two-sided geometric distribution is
# equivalent to the difference between two iid geometric
# distributions.
# Reference (page 179):
# https://pdfs.semanticscholar.org/61b3/
# b99f466815808fd0d03f5d2791eea8b541a1.pdf
# Thus, we can leverage the following:
# 1) alpha = e^-a
# 2) probability_of_success = 1 - alpha (Bernoulli trial)
probOfSuccess = -np.expm1(-np.asarray(a))
x = random_state.geometric(probOfSuccess, size=size)
y = random_state.geometric(probOfSuccess, size=size)
return x - y
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
r"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with
expected values :math:`\lambda_1` and :math:`\lambda_2`. Then,
:math:`k_1 - k_2` follows a Skellam distribution with parameters
:math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and
:math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where
:math:`\rho` is the correlation coefficient between :math:`k_1` and
:math:`k_2`. If the two Poisson-distributed r.v. are independent then
:math:`\rho = 0`.
Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive.
For details see: https://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters.
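    As a quick numerical check, the pmf can also be computed by summing over
    the two independent Poisson variables directly (illustrative values; the
    sum is truncated once the terms are negligible):
    >>> from scipy.stats import skellam, poisson
    >>> import numpy as np
    >>> mu1, mu2, k = 2.0, 3.0, 1
    >>> j = np.arange(max(k, 0), 60)
    >>> np.allclose(skellam.pmf(k, mu1, mu2),
    ...             np.sum(poisson.pmf(j, mu1) * poisson.pmf(j - k, mu2)))
    True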
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2, size=None, random_state=None):
n = size
return (random_state.poisson(mu1, n) -
random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
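        # Evaluate the pmf through the noncentral chi-square density (a known
        # identity for the Skellam distribution); the two branches handle
        # negative and non-negative x symmetrically.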
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1 - _ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
class yulesimon_gen(rv_discrete):
r"""A Yule-Simon discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for the `yulesimon` is:
.. math::
f(k) = \alpha B(k, \alpha+1)
for :math:`k=1,2,3,...`, where :math:`\alpha>0`.
Here :math:`B` refers to the `scipy.special.beta` function.
The sampling of random variates is based on pg 553, Section 6.3 of [1]_.
Our notation maps to the referenced logic via :math:`\alpha=a-1`.
For details see the wikipedia entry [2]_.
References
----------
.. [1] Devroye, Luc. "Non-uniform Random Variate Generation",
(1986) Springer, New York.
.. [2] https://en.wikipedia.org/wiki/Yule-Simon_distribution
%(after_notes)s
%(example)s
"""
def _rvs(self, alpha, size=None, random_state=None):
E1 = random_state.standard_exponential(size)
E2 = random_state.standard_exponential(size)
ans = ceil(-E1 / log1p(-exp(-E2 / alpha)))
return ans
def _pmf(self, x, alpha):
return alpha * special.beta(x, alpha + 1)
def _argcheck(self, alpha):
return (alpha > 0)
def _logpmf(self, x, alpha):
return log(alpha) + special.betaln(x, alpha + 1)
def _cdf(self, x, alpha):
return 1 - x * special.beta(x, alpha + 1)
def _sf(self, x, alpha):
return x * special.beta(x, alpha + 1)
def _logsf(self, x, alpha):
return log(x) + special.betaln(x, alpha + 1)
def _stats(self, alpha):
mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1))
mu2 = np.where(alpha > 2,
alpha**2 / ((alpha - 2.0) * (alpha - 1)**2),
np.inf)
mu2 = np.where(alpha <= 1, np.nan, mu2)
g1 = np.where(alpha > 3,
sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)),
np.inf)
g1 = np.where(alpha <= 2, np.nan, g1)
g2 = np.where(alpha > 4,
(alpha + 3) + (alpha**3 - 49 * alpha - 22) / (alpha *
(alpha - 4) * (alpha - 3)), np.inf)
g2 = np.where(alpha <= 2, np.nan, g2)
return mu, mu2, g1, g2
yulesimon = yulesimon_gen(name='yulesimon', a=1)
def _vectorize_rvs_over_shapes(_rvs1):
"""Decorator that vectorizes _rvs method to work on ndarray shapes"""
# _rvs1 must be a _function_ that accepts _scalar_ args as positional
# arguments, `size` and `random_state` as keyword arguments.
# _rvs1 must return a random variate array with shape `size`. If `size` is
# None, _rvs1 must return a scalar.
# When applied to _rvs1, this decorator broadcasts ndarray args
# and loops over them, calling _rvs1 for each set of scalar args.
# For usage example, see _nchypergeom_gen
def _rvs(*args, size, random_state):
_rvs1_size, _rvs1_indices = _check_shape(args[0].shape, size)
size = np.array(size)
_rvs1_size = np.array(_rvs1_size)
_rvs1_indices = np.array(_rvs1_indices)
if np.all(_rvs1_indices): # all args are scalars
return _rvs1(*args, size, random_state)
out = np.empty(size)
# out.shape can mix dimensions associated with arg_shape and _rvs1_size
# Sort them to arg_shape + _rvs1_size for easy indexing of dimensions
# corresponding with the different sets of scalar args
j0 = np.arange(out.ndim)
j1 = np.hstack((j0[~_rvs1_indices], j0[_rvs1_indices]))
out = np.moveaxis(out, j1, j0)
for i in np.ndindex(*size[~_rvs1_indices]):
# arg can be squeezed because singleton dimensions will be
# associated with _rvs1_size, not arg_shape per _check_shape
out[i] = _rvs1(*[np.squeeze(arg)[i] for arg in args],
_rvs1_size, random_state)
return np.moveaxis(out, j0, j1) # move axes back before returning
return _rvs
class _nchypergeom_gen(rv_discrete):
r"""A noncentral hypergeometric discrete random variable.
For subclassing by nchypergeom_fisher_gen and nchypergeom_wallenius_gen.
"""
rvs_name = None
dist = None
def _get_support(self, M, n, N, odds):
N, m1, n = M, n, N # follow Wikipedia notation
m2 = N - m1
x_min = np.maximum(0, n - m2)
x_max = np.minimum(n, m1)
return x_min, x_max
def _argcheck(self, M, n, N, odds):
M, n = np.asarray(M), np.asarray(n),
N, odds = np.asarray(N), np.asarray(odds)
cond1 = (M.astype(int) == M) & (M >= 0)
cond2 = (n.astype(int) == n) & (n >= 0)
cond3 = (N.astype(int) == N) & (N >= 0)
cond4 = odds > 0
cond5 = N <= M
cond6 = n <= M
return cond1 & cond2 & cond3 & cond4 & cond5 & cond6
def _rvs(self, M, n, N, odds, size=None, random_state=None):
@_vectorize_rvs_over_shapes
def _rvs1(M, n, N, odds, size, random_state):
length = np.prod(size)
urn = _PyStochasticLib3()
rv_gen = getattr(urn, self.rvs_name)
rvs = rv_gen(N, n, M, odds, length, random_state)
rvs = rvs.reshape(size)
return rvs
return _rvs1(M, n, N, odds, size=size, random_state=random_state)
def _pmf(self, x, M, n, N, odds):
@np.vectorize
def _pmf1(x, M, n, N, odds):
urn = self.dist(N, n, M, odds, 1e-12)
return urn.probability(x)
return _pmf1(x, M, n, N, odds)
def _stats(self, M, n, N, odds, moments):
@np.vectorize
def _moments1(M, n, N, odds):
urn = self.dist(N, n, M, odds, 1e-12)
return urn.moments()
        m, v = (_moments1(M, n, N, odds)
                if ("m" in moments or "v" in moments) else (None, None))
s, k = None, None
return m, v, s, k
class nchypergeom_fisher_gen(_nchypergeom_gen):
r"""A Fisher's noncentral hypergeometric discrete random variable.
Fisher's noncentral hypergeometric distribution models drawing objects of
two types from a bin. `M` is the total number of objects, `n` is the
number of Type I objects, and `odds` is the odds ratio: the odds of
selecting a Type I object rather than a Type II object when there is only
one object of each type.
The random variate represents the number of Type I objects drawn if we
take a handful of objects from the bin at once and find out afterwards
that we took `N` objects.
%(before_notes)s
See Also
--------
nchypergeom_wallenius, hypergeom, nhypergeom
Notes
-----
Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
with parameters `N`, `n`, and `M` (respectively) as defined above.
The probability mass function is defined as
.. math::
p(x; M, n, N, \omega) =
\frac{\binom{n}{x}\binom{M - n}{N-x}\omega^x}{P_0},
for
:math:`x \in [x_l, x_u]`,
:math:`M \in {\mathbb N}`,
:math:`n \in [0, M]`,
:math:`N \in [0, M]`,
:math:`\omega > 0`,
where
:math:`x_l = \max(0, N - (M - n))`,
:math:`x_u = \min(N, n)`,
.. math::
P_0 = \sum_{y=x_l}^{x_u} \binom{n}{y}\binom{M - n}{N-y}\omega^y,
and the binomial coefficients are defined as
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
`nchypergeom_fisher` uses the BiasedUrn package by Agner Fog with
permission for it to be distributed under SciPy's license.
The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
universally accepted; they are chosen for consistency with `hypergeom`.
Note that Fisher's noncentral hypergeometric distribution is distinct
from Wallenius' noncentral hypergeometric distribution, which models
drawing a pre-determined `N` objects from a bin one by one.
When the odds ratio is unity, however, both distributions reduce to the
ordinary hypergeometric distribution.
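    A quick numerical check of the unit-odds case, using illustrative values:
    >>> from scipy.stats import nchypergeom_fisher, hypergeom
    >>> import numpy as np
    >>> M, n, N = 25, 10, 12
    >>> x = np.arange(n + 1)
    >>> np.allclose(nchypergeom_fisher.pmf(x, M, n, N, 1),
    ...             hypergeom.pmf(x, M, n, N))
    True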
%(after_notes)s
References
----------
.. [1] Agner Fog, "Biased Urn Theory".
https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf
.. [2] "Fisher's noncentral hypergeometric distribution", Wikipedia,
https://en.wikipedia.org/wiki/Fisher's_noncentral_hypergeometric_distribution
%(example)s
"""
rvs_name = "rvs_fisher"
dist = _PyFishersNCHypergeometric
nchypergeom_fisher = nchypergeom_fisher_gen(
name='nchypergeom_fisher',
longname="A Fisher's noncentral hypergeometric")
class nchypergeom_wallenius_gen(_nchypergeom_gen):
r"""A Wallenius' noncentral hypergeometric discrete random variable.
Wallenius' noncentral hypergeometric distribution models drawing objects of
two types from a bin. `M` is the total number of objects, `n` is the
number of Type I objects, and `odds` is the odds ratio: the odds of
selecting a Type I object rather than a Type II object when there is only
one object of each type.
The random variate represents the number of Type I objects drawn if we
draw a pre-determined `N` objects from a bin one by one.
%(before_notes)s
See Also
--------
nchypergeom_fisher, hypergeom, nhypergeom
Notes
-----
Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
with parameters `N`, `n`, and `M` (respectively) as defined above.
The probability mass function is defined as
.. math::
p(x; N, n, M) = \binom{n}{x} \binom{M - n}{N-x}
\int_0^1 \left(1-t^{\omega/D}\right)^x\left(1-t^{1/D}\right)^{N-x} dt
for
:math:`x \in [x_l, x_u]`,
:math:`M \in {\mathbb N}`,
:math:`n \in [0, M]`,
:math:`N \in [0, M]`,
:math:`\omega > 0`,
where
:math:`x_l = \max(0, N - (M - n))`,
:math:`x_u = \min(N, n)`,
.. math::
D = \omega(n - x) + ((M - n)-(N-x)),
and the binomial coefficients are defined as
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
`nchypergeom_wallenius` uses the BiasedUrn package by Agner Fog with
permission for it to be distributed under SciPy's license.
The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
universally accepted; they are chosen for consistency with `hypergeom`.
Note that Wallenius' noncentral hypergeometric distribution is distinct
from Fisher's noncentral hypergeometric distribution, which models
    taking a handful of objects from the bin at once and finding out
    afterwards that `N` objects were taken.
When the odds ratio is unity, however, both distributions reduce to the
ordinary hypergeometric distribution.
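    A quick numerical check of the unit-odds case, using illustrative values:
    >>> from scipy.stats import nchypergeom_wallenius, hypergeom
    >>> import numpy as np
    >>> M, n, N = 25, 10, 12
    >>> x = np.arange(n + 1)
    >>> np.allclose(nchypergeom_wallenius.pmf(x, M, n, N, 1),
    ...             hypergeom.pmf(x, M, n, N))
    True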
%(after_notes)s
References
----------
.. [1] Agner Fog, "Biased Urn Theory".
https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf
.. [2] "Wallenius' noncentral hypergeometric distribution", Wikipedia,
https://en.wikipedia.org/wiki/Wallenius'_noncentral_hypergeometric_distribution
%(example)s
"""
rvs_name = "rvs_wallenius"
dist = _PyWalleniusNCHypergeometric
nchypergeom_wallenius = nchypergeom_wallenius_gen(
name='nchypergeom_wallenius',
longname="A Wallenius' noncentral hypergeometric")
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
|
bsd-3-clause
|
samuel1208/scikit-learn
|
sklearn/semi_supervised/tests/test_label_propagation.py
|
307
|
1974
|
""" test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
|
bsd-3-clause
|
richardliaw/ray
|
python/ray/tune/tests/test_experiment_analysis.py
|
1
|
7614
|
import unittest
import shutil
import tempfile
import random
import os
import pandas as pd
from numpy import nan
import ray
from ray import tune
from ray.tune.utils.mock import MyTrainableClass
class ExperimentAnalysisSuite(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(
num_cpus=4, num_gpus=0, local_mode=True, include_dashboard=False)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.test_name = "analysis_exp"
self.num_samples = 10
self.metric = "episode_reward_mean"
self.test_path = os.path.join(self.test_dir, self.test_name)
self.run_test_exp()
def tearDown(self):
shutil.rmtree(self.test_dir, ignore_errors=True)
def run_test_exp(self):
self.ea = tune.run(
MyTrainableClass,
name=self.test_name,
local_dir=self.test_dir,
stop={"training_iteration": 1},
checkpoint_freq=1,
num_samples=self.num_samples,
config={
"width": tune.sample_from(
lambda spec: 10 + int(90 * random.random())),
"height": tune.sample_from(
lambda spec: int(100 * random.random())),
})
def nan_test_exp(self):
nan_ea = tune.run(
lambda x: nan,
name="testing_nan",
local_dir=self.test_dir,
stop={"training_iteration": 1},
checkpoint_freq=1,
num_samples=self.num_samples,
config={
"width": tune.sample_from(
lambda spec: 10 + int(90 * random.random())),
"height": tune.sample_from(
lambda spec: int(100 * random.random())),
})
return nan_ea
def testDataframe(self):
df = self.ea.dataframe(self.metric, mode="max")
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertEquals(df.shape[0], self.num_samples)
def testStats(self):
assert self.ea.stats()
assert self.ea.runner_data()
def testTrialDataframe(self):
checkpoints = self.ea._checkpoints
idx = random.randint(0, len(checkpoints) - 1)
trial_df = self.ea.trial_dataframes[checkpoints[idx]["logdir"]]
self.assertTrue(isinstance(trial_df, pd.DataFrame))
self.assertEqual(trial_df.shape[0], 1)
def testBestConfig(self):
best_config = self.ea.get_best_config(self.metric, mode="max")
self.assertTrue(isinstance(best_config, dict))
self.assertTrue("width" in best_config)
self.assertTrue("height" in best_config)
def testBestConfigNan(self):
nan_ea = self.nan_test_exp()
best_config = nan_ea.get_best_config(self.metric, mode="max")
self.assertIsNone(best_config)
def testBestLogdir(self):
logdir = self.ea.get_best_logdir(self.metric, mode="max")
self.assertTrue(logdir.startswith(self.test_path))
logdir2 = self.ea.get_best_logdir(self.metric, mode="min")
self.assertTrue(logdir2.startswith(self.test_path))
self.assertNotEquals(logdir, logdir2)
def testBestLogdirNan(self):
nan_ea = self.nan_test_exp()
logdir = nan_ea.get_best_logdir(self.metric, mode="max")
self.assertIsNone(logdir)
def testGetTrialCheckpointsPathsByTrial(self):
best_trial = self.ea.get_best_trial(self.metric, mode="max")
checkpoints_metrics = self.ea.get_trial_checkpoints_paths(best_trial)
logdir = self.ea.get_best_logdir(self.metric, mode="max")
expected_path = os.path.join(logdir, "checkpoint_1", "checkpoint")
assert checkpoints_metrics[0][0] == expected_path
assert checkpoints_metrics[0][1] == 1
def testGetTrialCheckpointsPathsByPath(self):
logdir = self.ea.get_best_logdir(self.metric, mode="max")
checkpoints_metrics = self.ea.get_trial_checkpoints_paths(logdir)
expected_path = os.path.join(logdir, "checkpoint_1/", "checkpoint")
assert checkpoints_metrics[0][0] == expected_path
assert checkpoints_metrics[0][1] == 1
def testGetTrialCheckpointsPathsWithMetricByTrial(self):
best_trial = self.ea.get_best_trial(self.metric, mode="max")
paths = self.ea.get_trial_checkpoints_paths(best_trial, self.metric)
logdir = self.ea.get_best_logdir(self.metric, mode="max")
expected_path = os.path.join(logdir, "checkpoint_1", "checkpoint")
assert paths[0][0] == expected_path
assert paths[0][1] == best_trial.metric_analysis[self.metric]["last"]
def testGetTrialCheckpointsPathsWithMetricByPath(self):
best_trial = self.ea.get_best_trial(self.metric, mode="max")
logdir = self.ea.get_best_logdir(self.metric, mode="max")
paths = self.ea.get_trial_checkpoints_paths(best_trial, self.metric)
expected_path = os.path.join(logdir, "checkpoint_1", "checkpoint")
assert paths[0][0] == expected_path
assert paths[0][1] == best_trial.metric_analysis[self.metric]["last"]
def testGetBestCheckpoint(self):
best_trial = self.ea.get_best_trial(self.metric, mode="max")
checkpoints_metrics = self.ea.get_trial_checkpoints_paths(best_trial)
expected_path = max(checkpoints_metrics, key=lambda x: x[1])[0]
best_checkpoint = self.ea.get_best_checkpoint(
best_trial, self.metric, mode="max")
assert expected_path == best_checkpoint
def testAllDataframes(self):
dataframes = self.ea.trial_dataframes
self.assertTrue(len(dataframes) == self.num_samples)
self.assertTrue(isinstance(dataframes, dict))
for df in dataframes.values():
self.assertEqual(df.training_iteration.max(), 1)
def testIgnoreOtherExperiment(self):
analysis = tune.run(
MyTrainableClass,
name="test_example",
local_dir=self.test_dir,
stop={"training_iteration": 1},
num_samples=1,
config={
"width": tune.sample_from(
lambda spec: 10 + int(90 * random.random())),
"height": tune.sample_from(
lambda spec: int(100 * random.random())),
})
df = analysis.dataframe(self.metric, mode="max")
self.assertEquals(df.shape[0], 1)
class ExperimentAnalysisPropertySuite(unittest.TestCase):
def testBestProperties(self):
def train(config):
for i in range(10):
with tune.checkpoint_dir(i):
pass
tune.report(res=config["base"] + i)
ea = tune.run(
train,
config={"base": tune.grid_search([100, 200, 300])},
metric="res",
mode="max")
trials = ea.trials
self.assertEquals(ea.best_trial, trials[2])
self.assertEquals(ea.best_config, trials[2].config)
self.assertEquals(ea.best_logdir, trials[2].logdir)
self.assertEquals(ea.best_checkpoint, trials[2].checkpoint.value)
self.assertTrue(
all(ea.best_dataframe["trial_id"] == trials[2].trial_id))
self.assertEquals(ea.results_df.loc[trials[2].trial_id, "res"], 309)
self.assertEquals(ea.best_result["res"], 309)
self.assertEquals(ea.best_result_df.loc[trials[2].trial_id, "res"],
309)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
apache-2.0
|
tequa/ammisoft
|
ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/projections/geo.py
|
10
|
21953
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
import matplotlib.spines as mspines
import matplotlib.axis as maxis
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = np.round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return "%0.0f\u00b0" % degrees
RESOLUTION = 75
def _init_axis(self):
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Do not register xaxis or yaxis with spines -- as done in
# Axes._init_axis() -- until GeoAxes.xaxis.cla() works.
# self.spines['geo'].register_axis(self.yaxis)
self._update_transScale()
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
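        # Measure the extent of the projected sphere along each axis, then
        # build an affine map that scales it into the unit square centered
        # at (0.5, 0.5).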
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self,which='grid'):
if which not in ['tick1','tick2','grid']:
msg = "'which' must be on of [ 'tick1' | 'tick2' | 'grid' ]"
raise ValueError(msg)
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self,which='grid'):
if which not in ['tick1','tick2','grid']:
msg = "'which' must be one of [ 'tick1' | 'tick2' | 'grid' ]"
raise ValueError(msg)
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'geo':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
raise TypeError("It is not possible to change axes limits "
"for geographic projections. Please consider "
"using Basemap or Cartopy.")
set_ylim = set_xlim
def format_coord(self, lon, lat):
'return a format string formatting the coordinate'
lon = lon * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if lon >= 0.0:
ew = 'E'
else:
ew = 'W'
return '%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(lon), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
        self._longitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
        Set the number of degrees between each latitude grid.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
This axes object does not support interactive zoom box.
"""
return False
    def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
This axes object does not support interactive pan/zoom.
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# The numerators also need to be masked so that masked
# division will be invoked.
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * ma.sin(half_long)) / sinc_alpha
y = (ma.sin(latitude) / sinc_alpha)
return np.concatenate((x.filled(0), y.filled(0)), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = np.sqrt(1.0 + cos_latitude * np.cos(half_long))
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, xy):
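            # Closed-form inverse of the Hammer projection (see, e.g., the
            # Wikipedia article on the Hammer projection).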
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, ll):
def d(theta):
delta = -(theta + np.sin(theta) - pi_sin_l) / (1 + np.cos(theta))
return delta, np.abs(delta) > 0.001
longitude = ll[:, 0]
latitude = ll[:, 1]
clat = np.pi/2 - np.abs(latitude)
ihigh = clat < 0.087 # within 5 degrees of the poles
ilow = ~ihigh
aux = np.empty(latitude.shape, dtype=np.float)
if ilow.any(): # Newton-Raphson iteration
pi_sin_l = np.pi * np.sin(latitude[ilow])
theta = 2.0 * latitude[ilow]
delta, large_delta = d(theta)
while np.any(large_delta):
theta[large_delta] += delta[large_delta]
delta, large_delta = d(theta)
aux[ilow] = theta / 2
if ihigh.any(): # Taylor series-based approx. solution
e = clat[ihigh]
d = 0.5 * (3 * np.pi * e**2) ** (1.0/3)
aux[ihigh] = (np.pi/2 - d) * np.sign(latitude[ihigh])
xy = np.empty(ll.shape, dtype=np.float)
xy[:,0] = (2.0 * np.sqrt(2.0) / np.pi) * longitude * np.cos(aux)
xy[:,1] = np.sqrt(2.0) * np.sin(aux)
return xy
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform_non_affine(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
# from Equations (7, 8) of
# http://mathworld.wolfram.com/MollweideProjection.html
theta = np.arcsin(y / np.sqrt(2))
lon = (np.pi / (2 * np.sqrt(2))) * x / np.cos(theta)
lat = np.arcsin((2 * theta + np.sin(2 * theta)) / np.pi)
return np.concatenate((lon, lat), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
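# Illustrative sketch (not used by the classes in this module): MollweideTransform
# above finds its auxiliary angle by Newton-Raphson, solving t + sin(t) = pi*sin(lat)
# where t is twice the textbook auxiliary angle, which is why the result is halved.
# A minimal standalone version for a single latitude away from the poles (the
# transform itself switches to a Taylor approximation near them):
def _mollweide_aux_angle(lat, tol=1e-9, max_iter=100):
    """Return the Mollweide auxiliary angle (radians) for one latitude."""
    pi_sin_l = np.pi * np.sin(lat)
    t = 2.0 * lat
    for _ in range(max_iter):
        delta = -(t + np.sin(t) - pi_sin_l) / (1.0 + np.cos(t))
        t += delta
        if abs(delta) < tol:
            break
    return t / 2.0
# e.g. _mollweide_aux_angle(0.0) == 0.0, and np.radians(45) gives about 0.634 rad.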
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform_non_affine(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform_non_affine(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
lon = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((lon, lat), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
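# Illustrative usage sketch, assuming these classes mirror matplotlib's built-in
# geographic projections, which are registered under the names 'aitoff', 'hammer',
# 'mollweide' and 'lambert'. Inputs are longitude/latitude in radians. Guarded so
# it only runs when this file is executed directly:
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    lon = np.radians(np.linspace(-180.0, 180.0, 73))
    lat = np.radians(30.0) * np.ones_like(lon)

    ax = plt.subplot(111, projection='hammer')  # or 'aitoff', 'mollweide', 'lambert'
    ax.plot(lon, lat)  # a line of constant 30-degree latitude
    ax.grid(True)
    plt.show()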
|
bsd-3-clause
|
EVS-ATMOS/high_resolution_hydrology
|
scripts/chicago_flood.py
|
2
|
4075
|
#!/usr/bin/env python
import matplotlib
matplotlib.use('agg')
import pyart
from matplotlib import pyplot as plt
from netCDF4 import num2date, date2num
import numpy as np
from time import time
import os
#here is the key import!
from IPython.parallel import Client
def do_grid_map_gates_to_grid(radar_fname):
import pyart
from matplotlib import pyplot as plt
from netCDF4 import num2date, date2num
import numpy as np
from time import time
import os
md = '/lcrc/group/earthscience/radar/nexrad/chicago_floods/'
try:
radar = pyart.io.read(radar_fname)
rain_z = radar.fields['reflectivity']['data'].copy()
z_lin = 10.0**(radar.fields['reflectivity']['data']/10.)
        rain_z = (z_lin/300.0)**(1./1.4)  # Z = 300 * R**1.4
radar.add_field_like('reflectivity', 'rain_z', rain_z, replace_existing = True)
radar.fields['rain_z']['units'] = 'mm/h'
radar.fields['rain_z']['standard_name'] = 'rainfall_rate'
radar.fields['rain_z']['long_name'] = 'rainfall_rate_from_z'
radar.fields['rain_z']['valid_min'] = 0
radar.fields['rain_z']['valid_max'] = 500
grid = pyart.map.grid_from_radars(
(radar,), grid_shape=(1, 501, 501),
grid_limits=((0, 0),(-50000, 50000), (-50000, 50000)),
fields=radar.fields.keys(), gridding_algo="map_gates_to_grid",
weighting_function='BARNES')
dts = num2date(grid.axes['time']['data'], grid.axes['time']['units'])
sstr = dts[0].strftime('%Y%m%d_%H%M%S')
pyart.io.write_grid(md + 'grid_250_'+sstr+'.nc', grid)
myd = pyart.graph.RadarMapDisplay(radar)
fig = plt.figure(figsize = [18,10])
myd.plot_ppi_map( 'rain_z', vmin = 0, vmax = 100,
resolution = 'h', max_lat = 41.8,
min_lat = 41.25, min_lon = -88.3, max_lon = -87.5)
m = myd.basemap
m.drawparallels(np.linspace(41, 42, 9),labels=[1,0,0,0])
m.drawmeridians(np.linspace(-88.4, -87, 8),labels=[0,0,0,1])
m.drawrivers()
m.drawcounties()
m.drawstates()
m.drawmapscale(-88., 41.55, -88.,
41.55, 10, barstyle='fancy', fontcolor='k',
fillcolor1='b', fillcolor2='k')
myd.plot_point( -87.9706,41.6815,
label_text = 'Argonne Lab', label_offset = (0.0,0.0) )
plt.savefig(md+ 'radar_'+sstr+'.png')
plt.close(fig)
fig = plt.figure(figsize = [15,15])
max_lat = 43
min_lat = 41.5
min_lon = -88.3
max_lon = -87.5
display = pyart.graph.GridMapDisplay(grid)
display.plot_basemap(lat_lines=np.arange(min_lat,max_lat,.1),
lon_lines=np.arange(min_lon, max_lon, .1),
resolution='h')
display.plot_grid('rain_z', vmin=0, vmax=100)
xcf,ycf = display.basemap(-87.9706,41.6815)
display.basemap.plot(xcf,ycf,'ro')
plt.text(xcf+2000.,ycf+2000., 'Argonne Lab')
display.basemap.drawcounties()
display.basemap.drawrivers()
display.basemap.drawmapscale(-88., 41.55, -88., 41.55, 10, barstyle='fancy', fontcolor='k', fillcolor1='b', fillcolor2='k')
display.plot_colorbar()
plt.savefig(md+ 'mapped_250_'+sstr+'.png')
plt.close(fig)
del(radar)
del(grid)
except:
pass
return 0
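# Illustrative helper (not used by the processing function above): the rain-rate
# field is derived from the power-law Z = 300 * R**1.4, inverted as
# R = (Z_lin / 300) ** (1 / 1.4), with Z_lin the reflectivity converted out of dBZ.
def zr_rain_rate(refl_dbz, a=300.0, b=1.4):
    """Convert reflectivity (dBZ) to rain rate (mm/h) via Z = a * R**b."""
    z_lin = 10.0 ** (refl_dbz / 10.0)
    return (z_lin / a) ** (1.0 / b)
# e.g. zr_rain_rate(40.0) is about 12 mm/h with the default coefficients.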
md = '/lcrc/group/earthscience/radar/nexrad/chicago_floods/'
idir = md
filelist = os.listdir(md)
good_files = []
for fl in filelist:
if 'KLOT' in fl:
good_files.append(idir + fl)
good_files.sort()
My_Cluster = Client()
My_View = My_Cluster[:]
print My_View
print len(My_View)
#Turn off blocking so all engines can work async
My_View.block = False
#on all engines do an import of Py-ART
My_View.execute('import matplotlib')
My_View.execute('matplotlib.use("agg")')
#Map the code and input to all workers
result = My_View.map_async(do_grid_map_gates_to_grid, good_files)
#Reduce the result to get a list of output
qvps = result.get()
|
bsd-2-clause
|
clarkfitzg/dask
|
dask/dataframe/tests/test_rolling.py
|
6
|
2485
|
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import dask.dataframe as dd
from dask.async import get_sync
from dask.utils import raises, ignoring
def eq(p, d):
if isinstance(d, dd.DataFrame):
tm.assert_frame_equal(p, d.compute(get=get_sync))
else:
tm.assert_series_equal(p, d.compute(get=get_sync))
def rolling_tests(p, d):
eq(pd.rolling_count(p, 3), dd.rolling_count(d, 3))
eq(pd.rolling_sum(p, 3), dd.rolling_sum(d, 3))
eq(pd.rolling_mean(p, 3), dd.rolling_mean(d, 3))
eq(pd.rolling_median(p, 3), dd.rolling_median(d, 3))
eq(pd.rolling_min(p, 3), dd.rolling_min(d, 3))
eq(pd.rolling_max(p, 3), dd.rolling_max(d, 3))
eq(pd.rolling_std(p, 3), dd.rolling_std(d, 3))
eq(pd.rolling_var(p, 3), dd.rolling_var(d, 3))
eq(pd.rolling_skew(p, 3), dd.rolling_skew(d, 3))
eq(pd.rolling_kurt(p, 3), dd.rolling_kurt(d, 3))
eq(pd.rolling_quantile(p, 3, 0.5), dd.rolling_quantile(d, 3, 0.5))
mad = lambda x: np.fabs(x - x.mean()).mean()
eq(pd.rolling_apply(p, 3, mad), dd.rolling_apply(d, 3, mad))
with ignoring(ImportError):
eq(pd.rolling_window(p, 3, 'boxcar'), dd.rolling_window(d, 3, 'boxcar'))
# Test with edge-case window sizes
eq(pd.rolling_sum(p, 0), dd.rolling_sum(d, 0))
eq(pd.rolling_sum(p, 1), dd.rolling_sum(d, 1))
# Test with kwargs
eq(pd.rolling_sum(p, 3, min_periods=3), dd.rolling_sum(d, 3, min_periods=3))
def test_rolling_series():
ts = pd.Series(np.random.randn(25).cumsum())
dts = dd.from_pandas(ts, 3)
rolling_tests(ts, dts)
def test_rolling_dataframe():
df = pd.DataFrame({'a': np.random.randn(25).cumsum(),
'b': np.random.randn(25).cumsum()})
ddf = dd.from_pandas(df, 3)
rolling_tests(df, ddf)
def test_raises():
df = pd.DataFrame({'a': np.random.randn(25).cumsum(),
'b': np.random.randn(25).cumsum()})
ddf = dd.from_pandas(df, 3)
assert raises(TypeError, lambda: dd.rolling_mean(ddf, 1.5))
assert raises(ValueError, lambda: dd.rolling_mean(ddf, -1))
assert raises(NotImplementedError, lambda: dd.rolling_mean(ddf, 3, freq=2))
assert raises(NotImplementedError, lambda: dd.rolling_mean(ddf, 3, how='min'))
def test_rolling_names():
df = pd.DataFrame({'a': [1, 2, 3],
'b': [4, 5, 6]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(dd.rolling_sum(a, 2).dask) == sorted(dd.rolling_sum(a, 2).dask)
|
bsd-3-clause
|
pmla/polyhedral-template-matching
|
datagen/planar_graphs.py
|
1
|
1669
|
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
def plot_points(points, colours=None, labels=None):
fig = plt.figure()#figsize=(16,14))
fig.set_tight_layout(True)
ax = fig.add_subplot(111, projection='3d', proj_type='ortho')
if colours is not None:
for (index, c) in zip([0, 1, 2, 3], ['C0', 'C1', 'C2', 'C3']):
indices = np.where(colours == index)[0]
(xs, ys, zs) = zip(*points[indices])
ax.scatter(xs, ys, zs, c=c)
else:
(xs, ys, zs) = zip(*points)
ax.scatter(xs, ys, zs)
#for i, e in enumerate(points):
# c = 'rb'[i < 3]
# plt.plot([0, e[0]], [0, e[1]], [0, e[2]], c=c)
if labels is not None:
for p, l in zip(points, labels):
ax.text(p[0], p[1], p[2], l, size=30, color='k')
(xs, ys, zs) = zip(*points)
lim = max([abs(e) for e in xs+ys+zs])
ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
ax.set_zlim(-lim, lim)
plt.show()
def _plot_hull(points, simplices, labels=None):
triangles = points[simplices]
fig = plt.figure()#figsize=(16,14))
fig.set_tight_layout(True)
ax = fig.add_subplot(111, projection='3d')
if 1:
color = (0.0, 0.0, 1., 0.2)
tri = Poly3DCollection(triangles)
tri.set_facecolor(color)
ax.add_collection3d(tri)
for t in triangles:
(xs, ys, zs) = zip(*[t[0], t[1], t[2], t[0]])
ax.plot(xs, ys, zs, c='k')
if labels is not None:
for p, l in zip(points, labels):
ax.text(p[0], p[1], p[2], l, size=30, color='k')
(xs, ys, zs) = zip(*points)
lim = max([abs(e) for e in xs+ys+zs])
#ax.set_xlim(-lim, lim)
#ax.set_ylim(-lim, lim)
#ax.set_zlim(-lim, lim)
plt.show()
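# Illustrative usage, guarded so it only runs when this file is executed directly.
# _plot_hull() expects an (N, 3) point array plus the triangle index array of its
# convex hull, which scipy's ConvexHull exposes as `simplices`; the random points
# below are purely for demonstration.
if __name__ == '__main__':
    from scipy.spatial import ConvexHull

    pts = np.random.normal(size=(20, 3))
    pts /= np.sqrt((pts ** 2).sum(axis=1))[:, np.newaxis]  # project onto unit sphere
    hull = ConvexHull(pts)
    plot_points(pts, labels=[str(i) for i in range(len(pts))])
    _plot_hull(pts, hull.simplices, labels=[str(i) for i in range(len(pts))])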
|
mit
|
aricooperdavis/pitch-lengths
|
pl-pandas.py
|
1
|
2709
|
#! /usr/bin/env python
try:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
except Exception,e:
print "Your system does not have the necessary pre-requisites: "
print str(e)
sys.exit()
usage_string = """usage: .\pl-pandas.py command [command(s)]
Commands that can be run include:
    pitch_info displays a graph of pitch number frequency with stats
length_info displays a histogram of rope lengths with stats
help displays this usage information
For example a common usage might be:
.\pl-pandas.py pitch_info length_info"""
def import_data():
"""takes data from the pitch file"""
master_frame = pd.DataFrame()
with open("pitchFile.csv", 'r') as file:
for line in file:
if line != "\n" and line[0] != "#":
master_frame = pd.concat([master_frame, pd.DataFrame([tuple(line.strip().split(','))])], ignore_index=True)
numeric_frame = master_frame.ix[:,1:].astype(float)
return master_frame, numeric_frame
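# Note on the expected input (inferred from the parsing above; example values are
# hypothetical): import_data() reads "pitchFile.csv" from the working directory,
# skipping blank lines and lines starting with '#'. Each remaining line is split
# on commas; the first field is treated as a label and every following field must
# be numeric, since columns 1: are cast to float. A hypothetical example file:
#
#   # cave name, pitch lengths in metres
#   Example Pot,55,20,25
#   Another Hole,15,20,35,60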
def pitch_info(master_frame):
number_pitches = numeric_frame.count()
mean_number_pitches = number_pitches.mean()
axis = number_pitches.plot(kind="bar", grid=True, rot=0)
axis.set_xlabel("Number of Pitches")
axis.set_ylabel("Number of Caves")
axis.annotate("Mean number of pitches: "+str(mean_number_pitches),(4,75),bbox=dict(facecolor="white", alpha=0.75, boxstyle="round,pad=1"))
plt.show()
def length_info(numeric_frame):
mean_length_pitches = np.around(numeric_frame.mean().mean(), decimals=2)
max_pitch_length = numeric_frame.max().max()
min_pitch_length = numeric_frame.min().min()
axis = numeric_frame.plot.hist(stacked=False, legend=False, bins=18, grid=True, rot=0, alpha=0.75)
axis.set_xlabel("Length of Pitch")
axis.set_ylabel("Number of Pitches")
axis.annotate("Mean length of pitches: "+str(mean_length_pitches)+"\n"+"Max pitch length: "+str(max_pitch_length)+"\n"+"Min pitch length: "+str(min_pitch_length),(110,20),bbox=dict(facecolor="white", alpha=0.75, boxstyle="round,pad=1"))
plt.show()
def get_arguments(usage_string):
if len(sys.argv) == 1:
print usage_string
sys.exit()
for i in range(1, len(sys.argv)):
if sys.argv[i] == "pitch_info":
pitch_info(master_frame)
elif sys.argv[i] == "length_info":
length_info(numeric_frame)
elif sys.argv[i] == "help":
print usage_string
sys.exit()
else:
print "Invalid command: "
print usage_string
sys.exit()
master_frame, numeric_frame = import_data()
get_arguments(usage_string)
|
gpl-3.0
|
mikebenfield/scikit-learn
|
benchmarks/bench_saga.py
|
45
|
8474
|
"""Author: Arthur Mensch
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in term of learning time.
"""
import json
import time
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.externals.joblib import delayed, Parallel, Memory
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
max_iter=10, skip_slow=False):
if skip_slow and solver == 'lightning' and penalty == 'l1':
        print('Skipping l1 logistic regression with solver lightning.')
return
print('Solving %s logistic regression with penalty %s, solver %s.'
% ('binary' if single_target else 'multinomial',
penalty, solver))
if solver == 'lightning':
from lightning.classification import SAGAClassifier
if single_target or solver not in ['sag', 'saga']:
multi_class = 'ovr'
else:
multi_class = 'multinomial'
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
stratify=y)
n_samples = X_train.shape[0]
n_classes = np.unique(y_train).shape[0]
test_scores = [1]
train_scores = [1]
accuracies = [1 / n_classes]
times = [0]
if penalty == 'l2':
alpha = 1. / (C * n_samples)
beta = 0
lightning_penalty = None
else:
alpha = 0.
beta = 1. / (C * n_samples)
lightning_penalty = 'l1'
for this_max_iter in range(1, max_iter + 1, 2):
print('[%s, %s, %s] Max iter: %s' %
('binary' if single_target else 'multinomial',
penalty, solver, this_max_iter))
if solver == 'lightning':
lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
penalty=lightning_penalty,
tol=-1, max_iter=this_max_iter)
else:
lr = LogisticRegression(solver=solver,
multi_class=multi_class,
C=C,
penalty=penalty,
fit_intercept=False, tol=1e-24,
max_iter=this_max_iter,
random_state=42,
)
t0 = time.clock()
lr.fit(X_train, y_train)
train_time = time.clock() - t0
scores = []
for (X, y) in [(X_train, y_train), (X_test, y_test)]:
try:
y_pred = lr.predict_proba(X)
except NotImplementedError:
# Lightning predict_proba is not implemented for n_classes > 2
y_pred = _predict_proba(lr, X)
score = log_loss(y, y_pred, normalize=False) / n_samples
score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
beta * np.sum(np.abs(lr.coef_)))
scores.append(score)
train_score, test_score = tuple(scores)
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
test_scores.append(test_score)
train_scores.append(train_score)
accuracies.append(accuracy)
times.append(train_time)
return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
pred = safe_sparse_dot(X, lr.coef_.T)
if hasattr(lr, "intercept_"):
pred += lr.intercept_
return softmax(pred)
def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
dataset='rcv1', n_jobs=1, skip_slow=False):
mem = Memory(cachedir=expanduser('~/cache'), verbose=0)
if dataset == 'rcv1':
rcv1 = fetch_rcv1()
lbin = LabelBinarizer()
lbin.fit(rcv1.target_names)
X = rcv1.data
y = rcv1.target
y = lbin.inverse_transform(y)
le = LabelEncoder()
y = le.fit_transform(y)
if single_target:
y_n = y.copy()
y_n[y > 16] = 1
y_n[y <= 16] = 0
y = y_n
elif dataset == 'digits':
digits = load_digits()
X, y = digits.data, digits.target
if single_target:
y_n = y.copy()
y_n[y < 5] = 1
y_n[y >= 5] = 0
y = y_n
elif dataset == 'iris':
iris = load_iris()
X, y = iris.data, iris.target
elif dataset == '20newspaper':
ng = fetch_20newsgroups_vectorized()
X = ng.data
y = ng.target
if single_target:
y_n = y.copy()
y_n[y > 4] = 1
            y_n[y <= 4] = 0
y = y_n
X = X[:n_samples]
y = y[:n_samples]
cached_fit = mem.cache(fit_single)
out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
delayed(cached_fit)(solver, X, y,
penalty=penalty, single_target=single_target,
C=1, max_iter=max_iter, skip_slow=skip_slow)
for solver in solvers
for penalty in penalties)
res = []
idx = 0
for solver in solvers:
for penalty in penalties:
if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
lr, times, train_scores, test_scores, accuracies = out[idx]
this_res = dict(solver=solver, penalty=penalty,
single_target=single_target,
times=times, train_scores=train_scores,
test_scores=test_scores,
accuracies=accuracies)
res.append(this_res)
idx += 1
with open('bench_saga.json', 'w+') as f:
json.dump(res, f)
def plot():
import pandas as pd
with open('bench_saga.json', 'r') as f:
f = json.load(f)
res = pd.DataFrame(f)
res.set_index(['single_target', 'penalty'], inplace=True)
grouped = res.groupby(level=['single_target', 'penalty'])
colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}
for idx, group in grouped:
single_target, penalty = idx
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(131)
train_scores = group['train_scores'].values
ref = np.min(np.concatenate(train_scores)) * 0.999
for scores, times, solver in zip(group['train_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Training objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(132)
test_scores = group['test_scores'].values
ref = np.min(np.concatenate(test_scores)) * 0.999
for scores, times, solver in zip(group['test_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(133)
for accuracy, times, solver in zip(group['accuracies'], group['times'],
group['solver']):
ax.plot(times, accuracy, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
name = 'single_target' if single_target else 'multi_target'
name += '_%s' % penalty
plt.suptitle(name)
name += '.png'
fig.tight_layout()
fig.subplots_adjust(top=0.9)
plt.savefig(name)
plt.close(fig)
if __name__ == '__main__':
solvers = ['saga', 'liblinear', 'lightning']
penalties = ['l1', 'l2']
single_target = True
exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
dataset='20newspaper', max_iter=20)
plot()
|
bsd-3-clause
|
appapantula/scikit-learn
|
examples/linear_model/plot_lasso_coordinate_descent_path.py
|
254
|
2639
|
"""
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
uas/mavlink
|
pymavlink/tools/mavgraph.py
|
18
|
9628
|
#!/usr/bin/env python
'''
graph a MAVLink log file
Andrew Tridgell August 2011
'''
import sys, struct, time, os, datetime
import math, re
import matplotlib
from math import *
from pymavlink.mavextra import *
# cope with rename of raw_input in python3
try:
input = raw_input
except NameError:
pass
colourmap = {
'apm' : {
'MANUAL' : (1.0, 0, 0),
'AUTO' : ( 0, 1.0, 0),
'LOITER' : ( 0, 0, 1.0),
'FBWA' : (1.0, 0.5, 0),
'RTL' : ( 1, 0, 0.5),
'STABILIZE' : (0.5, 1.0, 0),
'LAND' : ( 0, 1.0, 0.5),
'STEERING' : (0.5, 0, 1.0),
'HOLD' : ( 0, 0.5, 1.0),
'ALT_HOLD' : (1.0, 0.5, 0.5),
'CIRCLE' : (0.5, 1.0, 0.5),
'POSITION' : (1.0, 0.0, 1.0),
'GUIDED' : (0.5, 0.5, 1.0),
'ACRO' : (1.0, 1.0, 0),
'CRUISE' : ( 0, 1.0, 1.0)
},
'px4' : {
'MANUAL' : (1.0, 0, 0),
'SEATBELT' : ( 0.5, 0.5, 0),
'EASY' : ( 0, 1.0, 0),
'AUTO' : ( 0, 0, 1.0),
'UNKNOWN' : ( 1.0, 1.0, 1.0)
}
}
edge_colour = (0.1, 0.1, 0.1)
lowest_x = None
highest_x = None
def plotit(x, y, fields, colors=[]):
'''plot a set of graphs using date for x axis'''
global lowest_x, highest_x
pylab.ion()
fig = pylab.figure(num=1, figsize=(12,6))
ax1 = fig.gca()
ax2 = None
xrange = 0.0
for i in range(0, len(fields)):
if len(x[i]) == 0: continue
if lowest_x is None or x[i][0] < lowest_x:
lowest_x = x[i][0]
if highest_x is None or x[i][-1] > highest_x:
highest_x = x[i][-1]
if highest_x is None or lowest_x is None:
return
xrange = highest_x - lowest_x
xrange *= 24 * 60 * 60
formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
interval = 1
intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
for interval in intervals:
if xrange / interval < 15:
break
locator = matplotlib.dates.SecondLocator(interval=interval)
if not args.xaxis:
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
empty = True
ax1_labels = []
ax2_labels = []
for i in range(0, len(fields)):
if len(x[i]) == 0:
print("Failed to find any values for field %s" % fields[i])
continue
if i < len(colors):
color = colors[i]
else:
color = 'red'
(tz, tzdst) = time.tzname
if axes[i] == 2:
if ax2 == None:
ax2 = ax1.twinx()
ax = ax2
if not args.xaxis:
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(formatter)
label = fields[i]
if label.endswith(":2"):
label = label[:-2]
ax2_labels.append(label)
else:
ax1_labels.append(fields[i])
ax = ax1
if args.xaxis:
if args.marker is not None:
marker = args.marker
else:
marker = '+'
if args.linestyle is not None:
linestyle = args.linestyle
else:
linestyle = 'None'
ax.plot(x[i], y[i], color=color, label=fields[i],
linestyle=linestyle, marker=marker)
else:
if args.marker is not None:
marker = args.marker
else:
marker = 'None'
if args.linestyle is not None:
linestyle = args.linestyle
else:
linestyle = '-'
ax.plot_date(x[i], y[i], color=color, label=fields[i],
linestyle=linestyle, marker=marker, tz=None)
empty = False
if args.flightmode is not None:
for i in range(len(modes)-1):
c = colourmap[args.flightmode].get(modes[i][1], edge_colour)
ax1.axvspan(modes[i][0], modes[i+1][0], fc=c, ec=edge_colour, alpha=0.1)
c = colourmap[args.flightmode].get(modes[-1][1], edge_colour)
ax1.axvspan(modes[-1][0], ax1.get_xlim()[1], fc=c, ec=edge_colour, alpha=0.1)
if ax1_labels != []:
ax1.legend(ax1_labels,loc=args.legend)
if ax2_labels != []:
ax2.legend(ax2_labels,loc=args.legend2)
if empty:
print("No data to graph")
return
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--planner", action='store_true', help="use planner file format")
parser.add_argument("--condition", default=None, help="select packets by a condition")
parser.add_argument("--labels", default=None, help="comma separated field labels")
parser.add_argument("--legend", default='upper left', help="default legend position")
parser.add_argument("--legend2", default='upper right', help="default legend2 position")
parser.add_argument("--marker", default=None, help="point marker")
parser.add_argument("--linestyle", default=None, help="line style")
parser.add_argument("--xaxis", default=None, help="X axis expression")
parser.add_argument("--multi", action='store_true', help="multiple files with same colours")
parser.add_argument("--zero-time-base", action='store_true', help="use Z time base for DF logs")
parser.add_argument("--flightmode", default=None,
help="Choose the plot background according to the active flight mode of the specified type, e.g. --flightmode=apm for ArduPilot or --flightmode=px4 for PX4 stack logs. Cannot be specified with --xaxis.")
parser.add_argument("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_argument("--output", default=None, help="provide an output format")
parser.add_argument("logs_fields", metavar="<LOG or FIELD>", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
if args.flightmode is not None and args.xaxis:
print("Cannot request flightmode backgrounds with an x-axis expression")
sys.exit(1)
if args.flightmode is not None and args.flightmode not in colourmap:
print("Unknown flight controller '%s' in specification of --flightmode" % args.flightmode)
sys.exit(1)
if args.output is not None:
matplotlib.use('Agg')
import pylab
filenames = []
fields = []
for f in args.logs_fields:
if os.path.exists(f):
filenames.append(f)
else:
fields.append(f)
msg_types = set()
multiplier = []
field_types = []
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey', 'yellow', 'brown', 'darkcyan', 'cornflowerblue', 'darkmagenta', 'deeppink', 'darkred']
# work out msg types we are interested in
x = []
y = []
modes = []
axes = []
first_only = []
re_caps = re.compile('[A-Z_][A-Z0-9_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
y.append([])
x.append([])
axes.append(1)
first_only.append(False)
def add_data(t, msg, vars, flightmode):
'''add some data'''
mtype = msg.get_type()
if args.flightmode is not None and (len(modes) == 0 or modes[-1][1] != flightmode):
modes.append((t, flightmode))
if mtype not in msg_types:
return
for i in range(0, len(fields)):
if mtype not in field_types[i]:
continue
f = fields[i]
if f.endswith(":2"):
axes[i] = 2
f = f[:-2]
if f.endswith(":1"):
first_only[i] = True
f = f[:-2]
v = mavutil.evaluate_expression(f, vars)
if v is None:
continue
if args.xaxis is None:
xv = t
else:
xv = mavutil.evaluate_expression(args.xaxis, vars)
if xv is None:
continue
y[i].append(v)
x[i].append(xv)
def process_file(filename):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps, zero_time_base=args.zero_time_base, dialect=args.dialect)
vars = {}
while True:
msg = mlog.recv_match(args.condition)
if msg is None: break
tdays = matplotlib.dates.date2num(datetime.datetime.fromtimestamp(msg._timestamp))
add_data(tdays, msg, mlog.messages, mlog.flightmode)
if len(filenames) == 0:
print("No files to process")
sys.exit(1)
if args.labels is not None:
labels = args.labels.split(',')
if len(labels) != len(fields)*len(filenames):
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*len(filenames)))
sys.exit(1)
else:
labels = None
for fi in range(0, len(filenames)):
f = filenames[fi]
process_file(f)
for i in range(0, len(x)):
if first_only[i] and fi != 0:
x[i] = []
y[i] = []
if labels:
lab = labels[fi*len(fields):(fi+1)*len(fields)]
else:
lab = fields[:]
if args.multi:
col = colors[:]
else:
col = colors[fi*len(fields):]
plotit(x, y, lab, colors=col)
for i in range(0, len(x)):
x[i] = []
y[i] = []
if args.output is None:
pylab.show()
pylab.draw()
input('press enter to exit....')
else:
pylab.legend(loc=2,prop={'size':8})
pylab.savefig(args.output, bbox_inches='tight', dpi=200)
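# Example invocations (illustrative; log and field names are hypothetical):
#
#   mavgraph.py flight.tlog ATTITUDE.roll ATTITUDE.pitch
#   mavgraph.py --flightmode=apm flight.tlog ATTITUDE.roll VFR_HUD.alt:2
#   mavgraph.py --output=roll.png flight.tlog ATTITUDE.roll
#
# A trailing ":2" on a field expression places it on the right-hand (twinx) axis,
# ":1" keeps only the data from the first file when several logs are given, and
# --output saves the figure instead of opening an interactive window.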
|
lgpl-3.0
|
dmarx/VideoLinkBot
|
simplebot.py
|
1
|
13315
|
"""
Reddit bot that scrapes a post for video links
and posts the collection in a table as a comment.
This script is designed for scraping a single post.
Run as follows:
import simplebot as s
s.login(_user=username, _pass=password)
s.post_aggregate_links(submission_id)
To run the bot as a continuous scrape of
/r/all/comments, use simplemonitor.py.
"""
import praw
from praw.errors import APIException
import re
import urlparse as up
from HTMLParser import HTMLParser
from lxml import etree
from urllib2 import Request, urlopen
import time
import pandas as pd
from video_host_utilities import youtube_link_cleaner, supported_domains, \
link_cleaners, title_cleaners, get_host_code
_ua = "YoutubeLinkBot reddit bot by /u/shaggorama"
r = praw.Reddit(_ua)
botCommentsMemo = {}
scrapedCommentsMemo = {}
scrapedLinksMemo = {}
def login(_user=None, _pass=None, fname='loginCredentials.txt'):
if _user is None and _pass is None:
with open(fname,'r') as f:
_user = f.readline().strip()
_pass = f.readline().strip()
print "Logging in as: {0} / {1}".format(_user, _pass)
r.login(username=_user, password=_pass)
def fix_html_entities(html, parser=HTMLParser()):
return parser.unescape( parser.unescape(html))
def get_video_links_from_html(text):
"""
Strips video link from a string in html format
by looking for the href attribute.
"""
# could also just use BeautifulSoup, but this regex works fine
link_pat = re.compile('href="(.*?)"')
links = link_pat.findall(text)
video_links = []
for l in links:
code = get_host_code(l)
if code:
clean = link_cleaners[code]
if clean:
link = clean(fix_html_entities(l))
if link:
video_links.append(link)
return video_links
def get_title(url, default = None, hparser=etree.HTMLParser(encoding='utf-8')):
"""
returns the title of a webpage given a url
(e.g. the title of a youtube video)
"""
def _get_title(_url):
HEADER = {'Accept-Language':'en-US,en;q=0.5'}
request = Request(_url, headers=HEADER)
data = urlopen(request)
htree=etree.parse(data, hparser)
raw_title = htree.find(".//title").text
code = get_host_code(_url)
title = title_cleaners[code](raw_title)
title = re.sub('[\|\*\[\]\(\)~\\\]','',title)
return title
try:
title = _get_title(url)
except Exception, e:
print "Encountered some error getting title for video at", url
print e
time.sleep(2)
try:
title = _get_title(url)
except:
            print 'OK then, let\'s just call it "%s"' % default
title = default
if title is None:
title = default
return title
def scrape(submission):
"""
    Given a submission id (or a praw submission object), scrapes that
    submission's comments for video links and returns the collected links
    as a pandas DataFrame indexed by URL.
    @submission: a praw submission object, or a submission id string
"""
### Should add in some functionality for recognizing when we've already maxed-out the comment length on a post.
### OH SHIT! better yet, figure out a way to RESPOND TO MY OWN COMMENT WITH ADDITIONAL LINKS.
# just for convenience
if type(submission) == type(''):
submission = r.get_submission(submission_id = submission)
# for updating links and whatever.
if scrapedLinksMemo.has_key(submission.id):
collected_links = scrapedLinksMemo[submission.id]
scrapedCommentIDs = scrapedCommentsMemo[submission.id]
print "We have already collected %d video links on this submission." % len(collected_links)
else:
scrapedCommentIDs = set()
scrapedCommentsMemo[submission.id] = scrapedCommentIDs
print "got %d comments" % len(submission.all_comments_flat)
for i, comment in enumerate(submission.all_comments_flat):
try:
if comment.author.name == r.user.name: # deleted comment handling doesn't seem to be working properly.
# if we have already memoized a bot comment for this post, continue
# otheriwse, confirm found bot comment contains links and if it does,
# memoize it.
if botCommentsMemo.has_key(submission.id):
continue
elif get_video_links_from_html(comment.body_html):
botCommentsMemo[submission.id] = comment
else:
links = get_video_links_from_html(comment.body_html)
for link in links:
add_memo_entry(comment, link)
except Exception, e:
# ignore deleted comments and comments by deleted users.
print "encountered some error in scrape()"
print e
continue # why do name attribute errors keep getting re-raised???
scrapedCommentIDs.add(comment.id)
collected_links = scrapedLinksMemo[submission.id]
print "Scraped {0} comments, found {1} links".format(i, len(collected_links) )
return collected_links # this isn't really even necessary since we could just call it down from the memo.
def get_scraped_comments(link_id):
""" to be retired in favor of call to memo"""
print "building comments memo"
if scrapedLinksMemo.has_key(link_id):
collected_comments = scrapedCommentsMemo[link_id]
scraped = set( [collected_comments[url]['id'] for url in collected_comments] )
else:
"Populating scrapedCommentsMemo with", link_id
scraped = set()
scrapedCommentsMemo[link_id] = {}
return scraped
def add_memo_entry(comment, link):
submission_id = comment.submission.id
if not link:
if not scrapedCommentsMemo.has_key(submission_id):
scrapedCommentsMemo[submission_id] = set() # this might be redundant
scrapedCommentsMemo[submission_id].add(comment.id)
try:
username = comment.author.name
except:
username = None
link_entry = {'author':username
,'created_utc':comment.created_utc
,'permalink':comment_shortlink(comment)
, 'id':comment.id
,'score':comment.score
,'title':None # This is lazy
}
if scrapedLinksMemo.has_key(submission_id):
collected_links = scrapedLinksMemo[submission_id]
try:
if collected_links.ix[link, 'score'] < comment.score:
# collected_links.ix[link, :] = link_entry ### I think this is causing the bug in issue # 25
# This is a shitty fix, but it should solve the problem.
for k in link_entry.keys():
collected_links.ix[link, k] = link_entry[k]
except KeyError, e:
new_rec = pd.DataFrame(link_entry, index=[link])
collected_links = collected_links.append(new_rec)
scrapedLinksMemo[submission_id] = collected_links
else:
scrapedLinksMemo[submission_id] = pd.DataFrame(link_entry, index=[link])
def comment_shortlink(c):
return 'http://reddit.com/comments/'+ c.link_id[3:] + '/_/' + c.id
def build_comment(collected_links, link_id=None):
print "Building comment"
head = '''Here is a list of video links collected from comments that redditors have made in response to this submission:
|Source Comment|Score|Video Link|
|:-------|:-------|:-------|\n'''
tail ="""\n* [VideoLinkBot FAQ](http://www.reddit.com/r/VideoLinkBot/wiki/faq)
* [Feedback](http://www.reddit.com/r/VideoLinkBot/submit)"""
titles = []
print "Getting video titles"
if link_id: # if we've been provided with a link_id, memoize the link titles.
for url in collected_links.index:
try:
if not scrapedLinksMemo[link_id].ix[url,'title']:
scrapedLinksMemo[link_id].ix[url,'title'] = get_title(url)
print "got title for",url
except Exception, e:
print "some problem getting title for", url
print e
continue
print "Got video titles. Formatting text for each link."
text=u''
for _url, c in scrapedLinksMemo[link_id].sort(columns='score',ascending=False).iterrows():
if c['title']:
_title = c['title']
else:
_title = _url
text += u'|[{author}]({permalink})|{score}|[{title}]({url})|\n'.format(
author=c['author']
,permalink = c['permalink']
                ,title = _title
,url = _url
,score= c['score']
)
len_playlist = 82 # I think...
print "Trimming content as needed"
text = trim_comment(text, 10000-len(head)-len(tail)-len_playlist)
print "Comment built."
return head+text+tail
def post_comment(link_id, subm, text):
try:
if botCommentsMemo.has_key(link_id):
bot_comment = botCommentsMemo[link_id]
print "editing", bot_comment.id
bot_comment.edit(text)
# need to overwrite existing comment object, otherwise we'll add playlist
# using the pre-scrape text.
# Manually overwrite 'body' attribute.
bot_comment.body = text
print "successfully updated comment."
else:
print "Posting new comment"
bot_comment = subm.add_comment(text)
botCommentsMemo[link_id] = bot_comment
print "Successfully posted new comment."
result = True
print bot_comment.id
except APIException, e:
# need to handle comments that are too long.
# Really, this should probably be in build_comment()
print e
print "sleeping for 5 seconds, trimming comment"
        time.sleep(5) # maybe the API is annoyed with us
trim_comment(text) # maybe the comment is too long (this should have been handled already)
result = False
return result
def trim_comment(text, targetsize=10000):
"""
If comment is longer than 10000 chars, reddit won't let us post it. This boils down to around 50 links (I think).
"""
# Removing permalink's to comments would significantly reduce the size of my comments.
# could still post a link to the user's commenting history
# Alternatively, could post a shortlink (?)
print "Trimming comment down to %d chars." % targetsize
while len(text)> targetsize:
text = '\n'.join(text.split('\n')[:-1])#[2:]
print "Processed comment length:",len(text)
return text
def add_playlist(c):
"""
Adds a radd.it playlist to an existing comment.
"""
playlist = "http://radd.it/comments/{0}/_/{1}?only=videos&start=1".format(c.link_id[3:], c.id)
text = c.body + "\n* [Playlist of videos in this comment]({0})".format(playlist)
c.edit(text)
def post_aggregate_links(link_id='178ki0', max_num_comments = 1000, min_num_comments = 8, min_num_links=5):
"""Not sure which function to call? You probably want this one."""
subm = r.get_submission(submission_id = link_id)
if not min_num_comments < subm.num_comments < max_num_comments:
print "[NO POST] Submission has %d comments. Not worth scraping." % subm.num_comments
return None
try:
print u'Scraping "{0}"'.format(subm.title)
except:
print u'Scraping "{0}"'.format(subm.id)
links = scrape(subm) # Theoretically, we could just pull this down from the memo.
n_links = len(links)
if n_links >= min_num_links:
authors = links.author.unique()
if len(authors) >1:
try:
print u'[POST] Posting {nlinks} links to "{sub}" post "{post}"'.\
format(nlinks = n_links
,sub = subm.subreddit.display_name
,post = subm.title)
except:
print u'[POST] Posting {nlinks} links to "{sub}" post "{post}"'.\
format(nlinks = n_links
,sub = subm.subreddit.id
,post = subm.id)
text = build_comment(links, subm.id)
print "comment built, trying to post."
posted = False
while not posted:
posted = post_comment(link_id, subm, text)
print "Appending playlist..."
add_playlist(botCommentsMemo[link_id])
print "Video links successfully posted."
else:
print "[NO POST] All links from same user. Need at least 2 different users to post."
else:
print "[NO POST] Only found %d links. Need %d to post." % (n_links, min_num_links)
if __name__ == '__main__':
login()
post_aggregate_links()
|
mit
|
sinhrks/expandas
|
pandas_ml/skaccessors/gaussian_process.py
|
3
|
2067
|
#!/usr/bin/env python
from pandas_ml.core.accessor import _AccessorMethods, _attach_methods, _wrap_data_func
class GaussianProcessMethods(_AccessorMethods):
"""
Accessor to ``sklearn.gaussian_process``.
"""
_module_name = 'sklearn.gaussian_process'
_method_mapper = dict(predict={'GaussianProcess': '_predict'})
@property
def correlation_models(self):
"""Property to access ``sklearn.gaussian_process.correlation_models``"""
module_name = 'sklearn.gaussian_process.correlation_models'
attrs = ['absolute_exponential', 'squared_exponential',
'generalized_exponential', 'pure_nugget',
'cubic', 'linear']
return _AccessorMethods(self._df, module_name=module_name, attrs=attrs)
@property
def regression_models(self):
"""Property to access ``sklearn.gaussian_process.regression_models``"""
return RegressionModelsMethods(self._df)
@classmethod
def _predict(cls, df, estimator, *args, **kwargs):
data = df.data.values
eval_MSE = kwargs.get('eval_MSE', False)
if eval_MSE:
y, MSE = estimator.predict(data, *args, **kwargs)
if y.ndim == 1:
y = df._constructor_sliced(y, index=df.index)
MSE = df._constructor_sliced(MSE, index=df.index)
else:
y = df._constructor(y, index=df.index)
MSE = df._constructor(MSE, index=df.index)
return y, MSE
else:
y = estimator.predict(data, *args, **kwargs)
if y.ndim == 1:
y = df._constructor_sliced(y, index=df.index)
else:
y = df._constructor(y, index=df.index)
return y
class RegressionModelsMethods(_AccessorMethods):
_module_name = 'sklearn.gaussian_process.regression_models'
_regression_methods = ['constant', 'linear', 'quadratic']
_attach_methods(RegressionModelsMethods, _wrap_data_func, _regression_methods)
|
bsd-3-clause
|
giacomov/XtDac
|
bin/chandra/xtc_gif_generator.py
|
1
|
8416
|
#!/usr/bin/env python
"""
Generate lightcurves for each candidate given a list of candidates
"""
# Set to a non-interactive matplotlib backend
import matplotlib
matplotlib.use("agg")
import argparse
import os
import sys
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import seaborn as sbs
from matplotlib.colors import LogNorm
from astropy.convolution import convolve_fft, convolve, Gaussian2DKernel
from matplotlib.animation import ArtistAnimation, FFMpegWriter
from XtDac.ChandraUtils.find_files import find_files
from XtDac.ChandraUtils import logging_system
from XtDac.ChandraUtils.run_command import CommandRunner
from XtDac.ChandraUtils.sanitize_filename import sanitize_filename
from XtDac.DivideAndConquer import XMMWCS
from XtDac.DivideAndConquer.HardwareUnit import hardwareUnitFactory
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Generate gifs to visualize transient')
parser.add_argument("--masterfile", help="Path to file containing list of transients",
required=True, type=str)
parser.add_argument("--data_path", help="Path to directory containing data", required=False, type=str, default='.')
parser.add_argument("--verbose-debug", action='store_true')
parser.add_argument("--cleanup", action='store_true')
# Get the logger
logger = logging_system.get_logger(os.path.basename(sys.argv[0]))
# Get the command runner
runner = CommandRunner(logger)
args = parser.parse_args()
data_path = sanitize_filename(args.data_path)
masterfile = sanitize_filename(args.masterfile)
transient_data = np.array(np.recfromtxt(masterfile, names=True), ndmin=1)
for transient in transient_data:
obsid = transient['Obsid']
ccd = transient['CCD']
candidate = transient['Candidate']
tstart = transient['Tstart']
tstop = transient['Tstop']
duration = tstop-tstart
event_files = find_files(data_path, "ccd_%s_*_filtered_*.fits" % (ccd))
assert len(event_files)==1, "Couldn't find event file. " \
"I was looking for %s" % ("ccd_%s_*_filtered_*.fits" % (ccd))
event_file = event_files[0]
# get start and stop time of observation
with pyfits.open(event_file, memmap=False) as fits_file:
# Get the start of the first GTI and the stop of the last one
gti_starts = []
gti_stops = []
for ext in fits_file[1:]:
if ext.header['EXTNAME'] == 'GTI':
gti_starts.append(ext.data.field("START").min())
gti_stops.append(ext.data.field("STOP").max())
frame_time = fits_file['EVENTS'].header['TIMEDEL']
tmin = min(gti_starts)
tmax = max(gti_stops)
# Get minimum and maximum X and Y, so we use always the same binning for the images
xmin, xmax = fits_file['EVENTS'].data.field("X").min(), fits_file['EVENTS'].data.field("X").max()
ymin, ymax = fits_file['EVENTS'].data.field("Y").min(), fits_file['EVENTS'].data.field("Y").max()
# Get position and transform to X,Y
ra = transient['RA']
dec = transient['Dec']
wcs = XMMWCS.XMMWCS(event_file, fits_file['EVENTS'].data.field("X"), fits_file['EVENTS'].data.field("Y"))
transient_x, transient_y = wcs.sky2xy([[ra, dec]])[0]
hwu = hardwareUnitFactory(event_file)
logger.info("Duration: %s" %duration)
logger.info("Tmin: %s" % tmin)
logger.info("Tmax: %s" %tmax)
logger.info("frame time: %s" % frame_time)
logger.info("Obs Time: %s" %(tmax-tmin))
logger.info("X,Y = (%.3f, %.3f)" % (transient_x, transient_y))
# Use the interval before the transient and the interval after the transient, as well as of course
# the interval of the transient itself
intervals = [tmin]
# Add tstart only if it is sufficiently different from tmin (so we don't get an empty interval when the
# transient is right at the beginning)
if abs(tstart - tmin) > (frame_time + frame_time / 2.0):
intervals.append(tstart)
intervals.append(tstop)
# Add tmax only if it is sufficiently different from tstop, so we don't get an empty interval when the
# transient is right at the end)
if abs(tmax - tstop) > (frame_time + frame_time / 2.0):
intervals.append(tmax)
intervals = sorted(intervals)
deltas = np.array(intervals[1:]) - np.array(intervals[:-1])
evt_name, evt_file_ext = os.path.splitext(os.path.basename(event_file))
# Create individual time interval files
images = []
for i in range(len(intervals)-1):
outfile = "cand_%s_%s_TI_%s%s" %(candidate, evt_name, i+1, evt_file_ext)
cmd_line = 'ftcopy \"%s[(TIME >= %s) && (TIME <= %s)]\" %s clobber=yes ' \
% (event_file, intervals[i], intervals[i+1], outfile)
runner.run(cmd_line)
images.append(outfile)
#create a list of frames that will be animated into a gif
frames = []
# Prepare bins
# xbins = np.linspace(xmin, xmax, 300)
# ybins = np.linspace(ymin, ymax, 300)
conv_kernel_size = max(np.ceil(float(transient['PSF_sizearcsec']) / hwu.getPixelScale()), 5)
# Create bins of size 1 (i.e., one pixel per bin)
xbins = np.arange(transient_x - 5 * conv_kernel_size, transient_x + 5 * conv_kernel_size + 1, 1)
ybins = np.arange(transient_y - 5 * conv_kernel_size, transient_y + 5 * conv_kernel_size + 1, 1)
logger.info("Image will be %i x %i pixels" % (xbins.shape[0], ybins.shape[0]))
fig = plt.figure()
sub = fig.add_subplot(111)
sub.set_title("ObsID %s, CCD %s \nTstart = %s, Tstop = %s (%i events)" % (obsid, ccd, tstart, tstop,
float(transient['N_events'])))
for i, image in enumerate(images):
# Compute interval duration
dt = intervals[i + 1] - intervals[i]
#get x and y coordinates of image from fits file
data = pyfits.getdata(image)
x = data.field("x")
y = data.field("y")
#create 2D histogram data from it
sbs.set(rc={'axes.facecolor': 'black', 'axes.grid': False})
hh, X, Y = np.histogram2d(x, y, bins=[xbins, ybins])
#smooth data
gauss_kernel = Gaussian2DKernel(stddev=max(conv_kernel_size / 8.0, 2.0))
smoothed_data_gauss = convolve_fft(hh, gauss_kernel, normalize_kernel=True)
if x.shape[0] > 0:
img = sub.imshow(smoothed_data_gauss, cmap='hot', animated=True, origin='lower')
else:
# No events in this image. Generate an empty image
img = sub.imshow(smoothed_data_gauss, cmap='hot', animated=True, origin='lower')
# Draw PSF circle
circ = Circle((smoothed_data_gauss.shape[0]/2.0 + 1, smoothed_data_gauss.shape[1]/2.0 + 1),
float(transient['PSF_sizearcsec']) / hwu.getPixelScale(),
fill=False, lw=2, color='green')
sub.add_patch(circ)
text = sub.annotate("%i" % (i+1), xy=(0.5, 0.03),
xycoords='figure fraction',
annotation_clip=False)
text3 = sub.annotate("Duration: %.2f s" % (dt), xy=(0.55, 0.13),
xycoords='figure fraction',
annotation_clip=False, color='green')
sub.set_yticklabels([])
sub.set_xticklabels([])
#add this image to list of frames
frames.append([img, text, text3, circ])
# Remove the image
if args.cleanup:
os.remove(image)
#animate and save gif
logger.info("Creating gif ObsID %s, CCD %s, Candidate %s...\n" %(obsid, ccd, candidate))
anim = ArtistAnimation(fig, frames, interval=2000)
anim.save("%s_cand_%s.gif" %(evt_name, candidate), writer='imagemagick')
plt.close()
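# Usage note (inferred from the argument parsing and column accesses above; paths
# are hypothetical): the master file read by np.recfromtxt(..., names=True) needs
# a header row providing at least the columns Obsid, CCD, Candidate, Tstart,
# Tstop, RA, Dec, PSF_sizearcsec and N_events, and the data directory must
# contain the matching "ccd_<CCD>_*_filtered_*.fits" event files. For example:
#
#   xtc_gif_generator.py --masterfile candidates.txt --data_path ./obsid_12345 --cleanup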
|
bsd-3-clause
|
dhruv13J/scikit-learn
|
sklearn/externals/joblib/__init__.py
|
35
|
4382
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
|
bsd-3-clause
|
pythonvietnam/scikit-learn
|
sklearn/pipeline.py
|
162
|
21103
|
"""
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user-given name.
Keys are step names and values are the corresponding steps.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
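Examples
--------
A minimal usage sketch (not part of the original docstring; the step
names and component counts are arbitrary)::
>>> from sklearn.pipeline import FeatureUnion
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> union = FeatureUnion([("pca", PCA(n_components=1)),
...                       ("svd", TruncatedSVD(n_components=2))])
>>> X = [[0., 1., 3.], [2., 2., 5.]]
>>> union.fit_transform(X)  # doctest: +SKIP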
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
|
bsd-3-clause
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/pandas/tests/io/msgpack/test_extension.py
|
14
|
2204
|
from __future__ import print_function
import array
import pandas.io.msgpack as msgpack
from pandas.io.msgpack import ExtType
from .common import frombytes, tobytes
def test_pack_ext_type():
def p(s):
packer = msgpack.Packer()
packer.pack_ext_type(0x42, s)
return packer.bytes()
assert p(b'A') == b'\xd4\x42A' # fixext 1
assert p(b'AB') == b'\xd5\x42AB' # fixext 2
assert p(b'ABCD') == b'\xd6\x42ABCD' # fixext 4
assert p(b'ABCDEFGH') == b'\xd7\x42ABCDEFGH' # fixext 8
assert p(b'A' * 16) == b'\xd8\x42' + b'A' * 16 # fixext 16
assert p(b'ABC') == b'\xc7\x03\x42ABC' # ext 8
assert p(b'A' * 0x0123) == b'\xc8\x01\x23\x42' + b'A' * 0x0123 # ext 16
assert (p(b'A' * 0x00012345) ==
b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345) # ext 32
def test_unpack_ext_type():
def check(b, expected):
assert msgpack.unpackb(b) == expected
check(b'\xd4\x42A', ExtType(0x42, b'A')) # fixext 1
check(b'\xd5\x42AB', ExtType(0x42, b'AB')) # fixext 2
check(b'\xd6\x42ABCD', ExtType(0x42, b'ABCD')) # fixext 4
check(b'\xd7\x42ABCDEFGH', ExtType(0x42, b'ABCDEFGH')) # fixext 8
check(b'\xd8\x42' + b'A' * 16, ExtType(0x42, b'A' * 16)) # fixext 16
check(b'\xc7\x03\x42ABC', ExtType(0x42, b'ABC')) # ext 8
check(b'\xc8\x01\x23\x42' + b'A' * 0x0123,
ExtType(0x42, b'A' * 0x0123)) # ext 16
check(b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345,
ExtType(0x42, b'A' * 0x00012345)) # ext 32
def test_extension_type():
def default(obj):
print('default called', obj)
if isinstance(obj, array.array):
typecode = 123 # application specific typecode
data = tobytes(obj)
return ExtType(typecode, data)
raise TypeError("Unknwon type object %r" % (obj, ))
def ext_hook(code, data):
print('ext_hook called', code, data)
assert code == 123
obj = array.array('d')
frombytes(obj, data)
return obj
obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])]
s = msgpack.packb(obj, default=default)
obj2 = msgpack.unpackb(s, ext_hook=ext_hook)
assert obj == obj2
|
agpl-3.0
|
zhenv5/scikit-learn
|
examples/text/mlcomp_sparse_document_classification.py
|
292
|
4498
|
"""
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded, unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
t0 = time()
news_test = load_mlcomp('20news-18828', 'test')
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
|
bsd-3-clause
|
advancedplotting/aplot
|
python/plotserv/server.py
|
1
|
9014
|
# Copyright (c) 2014-2015, Heliosphere Research LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, os
import time
import psutil
import traceback
import multiprocessing
# core, Terminals -> imported by child process
import socket
from cStringIO import StringIO
import numpy as np
from . import errors
# "LKV" -> "LabVIEW Key-Value", i.e. an array of 2-string clusters
class ShouldStopException(Exception):
pass
class MessageTruncatedError(Exception):
"""
Raised when the complete message could not be read from a socket.
"""
pass
def readall(sock, size, max_recv_size=4096):
""" Read *size* bytes from socket and return a string.
Raises MessageTruncatedError if the read fails to complete.
"""
sio = StringIO()
while sio.tell() < size:
outstanding = size - sio.tell()
recv_size = outstanding if outstanding <= max_recv_size else max_recv_size
tmp = sock.recv(recv_size)
if tmp == '':
raise MessageTruncatedError("Socket message truncated (got %d bytes of %d)" % (sio.tell(), size))
sio.write(tmp)
sio.seek(0)
return sio.read()
def read_packed_string(sio):
""" Read a packed string from a file-like object """
s = sio.read(4)
if len(s) != 4:
raise MessageTruncatedError("Packed string length header corrupted")
slen = np.fromstring(s, dtype="=u4")
s = sio.read(slen)
if len(s) != slen:
raise MessageTruncatedError("Packed string content corrupted")
return s
def write_packed_string(sio, s):
""" Write a packed string to a file-like object """
sio.write(np.array(len(s), dtype='=u4').tostring())
sio.write(s)
def read_lkv_socket(sock):
""" Read an LKV message from the given socket and return it as a dict """
msg_len = np.fromstring(readall(sock, 4), dtype="=u4")
# Magic "stop" command
if msg_len == (2**32)-1:
raise ShouldStopException()
msg = readall(sock, msg_len)
sio = StringIO(msg)
sio.read(4) # discard array length header
dct = {}
while sio.tell() < msg_len:
name = read_packed_string(sio)
value = read_packed_string(sio)
dct[name] = value
return dct
def write_lkv_socket(sock, dct):
""" Convert a dictionary of strings to an LKV message and write it to the
socket. """
sio = StringIO()
sio.write('\x00'*4) # reserve space for length header
sio.write(np.array(len(dct), dtype='=u4').tostring()) # array length header
for name, value in dct.iteritems():
write_packed_string(sio, name)
write_packed_string(sio, value)
# Write length header
msg_len = sio.tell() - 4
sio.seek(0)
sio.write(np.array(msg_len, dtype='=u4').tostring())
sio.seek(0)
sock.sendall(sio.read())
class FastServer(object):
def __init__(self, port, callback):
self.port = port
self.callback = callback
def serve(self):
""" Serve until the EXIT message is received over TCP/IP. """
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('localhost', self.port))
s.listen(5)
print "FastServer bound and listening"
try:
while True:
conn, addr = s.accept()
try:
data = read_lkv_socket(conn)
response = self.callback(data)
write_lkv_socket(conn, response)
except ShouldStopException:
break
except Exception as e:
print e
finally:
conn.close()
finally:
s.close()
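# Minimal usage sketch (the echo callback and port number are hypothetical,
# not part of this module):
#
#     def echo(request):  # request is the dict produced by read_lkv_socket
#         return {'!status': '0', '!body': request.get('!resource', '')}
#
#     FastServer(5005, echo).serve()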
def handler_callback(dct):
def response(code, body):
return {'!status': '%d'%code, '!body': body if body is not None else ''}
resource = dct.pop('!resource')[4:] # Discard string length header
handler = core.RESOURCES.get(resource, None)
if handler is None:
print "Attempt to access illegal IPC resource %s" % resource
return response(errors.GENERAL_ERROR, "Illegal plot command %s" % resource)
print "Call %s "%resource
# When an exception occurs in the handler, print a traceback
# to the console and set the status code.
# Response body is just the last line, e.g. "ValueError: foo".
try:
tstart = time.time()
t = Terminals(dct)
body = handler(context, t) # Instantiate the handler object
tstop = time.time()
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
# We take the first entry in the list, as the others are evidently
# only needed for SyntaxError.
exc_message = traceback.format_exception_only(exc_type, exc_value)[0].strip()
# Strip off the exception type
exc_message = ''.join([x for x in exc_message.split(':')][1:]).strip()
# Pick the *second* entry in the list, because the first always
# points to the handler() call above.
exc_fname, exc_lineno, _x, _x = traceback.extract_tb(exc_tb)[1]
# Don't return the whole path to the file.
exc_fname = os.path.basename(exc_fname).replace('.py','')
estr = "%s [%s %d]" % (exc_message, exc_fname, exc_lineno)
# Print to console as well for debugging
traceback.print_tb(exc_tb)
print estr
# Known errors -> return the code and the message
# Unknown errors -> return code 5111, message, but also source information
try:
code = errors.CODES[exc_type]
except KeyError:
return response(errors.GENERAL_ERROR, estr)
else:
return response(code, exc_message)
else:
print " %d msec" % ((tstop-tstart)*1000.)
return response(0, body)
def run_server(port):
""" Target function for the child process """
# Doing the imports here saves memory in the parent process,
# which won't be using matplotlib anyway.
global core, context, Terminals
from . import core
from .terminals import Terminals
context = core.PlotContext()
print "Subprocess initialized successfully"
s = FastServer(port, handler_callback)
print "SHMEM/PIPE handshake successful"
s.serve()
def run():
""" Main function.
Starts a server in a child process, and kills it once LabVIEW goes away.
"""
PORT = int(sys.argv[1])
LVPROCESS = int(sys.argv[2])
process_handle = psutil.Process(LVPROCESS)
print "APLOT %d:%d" % (LVPROCESS, PORT)
p = multiprocessing.Process(target=run_server, args=(PORT,))
p.start()
while True:
time.sleep(1)
# According to the docs, is_running() works correctly even if the PID
# value was re-used between LabVIEW exiting and our polling.
if not process_handle.is_running():
print "LabVIEW terminated; shutting down"
p.terminate()
break
if not p.is_alive():
print "Subprocess died mysteriously; exiting."
break
|
bsd-3-clause
|
gcanasherrera/Weak-Lensing
|
Plotter_ObjectCreator.py
|
1
|
4854
|
# Name: Plotter_ObjectCreator.py
#
# Weak-Lensing Validation Program V
#
# Type: Python script
#
# Description: Plots features associated with the text files saved by WL_Simulation_execute_mag.py
#
from Class_ObjectCreator import ObjectCreator, MagnitudeExponential
from Class_CatalogReader import CatalogReader
from WL_Utils import sex_caller
import numpy as np #Maths arrays and more
import matplotlib.pyplot as plt #Plot Libraries
import seaborn as sns #Improvements for statistical-plots
from operator import truediv
import math
import matplotlib
matplotlib.rc('xtick', labelsize=60)
matplotlib.rc('ytick', labelsize=60)
FILTER = 'MH'
PICTURE = 'lhn1n1_2010apr_r_stack_fc_fix'
def m(F, a, b):
return -2.5*(math.log(F, 10)-math.log(a, 10)-b)
#Read txt file ' w2_53_stack_simulation.txt'
mag_input = np.genfromtxt(('{}_simulation_mag_input_{}.txt').format(PICTURE, FILTER))
mag_output_sex = np.genfromtxt(('{}_simulation_mag_output_sex_{}.txt').format(PICTURE, FILTER))
#mag_output_wayback = np.genfromtxt(('w2_53_stack_simulation_mag_output_wayback_{}.txt').format(FILTER))
mag_output_error_sex = np.genfromtxt(('{}_simulation_mag_output_error_sex_{}.txt').format(PICTURE, FILTER))
#mag_output_error_wayback = np.genfromtxt(('w2_53_stack_simulation_mag_output_error_wayback_{}.txt').format(FILTER))
flux_input = np.genfromtxt(('{}_simulation_flux_input_{}.txt').format(PICTURE, FILTER))
flux_output = np.genfromtxt(('{}_simulation_flux_output_{}.txt').format(PICTURE, FILTER))
flux_output_error = np.genfromtxt(('{}_simulation_flux_output_error_{}.txt').format(PICTURE, FILTER))
flux_output_max = np.genfromtxt(('{}_simulation_flux_output_max_{}.txt').format(PICTURE, FILTER))
flux_output_max_error = np.genfromtxt(('{}_simulation_flux_output_max_error_{}.txt').format(PICTURE, FILTER))
number_lost_objects = np.genfromtxt(('{}_simulation_number_lost_objects_{}.txt').format(PICTURE, FILTER))
param = ["mean_a", "mean_b"]
par = np.genfromtxt(('{}_axis_param_{}.txt').format(PICTURE, FILTER), names=param)
flux_input_iso = []
mag_input_iso = []
#PLOT 1: Number of lost Galaxies vs mag_input
sns.set(style="white", palette="muted", color_codes=True)
plt.figure()
#plt.title('Number of lost Galaxies vs mag_input_max')
number_lost_objects_per = number_lost_objects/np.amax(number_lost_objects)*100
plt.plot(mag_input, number_lost_objects_per, 'k-')
plt.xticks(color='k', size=26)
plt.yticks(color='k', size=26)
plt.xlabel('$m_{max}^{i}$', fontsize=44)
plt.ylabel('$n_{lost}/\%$', fontsize=44)
plt.show()
#PLOT 2: flux_out_max vs flux_input_max (Linear Scale)
normalization_th = 2*math.pi*par["mean_a"]*par["mean_b"]
sns.set(style="white", palette="muted", color_codes=True)
plt.figure()
#plt.title('flux_out_max vs flux_input (Linear Scale)')
plt.errorbar(flux_input, flux_output_max, flux_output_max_error, 0, fmt='ko')
#plt.xlim(9, 1e12)
#plt.ylim(9, 1e12)
plt.xlabel('$F(max)_{input}$', fontsize=24)
plt.ylabel('$F(max)_{output}$', fontsize=24)
plt.show()
#PLOT 3: flux_out_iso vs flux_input_iso
ratio_flux=map(truediv, flux_output, flux_input)
normalization_exp = np.mean(ratio_flux)
print '\nTheoretical Normalization: {}'.format(normalization_th)
print '\nExperimental Normalization: {}'.format(normalization_exp)
for i in range(len(flux_input)):
#flux_input_iso.append(ratio_flux[i]*flux_input[i])
flux_input_iso.append(normalization_th*flux_input[i])
sns.set(style="white", palette="muted", color_codes=True)
plt.figure()
#plt.title('flux_out_iso vs flux_input_iso (Linear Scale)')
plt.errorbar(flux_input_iso, flux_output, flux_output_error, 0, fmt='ko')
#plt.xlim(0.1, 1e12)
#plt.ylim(0.1, 1e12)
plt.xlabel('$F(iso)_{input}$', fontsize=24)
plt.ylabel('$F(iso)_{output}$', fontsize=24)
plt.show()
#PLOT 4: mag_output_iso vs mag_input_iso
ratio_mag=map(truediv, mag_output_sex, mag_input)
for i in flux_input_iso:
mag_input_iso.append(m(i, 100.228418351, 9.99901042564))
sns.set(style="white", palette="muted", color_codes=True)
plt.figure()
#plt.title('mag_out_iso vs mag_input_iso (Linear Scale)')
plt.errorbar(mag_input_iso, mag_output_sex, mag_output_error_sex, 0, fmt='ko')
plt.xlabel('$m_{iso}^{i}$', fontsize=44)
plt.ylabel('$m_{iso}^{o}$', fontsize=44)
plt.xticks(color='k', size=23)
plt.yticks(color='k', size=23)
plt.xlim(0,30)
plt.ylim(0,25)
plt.show()
#PLOT 5: Number of lost Galaxies vs mag_input_iso
sns.set(style="white", palette="muted", color_codes=True)
plt.figure()
#plt.title('Number of lost Galaxies vs mag_input_max')
number_lost_objects_per = number_lost_objects/np.amax(number_lost_objects)*100
plt.semilogy(mag_input_iso, number_lost_objects_per, 'k-')
plt.xticks(color='k', size=23)
plt.yticks(color='k', size=23)
plt.xlabel('$m_{iso}^{i}$', labelpad=10, fontsize=40)
plt.ylabel('$n_{lost}/\%$', fontsize=44)
#plt.xlim(0,30)
plt.ylim(0,110)
plt.show()
print 'END ! ! ! \n'
|
gpl-3.0
|
lehnertu/TEUFEL
|
scripts/plot_SingleFrequency.py
|
1
|
4014
|
#!/usr/bin/env python
# coding=UTF-8
import sys, time
import os.path
import argparse
import numpy as np
import h5py
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from matplotlib.patches import Circle
# magnetic field constant in N/A²
mu0 = 4*np.pi*1e-7
parser = argparse.ArgumentParser()
parser.add_argument('file', help='the file name of the watch point HDF5 file')
parser.add_argument('freq', help="frequency in Hz", type=float)
print
args = parser.parse_args()
radfile = args.file
radOK = os.path.isfile(radfile)
if not radOK:
print "file not found"
sys.exit()
f = args.freq
# Open the file for reading
print "reading ",radfile
hdf = h5py.File(radfile, "r")
print hdf
print
# Get the groups
pos = hdf['ObservationPosition']
Nx = pos.attrs.get('Nx')
Ny = pos.attrs.get('Ny')
print "Nx=%d Ny=%d" % (Nx,Ny)
print pos
field = hdf['ElMagField']
print field
t0 = field.attrs.get('t0')
dt = field.attrs.get('dt')
nots = field.attrs.get('NOTS')
print "t0=%g dt=%g NOTS=%d" % (t0, dt, nots)
pos = np.array(pos)
a = np.array(field)
hdf.close()
print
AmplitudeX = np.empty([Nx, Ny])
PhaseX = np.empty([Nx, Ny])
AmplitudeY = np.empty([Nx, Ny])
PhaseY = np.empty([Nx, Ny])
X = np.empty([Nx, Ny])
Y = np.empty([Nx, Ny])
t = np.linspace(t0,t0+(nots-1)*dt,nots)
cft = np.cos(2*np.pi*f*t)
sft = np.sin(2*np.pi*f*t)
for ix in range(Nx):
for iy in range(Ny):
# field data
X[ix,iy] = pos[ix,iy,0]
Y[ix,iy] = pos[ix,iy,1]
trace = a[ix,iy]
data = trace.transpose()
# fourier components of the field
Ex = data[0]
cosEx = np.dot(Ex,cft)
sinEx = np.dot(Ex,sft)
cEx = cosEx + 1j*sinEx
Ey = data[1]
cosEy = np.dot(Ey,cft)
sinEy = np.dot(Ey,sft)
cEy = cosEy + 1j*sinEy
Ez = data[2]
cosEz = np.dot(Ez,cft)
sinEz = np.dot(Ez,sft)
cEz = cosEz + 1j*sinEz
Bx = data[3]
cosBx = np.dot(Bx,cft)
sinBx = np.dot(Bx,sft)
ccBx = cosBx - 1j*sinBx
By = data[4]
cosBy = np.dot(By,cft)
sinBy = np.dot(By,sft)
ccBy = cosBy - 1j*sinBy
Bz = data[5]
cosBz = np.dot(Bz,cft)
sinBz = np.dot(Bz,sft)
ccBz = cosBz - 1j*sinBz
# Poynting vector S = (E.cross.B)/mu0
Sx = (cEy*ccBz - cEz*ccBy) *dt/mu0
Sy = (cEz*ccBx - cEx*ccBz) *dt/mu0
Sz = (cEx*ccBy - cEy*ccBx) *dt/mu0
# Amplitude and Phase
# Amplitude[ix,iy] = np.absolute(Sz)
# Phase[ix,iy] = np.angle(Sz)
AmplitudeX[ix,iy] = np.absolute(cEx)*dt
PhaseX[ix,iy] = np.angle(cEx)
AmplitudeY[ix,iy] = np.absolute(cEy)*dt
PhaseY[ix,iy] = np.angle(cEy)
# figure with power density on screen
dX = pos[1,0,0]-pos[0,0,0]
dY = pos[0,1,1]-pos[0,0,1]
print "dx=%g dy=%g m" % (dX,dY)
# Etot = Amplitude.sum()*dX*dY
# print "integrated energy = ", 1e6*Etot, " µJ"
maxX = np.max(AmplitudeX)
maxY = np.max(AmplitudeY)
maxV = np.max([maxX,maxY])
print "max = %g" % maxV
expo = np.rint(np.log10(maxV))
scale = np.power(10,-expo)
print "scale = %g" % scale
fig1 = plt.figure(1,figsize=(11,9))
ax1 = fig1.add_subplot(221)
plt.pcolormesh(X, Y, scale*AmplitudeX, cmap='CMRmap', vmin=0, vmax=scale*maxV)
plt.title('integrated $E_x$ amplitude [V/m s]')
plt.xlabel('x /m')
plt.ylabel('y /m')
cb=plt.colorbar()
cb.set_label(r'$10^{%d}$ Vs/m' % expo)
ax2 = fig1.add_subplot(222)
plt.pcolormesh(X, Y, PhaseX, cmap='seismic')
plt.title('$E_x$ phase [rad]')
plt.xlabel('x /m')
plt.ylabel('y /m')
cb=plt.colorbar()
ax3 = fig1.add_subplot(223)
plt.pcolormesh(X, Y, scale*AmplitudeY, cmap='CMRmap', vmin=0, vmax=scale*maxV)
plt.title('integrated $E_y$ amplitude [V/m s]')
plt.xlabel('x /m')
plt.ylabel('y /m')
cb=plt.colorbar()
cb.set_label(r'$10^{%d}$ Vs/m' % expo)
ax4 = fig1.add_subplot(224)
plt.pcolormesh(X, Y, PhaseY, cmap='seismic')
plt.title('$E_y$ phase [rad]')
plt.xlabel('x /m')
plt.ylabel('y /m')
cb=plt.colorbar()
fig1.tight_layout()
plt.show()
|
gpl-3.0
|
YuepengGuo/backtrader
|
samples/data-pandas/data-pandas.py
|
2
|
2647
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import backtrader as bt
import backtrader.feeds as btfeeds
import pandas
def runstrat():
args = parse_args()
# Create a cerebro entity
cerebro = bt.Cerebro(stdstats=False)
# Add a strategy
cerebro.addstrategy(bt.Strategy)
# Get a pandas dataframe
datapath = ('../../datas/2006-day-001.txt')
# Simulate that the header row isn't there if --noheaders was requested
skiprows = 1 if args.noheaders else 0
header = None if args.noheaders else 0
dataframe = pandas.read_csv(datapath,
skiprows=skiprows,
header=header,
parse_dates=True,
index_col=0)
if not args.noprint:
print('--------------------------------------------------')
print(dataframe)
print('--------------------------------------------------')
# Pass it to the backtrader datafeed and add it to the cerebro
data = bt.feeds.PandasData(dataname=dataframe)
cerebro.adddata(data)
# Run over everything
cerebro.run()
# Plot the result
cerebro.plot(style='bar')
def parse_args():
parser = argparse.ArgumentParser(
description='Pandas test script')
parser.add_argument('--noheaders', action='store_true', default=False,
required=False,
help='Do not use header rows')
parser.add_argument('--noprint', action='store_true', default=False,
help='Do not print the dataframe')
return parser.parse_args()
if __name__ == '__main__':
runstrat()
|
gpl-3.0
|
aminert/scikit-learn
|
sklearn/mixture/tests/test_dpgmm.py
|
261
|
4490
|
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the verbose output is the same
# for the flag values 1 and True
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
|
bsd-3-clause
|
mrtommyb/single-systems
|
singlesystems.py
|
1
|
16484
|
from __future__ import division, print_function
import os
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
from scipy.stats import gamma
from scipy.optimize import minimize
import emcee
from scipy.integrate import nquad
class Singlesystems(object):
def __init__(self, stlr, kois):
"""
stlr is the stellar catalog
kois is the koi catalog
"""
self.stlr = stlr
self.kois = kois
def samplesetup(self,
planetperiod=[-np.inf, np.inf],
planetradius=[-np.inf, np.inf],
startemp=[-np.inf, np.inf],
starradius=[-np.inf, np.inf],
starmass=[-np.inf, np.inf],
dataspan=[-np.inf, np.inf],
dutycycle=[-np.inf, np.inf],
rrmscdpp07p5=[-np.inf, np.inf],
requirestarmass=True,
periodgridspacing=57,
radiusgridspacing=61,
bounds=[(-5, 5), (-5, 5), (-5, 5)],
comp=None
):
"""
this will change the state of self.stlr and self.kois
"""
self.planetperiod = planetperiod
self.planetradius = planetradius
self.bounds = bounds
# make cuts on the stellar catalog
m = (startemp[0] <= self.stlr.teff) & (self.stlr.teff <= startemp[1])
m &= (starradius[0] <= self.stlr.radius) & (self.stlr.radius <= starradius[1])
m &= (starmass[0] <= self.stlr.mass) & (self.stlr.mass <= starmass[1])
m &= (dataspan[0] <= self.stlr.dataspan) & (self.stlr.dataspan <= dataspan[1])
m &= (dutycycle[0] <= self.stlr.dutycycle) & (self.stlr.dutycycle <= dutycycle[1])
m &= (rrmscdpp07p5[0] <= self.stlr.rrmscdpp07p5) & (self.stlr.rrmscdpp07p5 <= rrmscdpp07p5[1])
if requirestarmass:
m &= np.isfinite(self.stlr.mass)
self.stlr = pd.DataFrame(self.stlr[m])
self.selectedstars = len(self.stlr)
# Join on the stellar list.
self.kois = pd.merge(self.kois, self.stlr[["kepid"]],
on="kepid", how="inner")
# make cuts based on the planets catalog
m = self.kois.koi_pdisposition == "CANDIDATE"
m &= (planetperiod[0] <= self.kois.koi_period) & (self.kois.koi_period <= planetperiod[1])
m &= np.isfinite(self.kois.koi_prad) & (planetradius[0] <= self.kois.koi_prad) & (self.kois.koi_prad <= planetradius[1])
self.kois = pd.DataFrame(self.kois[m])
self.selectedkois = len(self.kois)
self._setcompleteness(periodgridspacing, radiusgridspacing, comp)
def _setcompleteness(self, periodgridspacing, radiusgridspacing, comp):
self.cdpp_cols = [k for k in self.stlr.keys() if k.startswith("rrmscdpp")]
self.cdpp_vals = np.array([k[-4:].replace("p", ".") for k in self.cdpp_cols], dtype=float)
# Pre-compute and freeze the gamma function from Equation (5) in
# Burke et al.
self.pgam = gamma(4.65, loc=0., scale=0.98)
self.mesthres_cols = [k for k in self.stlr.keys() if k.startswith("mesthres")]
self.mesthres_vals = np.array([k[-4:].replace("p", ".") for k in self.mesthres_cols],
dtype=float)
period = np.linspace(self.planetperiod[0], self.planetperiod[1], periodgridspacing)
rp = np.linspace(self.planetradius[0], self.planetradius[1], radiusgridspacing)
self.period_grid, self.rp_grid = np.meshgrid(period, rp, indexing="ij")
self.koi_periods = np.array(self.kois.koi_period)
self.koi_rps = np.array(self.kois.koi_prad)
self.vol = np.diff(self.period_grid, axis=0)[:, :-1] * np.diff(self.rp_grid, axis=1)[:-1, :]
if comp is None:
comp = np.zeros_like(self.period_grid)
for _, star in self.stlr.iterrows():
comp += self.get_completeness(star, self.period_grid, self.rp_grid, 0.0, with_geom=True)
self.comp = comp
else:
self.comp = comp
def optimize(self):
theta_0 = np.array([np.log(0.75), -0.53218, -1.5])
r = minimize(self.nll, theta_0, method="L-BFGS-B", bounds=self.bounds)
return r.x
def mcmc(self):
theta_opt = self.optimize()
ndim, nwalkers = len(theta_opt), 16
pos = [theta_opt + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnprob)
# Burn in.
pos, _, _ = sampler.run_mcmc(pos, 1000)
sampler.reset()
# Production.
pos, _, _ = sampler.run_mcmc(pos, 4000)
return sampler.flatchain
def get_pdet(self, star, aor, period, rp, e):
"""
Equation (5) from Burke et al. Estimate the detection efficiency
for a transit.
:param star: a pandas row giving the stellar properties
:param aor: the dimensionless semi-major axis (scaled
by the stellar radius)
:param period: the period in days
:param rp: the planet radius in Earth radii
:param e: the orbital eccentricity
"""
tau = self.get_duration(period, aor, e) * 24.
mes = self.get_mes(star, period, rp, tau)
mest = np.interp(tau, self.mesthres_vals,
np.array(star[self.mesthres_cols],
dtype=float))
x = mes - 4.1 - (mest - 7.1)
return self.pgam.cdf(x)
def get_pwin(self, star, period):
"""
Equation (6) from Burke et al. Estimates the window function
using a binomial distribution.
:param star: a pandas row giving the stellar properties
:param period: the period in days
"""
M = star.dataspan / period
f = star.dutycycle
omf = 1.0 - f
pw = 1 - omf**M - M*f*omf**(M-1) - 0.5*M*(M-1)*f*f*omf**(M-2)
msk = (pw >= 0.0) * (M >= 2.0)
return pw * msk
def get_pgeom(self, aor, e):
"""
The geometric transit probability.
See e.g. Kipping (2014) for the eccentricity factor
http://arxiv.org/abs/1408.1393
:param aor: the dimensionless semi-major axis (scaled
by the stellar radius)
:param e: the orbital eccentricity
"""
return 1. / (aor * (1 - e*e)) * (aor > 1.0)
def get_completeness(self, star, period, rp, e, with_geom=True):
"""
A helper function to combine all the completeness effects.
:param star: a pandas row giving the stellar properties
:param period: the period in days
:param rp: the planet radius in Earth radii
:param e: the orbital eccentricity
:param with_geom: include the geometric transit probability?
"""
aor = self.get_a(period, star.mass) / star.radius
pdet = self.get_pdet(star, aor, period, rp, e)
pwin = self.get_pwin(star, period)
if not with_geom:
return pdet * pwin
pgeom = self.get_pgeom(aor, e)
return pdet * pwin * pgeom
# A double power law model for the population.
def population_model(self, theta, period, rp):
lnf0, beta, alpha = theta
v = np.exp(lnf0) * np.ones_like(period)
for x, rng, n in zip((period, rp),
(self.planetperiod, self.planetradius),
(beta, alpha)):
n1 = n + 1
v *= x**n*n1 / (rng[1]**n1-rng[0]**n1)
return v
# The ln-likelihood function given at the top of this post.
def lnlike(self, theta):
pop = self.population_model(theta, self.period_grid, self.rp_grid) * self.comp
pop = 0.5 * (pop[:-1, :-1] + pop[1:, 1:])
norm = np.sum(pop * self.vol)
ll = np.sum(np.log(self.population_model(theta, self.koi_periods, self.koi_rps))) - norm
return ll if np.isfinite(ll) else -np.inf
# The ln-probability function is just proportional to the ln-likelihood
# since we're assuming uniform priors.
def lnprob(self, theta):
# Broad uniform priors.
for t, rng in zip(theta, self.bounds):
if not rng[0] < t < rng[1]:
return -np.inf
return self.lnlike(theta)
# The negative ln-likelihood is useful for optimization.
# Optimizers want to *minimize* your function.
def nll(self, theta):
ll = self.lnlike(theta)
return -ll if np.isfinite(ll) else 1e15
def get_duration(self, period, aor, e):
"""
Equation (1) from Burke et al. This estimates the transit
duration in the same units as the input period. There is a
typo in the paper (24/4 = 6 != 4).
:param period: the period in any units of your choosing
:param aor: the dimensionless semi-major axis (scaled
by the stellar radius)
:param e: the eccentricity of the orbit
"""
return 0.25 * period * np.sqrt(1 - e**2) / aor
def get_a(self, period, mstar, Go4pi=2945.4625385377644/(4*np.pi*np.pi)):
"""
Compute the semi-major axis of an orbit in Solar radii.
:param period: the period in days
:param mstar: the stellar mass in Solar masses
"""
return (Go4pi*period*period*mstar) ** (1./3)
def get_delta(self, k, c=1.0874, s=1.0187):
"""
Estimate the approximate expected transit depth as a function
of radius ratio. There might be a typo here. In the paper it
uses c + s*k but in the public code, it is c - s*k:
https://github.com/christopherburke/KeplerPORTs
:param k: the dimensionless radius ratio between the planet and
the star
"""
delta_max = k*k * (c + s*k)
return 0.84 * delta_max
def get_mes(self,star, period, rp, tau, re=0.009171):
"""
Estimate the multiple event statistic value for a transit.
:param star: a pandas row giving the stellar properties
:param period: the period in days
:param rp: the planet radius in Earth radii
:param tau: the transit duration in hours
"""
# Interpolate to the correct CDPP for the duration.
cdpp = np.array(star[self.cdpp_cols], dtype=float)
sigma = np.interp(tau, self.cdpp_vals, cdpp)
# Compute the radius ratio and estimate the S/N.
k = rp * re / star.radius
snr = self.get_delta(k) * 1e6 / sigma
# Scale by the estimated number of transits.
ntrn = star.dataspan * star.dutycycle / period
return snr * np.sqrt(ntrn)
# A double power law model for the population.
def population_model2(self,period, rp, theta):
v = self.population_model(theta, period, rp, )
return v
def return_occurrence(self, parameters,
planetradius, planetperiod):
occ = nquad(self.population_model2,
[[planetperiod[0],planetperiod[1]], [planetradius[0], planetradius[1]]], args=[parameters])[0]
return occ
def return_occurrence_samples(self, samplechain, sampsize,
planetradius, planetperiod):
samp = np.zeros(sampsize)
for i,x in enumerate(
np.random.choice(range(len(samplechain)),
size=sampsize)):
samp[i] = nquad(self.population_model2,
[[planetperiod[0],planetperiod[1]], [planetradius[0], planetradius[1]]], args=[samplechain[x]])[0]
self.samp = samp
self.occurence_rate_median = np.median(samp)
return samp
def return_occurrence_sample_semi(self, samplechain, sampsize,
planetradius, planetsemi, starmass):
"""
planetsemi is a pair of values giving the inner and outer range
in AU
starmass is a point estimate (for now) in solar masses
"""
G = 6.67408E-11
AU = 1.496E+11
planetsemi = np.array(planetsemi)
planetsemi *= AU
starmass *= 1.989E30
planetperiod = ((4. * np.pi**2 * planetsemi**3) / (G * starmass))**0.5 / 86400.
samp = np.zeros(sampsize)
for i,x in enumerate(
np.random.choice(range(len(samplechain)),
size=sampsize)):
samp[i] = nquad(self.population_model2,
[[planetperiod[0],planetperiod[1]], [planetradius[0], planetradius[1]]], args=[samplechain[x]])[0]
self.samp = samp
return samp
class Singletypes(Singlesystems):
def __init__(self, stlr, kois):
"""
stlr is the stellar catalog
kois is the koi catalog
"""
self.stlr = stlr
self.kois = kois
super(Singlesystems, self).__init__()
def samplesetup(self,
planetperiod=[-np.inf, np.inf],
planetradius=[-np.inf, np.inf],
startype='G',
starradius=[-np.inf, np.inf],
starmass=[-np.inf, np.inf],
dataspan=[-np.inf, np.inf],
dutycycle=[-np.inf, np.inf],
rrmscdpp07p5=[-np.inf, np.inf],
requirestarmass=True,
periodgridspacing=57,
radiusgridspacing=61,
bounds=[(-5, 5), (-5, 5), (-5, 5)],
comp=None
):
self.planetperiod = planetperiod
self.planetradius = planetradius
self.bounds = bounds
# make cuts on the stellar catalog
m = self.maskstartype(startype)
m &= (starradius[0] <= self.stlr.radius) & (self.stlr.radius <= starradius[1])
m &= (starmass[0] <= self.stlr.mass) & (self.stlr.mass <= starmass[1])
m &= (dataspan[0] <= self.stlr.dataspan) & (self.stlr.dataspan <= dataspan[1])
m &= (dutycycle[0] <= self.stlr.dutycycle) & (self.stlr.dutycycle <= dutycycle[1])
m &= (rrmscdpp07p5[0] <= self.stlr.rrmscdpp07p5) & (self.stlr.rrmscdpp07p5 <= rrmscdpp07p5[1])
if requirestarmass:
m &= np.isfinite(self.stlr.mass)
self.stlr = pd.DataFrame(self.stlr[m])
self.selectedstars = len(self.stlr)
# Join on the stellar list.
self.kois = pd.merge(self.kois, self.stlr[["kepid"]],
on="kepid", how="inner")
# make cuts based on the planets catalog
m = self.kois.koi_pdisposition == "CANDIDATE"
m &= (planetperiod[0] <= self.kois.koi_period) & (self.kois.koi_period <= planetperiod[1])
m &= np.isfinite(self.kois.koi_prad) & (planetradius[0] <= self.kois.koi_prad) & (self.kois.koi_prad <= planetradius[1])
self.kois = pd.DataFrame(self.kois[m])
self.selectedkois = len(self.kois)
self._setcompleteness(periodgridspacing, radiusgridspacing, comp)
def maskstartype(self, startype):
if startype == 'M':
m = (0 <= self.stlr.teff) & (self.stlr.teff <= 3900)
elif startype == 'K':
m = (3900 < self.stlr.teff) & (self.stlr.teff <= 5300)
elif startype == 'G':
m = (5300 < self.stlr.teff) & (self.stlr.teff <= 6000)
elif startype == 'F':
m = (6000 < self.stlr.teff) & (self.stlr.teff <= 7500)
        elif startype == 'A':
            m = (7500 < self.stlr.teff) & (self.stlr.teff <= 10000)
        else:
            raise ValueError("Unknown startype: {0}".format(startype))
        return m
def get_catalog(name, basepath="data"):
fn = os.path.join(basepath, "{0}.h5".format(name))
if os.path.exists(fn):
return pd.read_hdf(fn, name)
if not os.path.exists(basepath):
os.makedirs(basepath)
print("Downloading {0}...".format(name))
url = ("http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/"
"nph-nstedAPI?table={0}&select=*").format(name)
r = requests.get(url)
if r.status_code != requests.codes.ok:
r.raise_for_status()
fh = BytesIO(r.content)
df = pd.read_csv(fh)
df.to_hdf(fn, name, format="t")
return df
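# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how the pieces above might be wired together.  The
# Exoplanet Archive table names are assumptions; any stellar/KOI table pair
# exposing the expected columns (radius, mass, teff, dataspan, dutycycle,
# rrmscdpp*, koi_period, koi_prad, ...) would work the same way.
if __name__ == "__main__":
    stlr = get_catalog("q1_q17_dr24_stellar")
    kois = get_catalog("q1_q17_dr24_koi")
    st = Singletypes(stlr, kois)
    # Restrict to G dwarfs hosting 1-2 Earth-radius planets on 20-40 day orbits.
    st.samplesetup(planetperiod=[20, 40], planetradius=[1.0, 2.0],
                   startype='G')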
|
mit
|
Moshiasri/learning
|
Python_dataCamp/NumpyScatterPlot_PopGdpLifeExpectancy.py
|
1
|
9100
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 11 21:04:08 2016
@author: Mohtashim
"""
pop = [31.889923, 3.6005229999999999, 33.333216, 12.420476000000001, 40.301926999999999, 20.434176000000001, 8.199783, 0.70857300000000001, 150.448339, 10.392226000000001, 8.0783140000000007, 9.1191519999999997, 4.5521979999999997, 1.6391309999999999, 190.01064700000001, 7.3228580000000001, 14.326203, 8.3905049999999992, 14.131857999999999, 17.696293000000001, 33.390141, 4.3690379999999998, 10.238807, 16.284741, 1318.683096, 44.227550000000001, 0.71096000000000004, 64.606758999999997, 3.8006099999999998, 4.1338840000000001, 18.013408999999999, 4.4933120000000004, 11.416987000000001, 10.228744000000001, 5.4681199999999999, 0.49637399999999998, 9.3196220000000007, 13.75568, 80.264543000000003, 6.9396880000000003, 0.55120100000000005, 4.9065849999999998, 76.511887000000002, 5.2384599999999999, 61.083916000000002, 1.4548669999999999, 1.6883589999999999, 82.400996000000006, 22.873338, 10.706289999999999, 12.572927999999999, 9.9478139999999993, 1.4720409999999999, 8.5028140000000008, 7.4837629999999997, 6.9804120000000003, 9.9561080000000004, 0.301931, 1110.3963309999999, 223.547, 69.453569999999999, 27.499638000000001, 4.1090859999999996, 6.426679, 58.147733000000002, 2.780132, 127.467972, 6.0531930000000003, 35.610177, 23.301725000000001, 49.044789999999999, 2.5055589999999999, 3.921278, 2.0126490000000001, 3.1939419999999998, 6.0369140000000003, 19.167653999999999, 13.327078999999999, 24.821286000000001, 12.031795000000001, 3.2700650000000002, 1.250882, 108.700891, 2.8741270000000001, 0.68473600000000001, 33.757174999999997, 19.951656, 47.761980000000001, 2.0550799999999998, 28.901789999999998, 16.570613000000002, 4.1157709999999996, 5.6753559999999998, 12.894864999999999, 135.03116399999999, 4.6279260000000004, 3.2048969999999999, 169.27061699999999, 3.2421730000000002, 6.6671469999999999, 28.674757, 91.077286999999998, 38.518241000000003, 10.642836000000001, 3.942491, 0.79809399999999997, 22.276056000000001, 8.8605879999999999, 0.19957900000000001, 27.601037999999999, 12.267493, 10.150264999999999, 6.1445619999999996, 4.5530090000000003, 5.4475020000000001, 2.0092449999999999, 9.1187729999999991, 43.997827999999998, 40.448191000000001, 20.378239000000001, 42.292929000000001, 1.1330659999999999, 9.0310880000000004, 7.5546610000000003, 19.314747000000001, 23.174294, 38.13964, 65.068149000000005, 5.7015789999999997, 1.056608, 10.276158000000001, 71.158647000000002, 29.170397999999999, 60.776237999999999, 301.13994700000001, 3.4474960000000001, 26.084662000000002, 85.262355999999997, 4.018332, 22.211742999999998, 11.746034999999999, 12.311143]
gdp_cap = [974.58033839999996, 5937.0295259999984, 6223.3674650000003, 4797.2312670000001, 12779.379639999999, 34435.367439999995, 36126.492700000003, 29796.048340000001, 1391.253792, 33692.605080000001, 1441.2848730000001, 3822.137084, 7446.2988029999997, 12569.851769999999, 9065.8008250000003, 10680.792820000001, 1217.0329939999999, 430.07069159999998, 1713.7786860000001, 2042.0952400000001, 36319.235009999997, 706.01653699999997, 1704.0637240000001, 13171.638849999999, 4959.1148540000004, 7006.5804189999999, 986.14787920000003, 277.55185870000003, 3632.5577979999998, 9645.06142, 1544.7501119999999, 14619.222719999998, 8948.1029230000004, 22833.308509999999, 35278.418740000001, 2082.4815670000007, 6025.3747520000015, 6873.2623260000009, 5581.1809979999998, 5728.3535140000004, 12154.089749999999, 641.36952360000021, 690.80557590000001, 33207.0844, 30470.0167, 13206.48452, 752.74972649999995, 32170.37442, 1327.6089099999999, 27538.41188, 5186.0500030000003, 942.6542111, 579.23174299999982, 1201.637154, 3548.3308460000007, 39724.978669999997, 18008.944439999999, 36180.789190000003, 2452.210407, 3540.6515639999998, 11605.71449, 4471.0619059999999, 40675.996350000001, 25523.277099999999, 28569.719700000001, 7320.8802620000015, 31656.068060000001, 4519.4611709999999, 1463.249282, 1593.06548, 23348.139730000006, 47306.989780000004, 10461.05868, 1569.3314419999999, 414.5073415, 12057.49928, 1044.7701259999999, 759.34991009999999, 12451.6558, 1042.581557, 1803.151496, 10956.991120000001, 11977.57496, 3095.7722710000007, 9253.896111, 3820.1752299999998, 823.68562050000003, 944.0, 4811.0604290000001, 1091.359778, 36797.933319999996, 25185.009109999999, 2749.3209649999999, 619.67689239999982, 2013.9773049999999, 49357.190170000002, 22316.192869999999, 2605.94758, 9809.1856360000002, 4172.8384640000004, 7408.9055609999996, 3190.4810160000002, 15389.924680000002, 20509.64777, 19328.709009999999, 7670.122558, 10808.47561, 863.08846390000019, 1598.4350890000001, 21654.83194, 1712.4721360000001, 9786.5347139999994, 862.54075610000018, 47143.179640000002, 18678.314350000001, 25768.257590000001, 926.14106830000003, 9269.6578079999999, 28821.063699999999, 3970.0954069999998, 2602.3949950000001, 4513.4806429999999, 33859.748350000002, 37506.419070000004, 4184.5480889999999, 28718.276839999999, 1107.482182, 7458.3963269999977, 882.9699437999999, 18008.509239999999, 7092.9230250000001, 8458.2763840000007, 1056.3801209999999, 33203.261279999999, 42951.65309, 10611.46299, 11415.805689999999, 2441.5764039999999, 3025.3497980000002, 2280.769906, 1271.211593, 469.70929810000007]
life_exp = [43.828000000000003, 76.423000000000002, 72.301000000000002,
42.731000000000002, 75.319999999999993, 81.234999999999999,
79.828999999999994, 75.635000000000005, 64.061999999999998,
79.441000000000003, 56.728000000000002, 65.554000000000002,
74.852000000000004, 50.728000000000002, 72.390000000000001,
73.004999999999995, 52.295000000000002, 49.579999999999998,
59.722999999999999, 50.43, 80.653000000000006, 44.741000000000007,
50.651000000000003, 78.552999999999997, 72.960999999999999,
72.888999999999996, 65.152000000000001, 46.462000000000003,
55.322000000000003, 78.781999999999996, 48.328000000000003,
75.748000000000005, 78.272999999999996, 76.486000000000004,
78.331999999999994, 54.790999999999997, 72.234999999999999,
74.994, 71.338000000000022, 71.878, 51.578999999999994,
58.039999999999999, 52.947000000000003, 79.313000000000002,
80.656999999999996, 56.734999999999999, 59.448, 79.406000000000006,
60.021999999999998, 79.483000000000004, 70.259, 56.006999999999998,
46.388000000000012, 60.915999999999997, 70.198000000000008,
82.207999999999998, 73.338000000000022, 81.757000000000005,
64.698000000000008, 70.650000000000006, 70.963999999999999,
59.545000000000002, 78.885000000000005, 80.745000000000005,
80.546000000000006, 72.566999999999993, 82.602999999999994,
72.534999999999997, 54.109999999999999, 67.296999999999997,
78.623000000000005, 77.588000000000022, 71.992999999999995,
42.591999999999999, 45.677999999999997, 73.951999999999998,
59.443000000000012, 48.302999999999997, 74.241, 54.466999999999999,
64.164000000000001, 72.801000000000002, 76.194999999999993,
66.802999999999997, 74.543000000000006, 71.164000000000001,
42.082000000000001, 62.069000000000003, 52.906000000000013,
63.784999999999997, 79.762, 80.203999999999994, 72.899000000000001,
56.866999999999997, 46.859000000000002, 80.195999999999998,
75.640000000000001, 65.483000000000004, 75.536999999999978,
71.751999999999995, 71.421000000000006, 71.688000000000002,
75.563000000000002, 78.097999999999999, 78.746000000000024,
76.441999999999993, 72.475999999999999, 46.241999999999997,
65.528000000000006, 72.777000000000001, 63.061999999999998,
74.001999999999995, 42.568000000000012, 79.971999999999994,
74.662999999999997, 77.926000000000002, 48.158999999999999,
49.338999999999999, 80.941000000000003, 72.396000000000001,
58.555999999999997, 39.613, 80.884, 81.701000000000022,
74.143000000000001, 78.400000000000006, 52.517000000000003,
70.616, 58.420000000000002, 69.819000000000003, 73.923000000000002,
71.777000000000001, 51.542000000000002, 79.424999999999997,
78.242000000000004, 76.384, 73.747, 74.248999999999995,
73.421999999999997, 62.698, 42.383999999999993, 43.487000000000002]
# Import numpy as np
import numpy as np
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Store pop as a numpy array: np_pop
np_pop = np.array(pop)
# Double np_pop
np_pop = np_pop * 2
# Update: set s argument to np_pop
plt.scatter(gdp_cap, life_exp, s = np_pop, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000, 10000, 100000],['1k', '10k', '100k'])
# Add grid() call
plt.grid(True)
# Display the plot
plt.show()
|
gpl-3.0
|
nkarast/Snippets
|
Python/animatePlot.py
|
1
|
1453
|
"""
This creates an "animated" movie of a scatter plot being filled
element by element.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
x = np.linspace(1,1000,50)
y = np.linspace(1000,2000, 50)
fig = plt.figure(figsize = (5,5))
axes = fig.add_subplot(111)
axes.set_xlim(min(x), max(x))
axes.set_ylim(min(y), max(y))
time_template = 'Turn = %i'
time_text = axes.text(0.05, 0.9, '', transform=axes.transAxes)
global t
def animate(coords):
time_text.set_text(time_template % coords[2])
return plt.scatter([coords[0]],[coords[1]], color='b'), time_text
def frames():
for xt, yt, turn in zip(x, y, np.linspace(1,len(x))):
yield xt, yt, turn
anim = animation.FuncAnimation(fig, animate,
frames=frames, interval=100, blit=False) #set to true, crashes on mac
anim.save("animate_map.mp4")
plt.show()
# ####-----
# import numpy as np
# from matplotlib import pyplot as plt
# from matplotlib import animation
# import seaborn as sns
# nx = 50
# ny = 50
# fig = plt.figure()
# data = np.random.rand(nx, ny)
# sns.heatmap(data, vmax=.8, square=True)
# def init():
# sns.heatmap(np.zeros((nx, ny)), vmax=.8, square=True)
# def animate(i):
# data = np.random.rand(nx, ny)
# sns.heatmap(data, vmax=.8, square=True)
# anim = animation.FuncAnimation(fig, animate, init_func=init, frames=20, repeat = False)
# anim.save("test.mp4")
# #plt.show()
|
gpl-3.0
|
toscanosaul/water
|
visualizer.py
|
10
|
2005
|
#!/usr/bin/env python
"""
Visualize shallow water simulation results.
NB: Requires a modern Matplotlib version; also needs
either FFMPeg (for MP4) or ImageMagick (for GIF)
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as manimation
import sys
def main(infile="waves.out", outfile="out.mp4", startpic="start.png"):
"""Visualize shallow water simulation results.
Args:
infile: Name of input file generated by simulator
outfile: Desired output file (mp4 or gif)
startpic: Name of picture generated at first frame
"""
u = np.fromfile(infile, dtype=np.dtype('f4'))
nx = int(u[0])
ny = int(u[1])
x = range(0,nx)
y = range(0,ny)
u = u[2:]
nframe = len(u) // (nx*ny)
stride = nx // 20
u = np.reshape(u, (nframe,nx,ny))
X, Y = np.meshgrid(x,y)
fig = plt.figure(figsize=(10,10))
def plot_frame(i, stride=5):
ax = fig.add_subplot(111, projection='3d')
ax.set_zlim(0, 2)
Z = u[i,:,:];
ax.plot_surface(X, Y, Z, rstride=stride, cstride=stride)
return ax
if startpic:
ax = plot_frame(0)
plt.savefig(startpic)
plt.delaxes(ax)
metadata = dict(title='Wave animation', artist='Matplotlib')
if outfile[-4:] == ".mp4":
Writer = manimation.writers['ffmpeg']
writer = Writer(fps=15, metadata=metadata,
extra_args=["-r", "30",
"-c:v", "libx264",
"-pix_fmt", "yuv420p"])
    elif outfile[-4:] == ".gif":
        Writer = manimation.writers['imagemagick']
        writer = Writer(fps=15, metadata=metadata)
    else:
        raise ValueError("Unsupported output format: {0}".format(outfile))
with writer.saving(fig, outfile, nframe):
for i in range(nframe):
ax = plot_frame(i)
writer.grab_frame()
plt.delaxes(ax)
if __name__ == "__main__":
main(*sys.argv[1:])
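# Example invocations (file names are placeholders):
#   python visualizer.py waves.out out.mp4 start.png
#   python visualizer.py waves.out out.gif start.png   # needs ImageMagick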
|
mit
|
ernestyalumni/MLgrabbag
|
visualization/bokehplus/anscombe.py
|
1
|
1523
|
"""
@file anscombe.py
@url https://bokeh.pydata.org/en/latest/docs/gallery/anscombe.html
@brief Anscombe's Quartet
@details Anscombe's quartet is a collection of 4 small datasets that have
nearly identical simple descriptive statistics (mean, variance, correlation,
and linear regression lines), yet appear very different when graphed.
"""
from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.layouts import column, gridplot
from bokeh.models import Circle, ColumnDataSource, Div, Grid, Line, \
LinearAxis, Plot, Range1d
from bokeh.resources import INLINE
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
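# Minimal sanity check (not in the original gallery example): verify the
# claim above that the four datasets share nearly identical descriptive
# statistics even though they look very different when graphed.
for group in ['I', 'II', 'III', 'IV']:
    x, y = quartet[group + 'x'], quartet[group + 'y']
    print("%-3s mean_x=%.2f var_x=%.2f mean_y=%.2f var_y=%.2f corr=%.3f"
          % (group, x.mean(), x.var(), y.mean(), y.var(), x.corr(y)))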
|
mit
|
ilo10/scikit-learn
|
sklearn/datasets/svmlight_format.py
|
114
|
15826
|
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
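# --- Hedged usage sketch (not part of the original module) ---
# Round-trip a tiny dense matrix through the svmlight format; the temporary
# file path is arbitrary.
if __name__ == "__main__":
    import tempfile
    X_demo = np.array([[0.0, 1.5, 0.0], [2.0, 0.0, 3.0]])
    y_demo = np.array([1, -1])
    with tempfile.NamedTemporaryFile(suffix=".svmlight", delete=False) as tmp:
        dump_svmlight_file(X_demo, y_demo, tmp)
    X_back, y_back = load_svmlight_file(tmp.name)
    assert np.allclose(X_demo, X_back.toarray())
    assert np.allclose(y_demo, y_back)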
|
bsd-3-clause
|
vivekmishra1991/scikit-learn
|
examples/linear_model/plot_multi_task_lasso_support.py
|
249
|
2211
|
#!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
|
bsd-3-clause
|
ephes/scikit-learn
|
examples/hetero_feature_union.py
|
288
|
6236
|
"""
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to the sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# Limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
|
bsd-3-clause
|
ltiao/scikit-learn
|
examples/model_selection/plot_confusion_matrix.py
|
244
|
2496
|
"""
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
|
bsd-3-clause
|
tawsifkhan/scikit-learn
|
examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
2601
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(int)  # np.int was removed from recent NumPy releases
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
|
bsd-3-clause
|
ldirer/scikit-learn
|
examples/svm/plot_iris.py
|
65
|
3742
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
svm.LinearSVC(C=C),
svm.SVC(kernel='rbf', gamma=0.7, C=C),
svm.SVC(kernel='poly', degree=3, C=C))
models = (clf.fit(X, y) for clf in models)
# title for the plots
titles = ('SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel('Sepal length')
ax.set_ylabel('Sepal width')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
|
bsd-3-clause
|
credp/lisa
|
lisa/tests/staging/utilclamp.py
|
1
|
13567
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
from operator import itemgetter
import numpy as np
import os
import pandas as pd
from lisa.analysis.frequency import FrequencyAnalysis
from lisa.analysis.load_tracking import LoadTrackingAnalysis
from lisa.datautils import df_add_delta, series_mean, df_window
from lisa.pelt import PELT_SCALE
from lisa.tests.base import ResultBundle, TestBundle, RTATestBundle, TestMetric
from lisa.wlgen.rta import RTAPhase, PeriodicWload
class UtilClamp(RTATestBundle, TestBundle):
"""
Validate that UtilClamp min values are honoured properly by the kernel.
The test is split into 8 phases. For each phase, a UtilClamp value is set
    for a task whose duty cycle would generate a lower utilization. Then the
    actual capacity allocated to the task during its activation is checked.
    The UtilClamp values of the 8 phases are picked to cover the entire SoC's
    CPU capacity scale (usually between 0 and 1024).
.. code-block:: text
|<-- band 0 -->|<-- band 1 -->|<-- band 2 -->|<-- ...
capacities: 0 | 128 | 256 512
| |
--------------------|--------------|-------------------------------
phase 1: uclamp_val |
|
-----------------------------------|-------------------------------
phase 2: uclamp_val
...
phase 8:
"""
NR_PHASES = 8
    CAPACITY_MARGIN = 0.8  # kernel task placement uses an 80% capacity margin
@classmethod
def check_from_target(cls, target):
kconfig = target.plat_info['kernel']['config']
if not kconfig.get('UCLAMP_TASK'):
ResultBundle.raise_skip("The target's kernel needs CONFIG_UCLAMP_TASK=y kconfig enabled")
@classmethod
def _collect_capacities(cls, plat_info):
"""
Returns, for each CPU a mapping frequency / capacity:
dict(cpu, dict(freq, capacity))
where capacity = max_cpu_capacity * freq / max_cpu_frequency.
"""
max_capacities = plat_info['cpu-capacities']['rtapp']
return {
cpu: {
freq: int(max_capacities[cpu] * freq / max(freqs))
for freq in freqs
}
for cpu, freqs in plat_info['freqs'].items()
}
@classmethod
def _collect_capacities_flatten(cls, plat_info):
capacities = [
capa
for freq_capas in cls._collect_capacities(plat_info).values()
for capa in freq_capas.values()
]
# Remove the duplicates from the list
return sorted(set(capacities))
@classmethod
def _get_bands(cls, capacities):
bands = list(zip(capacities, capacities[1:]))
# Only keep a number of bands
nr_bands = cls.NR_PHASES
if len(bands) > nr_bands:
# Pick the bands covering the widest range of util, since they
# are easier to test
bands = sorted(
bands,
key=lambda band: band[1] - band[0],
reverse=True
)
bands = bands[:nr_bands]
bands = sorted(bands, key=itemgetter(0))
return bands
@classmethod
def _get_phases(cls, plat_info):
"""
Returns a list of phases. Each phase being described by a tuple:
(uclamp_val, util)
"""
capacities = cls._collect_capacities_flatten(plat_info)
bands = cls._get_bands(capacities)
def band_mid(band):
return int((band[1] + band[0]) / 2)
def make_phase(band):
uclamp = band_mid(band)
util = uclamp / 2
name = f'uclamp-{uclamp}'
return (name, (uclamp, util))
return dict(map(make_phase, bands))
@classmethod
def _get_rtapp_profile(cls, plat_info):
periods = [
RTAPhase(
prop_name=name,
prop_wload=PeriodicWload(
duty_cycle_pct=(util / PELT_SCALE) * 100, # util to pct
duration=5,
period=cls.TASK_PERIOD,
),
prop_uclamp=(uclamp_val, uclamp_val),
prop_meta={'uclamp_val': uclamp_val},
)
for name, (uclamp_val, util) in cls._get_phases(plat_info).items()
]
return {'task': functools.reduce(lambda a, b: a + b, periods)}
def _get_trace_df(self):
task = self.rtapp_task_ids_map['task'][0]
# There is no CPU selection when we're going back from preemption.
# Setting preempted_value=1 ensures that it won't count as a new
# activation.
df = self.trace.analysis.tasks.df_task_activation(task,
preempted_value=1)
df = df[['active', 'cpu']]
df_freq = self.trace.analysis.frequency.df_cpus_frequency()
df_freq = df_freq[['cpu', 'frequency']]
df_freq = df_freq.pivot(index=None, columns='cpu', values='frequency')
df_freq.reset_index(inplace=True)
df_freq.set_index('Time', inplace=True)
df = df.merge(df_freq, how='outer', left_index=True, right_index=True)
        # Ensure that frequency values are propagated through the entire
        # DataFrame, as it is possible that no frequency event occurs
# during a phase.
for cpu in self.plat_info['cpu-capacities']['rtapp']:
df[cpu].ffill(inplace=True)
return df
def _get_phases_df(self):
task = self.rtapp_task_ids_map['task'][0]
df = self.trace.analysis.rta.df_phases(task, wlgen_profile=self.rtapp_profile)
df = df.copy()
df = df[df['properties'].apply(lambda props: props['meta']['from_test'])]
df.reset_index(inplace=True)
df.rename(columns={'index': 'start'}, inplace=True)
df['end'] = df['start'].shift(-1)
df['uclamp_val'] = df['properties'].apply(lambda row: row['meta']['uclamp_val'])
return df
def _for_each_phase(self, callback):
df_phases = self._get_phases_df()
df_trace = self._get_trace_df()
def parse_phase(phase):
start = phase['start']
end = phase['end']
df = df_trace
            # During a phase change, rt-app will wake up, and only then will
            # the UtilClamp value be changed. We therefore need to wait for
            # the second wakeup for the kernel to apply the most recently set
            # UtilClamp value.
start = df[(df.index >= start) &
(df['active'] == 1)].first_valid_index()
end = end if not np.isnan(end) else df.last_valid_index()
if (start >= end):
raise ValueError('Phase ends before it has even started')
df = df_trace[start:end].copy()
return callback(df, phase)
return df_phases.apply(parse_phase, axis=1)
def _plot_phases(self, test, failures, signal=None):
task = self.rtapp_task_ids_map['task'][0]
ax = self.trace.analysis.tasks.plot_task_activation(task,
which_cpu=True)
ax = self.trace.analysis.rta.plot_phases(task, wlgen_profile=self.rtapp_profile, axis=ax)
for failure in failures:
ax.axvline(failure, alpha=0.5, color='r')
if signal is not None:
signal.plot(ax=ax.twinx(), drawstyle='steps-post')
filepath = os.path.join(self.res_dir, f'utilclamp_{test}.png')
self.trace.analysis.rta.save_plot(ax.figure, filepath=filepath)
return ax
@FrequencyAnalysis.df_cpus_frequency.used_events
@LoadTrackingAnalysis.df_tasks_signal.used_events
def test_placement(self) -> ResultBundle:
"""
For each phase, checks if the task placement is compatible with
UtilClamp requirements. This is done by comparing the maximum capacity
of the CPU on which the task has been placed, with the UtilClamp
value.
"""
metrics = {}
test_failures = []
capacity_margin = self.CAPACITY_MARGIN
cpu_max_capacities = self.plat_info['cpu-capacities']['rtapp']
def parse_phase(df, phase):
uclamp_val = phase['uclamp_val']
num_activations = df['active'][df['active'] == 1].count()
cpus = set(map(int, df.cpu.dropna().unique()))
fitting_cpus = {
cpu
for cpu, cap in cpu_max_capacities.items()
if (cap == PELT_SCALE) or (cap * capacity_margin) > uclamp_val
}
failures = df[(
df['active'] == 1) & (df['cpu'].isin(cpus - fitting_cpus))
].index.tolist()
num_failures = len(failures)
test_failures.extend(failures)
metrics[phase['phase']] = {
'uclamp-min': TestMetric(uclamp_val),
'cpu-placements': TestMetric(cpus),
'expected-cpus': TestMetric(fitting_cpus),
'bad-activations': TestMetric(
num_failures * 100 / num_activations, "%"),
}
return cpus.issubset(fitting_cpus)
res = ResultBundle.from_bool(self._for_each_phase(parse_phase).all())
res.add_metric('Phases', metrics)
self._plot_phases('test_placement', test_failures)
return res
@FrequencyAnalysis.df_cpus_frequency.used_events
@LoadTrackingAnalysis.df_tasks_signal.used_events
def test_freq_selection(self) -> ResultBundle:
"""
For each phase, checks if the task placement and frequency selection
is compatible with UtilClamp requirements. This is done by comparing
the current CPU capacity on which the task has been placed, with the
UtilClamp value.
The expected capacity is the schedutil projected frequency selection
for the given uclamp value.
"""
metrics = {}
test_failures = []
capacity_dfs = []
# (
# # schedutil factor that converts util to a frequency for a
# # given CPU:
# #
# # next_freq = max_freq * C * util / max_cap
# #
# # where C = 1.25
# schedutil_factor,
#
# # list of frequencies available for a given CPU.
# frequencies,
# )
cpu_frequencies = {
cpu: (
(max(capacities) * (1 / self.CAPACITY_MARGIN)) / max(capacities.values()),
sorted(capacities)
)
for cpu, capacities in
self._collect_capacities(self.plat_info).items()
}
cpu_capacities = self._collect_capacities(self.plat_info)
def schedutil_map_util_cap(cpu, util):
"""
Returns, for a given util on a given CPU, the capacity that
schedutil would select.
"""
schedutil_factor, frequencies = cpu_frequencies[cpu]
schedutil_freq = schedutil_factor * util
# Find the first available freq that meet the schedutil freq
# requirement.
for freq in frequencies:
if freq >= schedutil_freq:
break
return cpu_capacities[cpu][freq]
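        # Worked example (illustrative numbers, not from a real target): for
        # a CPU with max_freq = 2000 MHz and max_capacity = 1024, a
        # uclamp_val of 512 projects to
        #   next_freq = 2000 * 1.25 * 512 / 1024 = 1250 MHz
        # which is rounded up to the first available OPP; the expected
        # capacity is the one associated with that frequency.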
def parse_phase(df, phase):
uclamp_val = phase['uclamp_val']
num_activations = df['active'][df['active'] == 1].count()
expected = schedutil_map_util_cap(df['cpu'].unique()[0],
uclamp_val)
# Activations numbering
df['activation'] = df['active'].cumsum()
# Only keep the activations
df.ffill(inplace=True)
df = df[df['active'] == 1]
# Actual capacity at which the task is running
for cpu, freq_to_capa in cpu_capacities.items():
df[cpu] = df[cpu].map(freq_to_capa)
df['capacity'] = df.apply(lambda line: line[line.cpu], axis=1)
failures = df[df['capacity'] != expected]
num_failures = failures['activation'].nunique()
test_failures.extend(failures.index.tolist())
capacity_dfs.append(df[['capacity']])
metrics[phase['phase']] = {
'uclamp-min': TestMetric(uclamp_val),
'expected-capacity': TestMetric(expected),
'bad-activations': TestMetric(
num_failures * 100 / num_activations, "%"),
}
return failures.empty
res = ResultBundle.from_bool(self._for_each_phase(parse_phase).all())
res.add_metric('Phases', metrics)
self._plot_phases('test_frequency', test_failures,
pd.concat(capacity_dfs))
return res
|
apache-2.0
|
chromium/chromium
|
tools/perf/cli_tools/soundwave/commands.py
|
10
|
5506
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import json
import logging
try:
import sqlite3
except ImportError:
pass
from core import cli_utils
from core.external_modules import pandas
from core.services import dashboard_service
from cli_tools.soundwave import pandas_sqlite
from cli_tools.soundwave import studies
from cli_tools.soundwave import tables
from cli_tools.soundwave import worker_pool
def _FetchBugsWorker(args):
con = sqlite3.connect(args.database_file, timeout=10)
def Process(bug_id):
bugs = tables.bugs.DataFrameFromJson([dashboard_service.Bugs(bug_id)])
pandas_sqlite.InsertOrReplaceRecords(con, 'bugs', bugs)
worker_pool.Process = Process
def FetchAlertsData(args):
params = {
'test_suite': args.benchmark,
'min_timestamp': cli_utils.DaysAgoToTimestamp(args.days)
}
if args.sheriff != 'all':
params['sheriff'] = args.sheriff
with tables.DbSession(args.database_file) as con:
# Get alerts.
num_alerts = 0
bug_ids = set()
# TODO: This loop may be slow when fetching thousands of alerts, needs a
# better progress indicator.
for data in dashboard_service.IterAlerts(**params):
alerts = tables.alerts.DataFrameFromJson(data)
pandas_sqlite.InsertOrReplaceRecords(con, 'alerts', alerts)
num_alerts += len(alerts)
bug_ids.update(alerts['bug_id'].unique())
print('%d alerts found!' % num_alerts)
# Get set of bugs associated with those alerts.
bug_ids.discard(0) # A bug_id of 0 means untriaged.
print('%d bugs found!' % len(bug_ids))
# Filter out bugs already in cache.
if args.use_cache:
known_bugs = set(
b for b in bug_ids if tables.bugs.Get(con, b) is not None)
if known_bugs:
print('(skipping %d bugs already in the database)' % len(known_bugs))
bug_ids.difference_update(known_bugs)
# Use worker pool to fetch bug data.
total_seconds = worker_pool.Run(
'Fetching data of %d bugs: ' % len(bug_ids),
_FetchBugsWorker, args, bug_ids)
print('[%.1f bugs per second]' % (len(bug_ids) / total_seconds))
def _IterStaleTestPaths(con, test_paths):
"""Iterate over test_paths yielding only those with stale or absent data.
A test_path is considered to be stale if the most recent data point we have
for it in the db is more than a day old.
"""
a_day_ago = pandas.Timestamp.utcnow() - pandas.Timedelta(days=1)
a_day_ago = a_day_ago.tz_convert(tz=None)
for test_path in test_paths:
latest = tables.timeseries.GetMostRecentPoint(con, test_path)
if latest is None or latest['timestamp'] < a_day_ago:
yield test_path
def _FetchTimeseriesWorker(args):
con = sqlite3.connect(args.database_file, timeout=10)
min_timestamp = cli_utils.DaysAgoToTimestamp(args.days)
def Process(test_path):
try:
if isinstance(test_path, tables.timeseries.Key):
params = test_path.AsApiParams()
params['min_timestamp'] = min_timestamp
data = dashboard_service.Timeseries2(**params)
else:
data = dashboard_service.Timeseries(test_path, days=args.days)
except KeyError:
logging.info('Timeseries not found: %s', test_path)
return
timeseries = tables.timeseries.DataFrameFromJson(test_path, data)
pandas_sqlite.InsertOrReplaceRecords(con, 'timeseries', timeseries)
worker_pool.Process = Process
def _ReadTimeseriesFromFile(filename):
with open(filename, 'r') as f:
data = json.load(f)
return [tables.timeseries.Key.FromDict(ts) for ts in data]
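# Hedged sketch of the expected file contents (assumed from the use of
# tables.timeseries.Key.FromDict, not verified here): a JSON list of dicts,
# each describing one timeseries key, e.g.
# [{"test_suite": "loading.mobile", "measurement": "timeToFirstPaint",
#   "bot": "ChromiumPerf:android-go-perf", "test_case": "Wikipedia"}]
# The field names above are illustrative only; the authoritative set is
# whatever Key.FromDict accepts.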
def FetchTimeseriesData(args):
def _MatchesAllFilters(test_path):
return all(f in test_path for f in args.filters)
with tables.DbSession(args.database_file) as con:
# Get test_paths.
if args.benchmark is not None:
test_paths = dashboard_service.ListTestPaths(
args.benchmark, sheriff=args.sheriff)
elif args.input_file is not None:
test_paths = _ReadTimeseriesFromFile(args.input_file)
elif args.study is not None:
test_paths = list(args.study.IterTestPaths())
else:
raise ValueError('No source for test paths specified')
# Apply --filter's to test_paths.
if args.filters:
test_paths = list(filter(_MatchesAllFilters, test_paths))
num_found = len(test_paths)
print('%d test paths found!' % num_found)
# Filter out test_paths already in cache.
if args.use_cache:
test_paths = list(_IterStaleTestPaths(con, test_paths))
num_skipped = num_found - len(test_paths)
if num_skipped:
print('(skipping %d test paths already in the database)' % num_skipped)
# Use worker pool to fetch test path data.
total_seconds = worker_pool.Run(
'Fetching data of %d timeseries: ' % len(test_paths),
_FetchTimeseriesWorker, args, test_paths)
print('[%.1f test paths per second]' % (len(test_paths) / total_seconds))
if args.output_csv is not None:
print()
print('Post-processing data for study ...')
dfs = []
with tables.DbSession(args.database_file) as con:
for test_path in test_paths:
df = tables.timeseries.GetTimeSeries(con, test_path)
dfs.append(df)
df = studies.PostProcess(pandas.concat(dfs, ignore_index=True))
with cli_utils.OpenWrite(args.output_csv) as f:
df.to_csv(f, index=False)
print('Wrote timeseries data to:', args.output_csv)
|
bsd-3-clause
|
dsquareindia/scikit-learn
|
examples/neural_networks/plot_mnist_filters.py
|
79
|
2189
|
"""
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example, if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in an
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# solver='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
bsd-3-clause
|
elijah513/scikit-learn
|
sklearn/datasets/tests/test_rcv1.py
|
322
|
2414
|
"""Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of samples for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
|
bsd-3-clause
|
QUANTAXIS/QUANTAXIS
|
QUANTAXIS/QAIndicator/talib_series.py
|
2
|
6018
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try:
import talib
except ImportError:
pass
# print('PLEASE install TALIB to call these methods')
import pandas as pd
def CMO(Series, timeperiod=14):
res = talib.CMO(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def BBANDS(Series, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
up, middle, low = talib.BBANDS(
Series.values, timeperiod, nbdevup, nbdevdn, matype)
return pd.Series(up, index=Series.index), pd.Series(middle, index=Series.index), pd.Series(low, index=Series.index)
def BETA(SeriesA, SeriesB, timeperiod=5):
res = talib.BETA(SeriesA.values, SeriesB.values, timeperiod)
return pd.Series(res, index=SeriesA.index)
def CORREL(SeriesA, SeriesB, timeperiod=5):
res = talib.CORREL(SeriesA.values, SeriesB.values, timeperiod)
return pd.Series(res, index=SeriesA.index)
def DEMA(Series, timeperiod=30):
res = talib.DEMA(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
# def EMA(Series, timeperiod=30):
# res = talib.EMA(Series.values, timeperiod)
# return pd.Series(res, index=Series.index)
def HT_DCPERIOD(Series):
res = talib.HT_DCPERIOD(Series.values)
return pd.Series(res, index=Series.index)
def HT_DCPHASE(Series):
res = talib.HT_DCPHASE(Series.values)
return pd.Series(res, index=Series.index)
def HT_PHASOR(Series):
res = talib.HT_PHASOR(Series.values)
return pd.Series(res, index=Series.index)
def HT_SINE(Series):
res = talib.HT_SINE(Series.values)
return pd.Series(res, index=Series.index)
def HT_TRENDLINE(Series):
res = talib.HT_TRENDLINE(Series.values)
return pd.Series(res, index=Series.index)
def HT_TRENDMODE(Series):
res = talib.HT_TRENDMODE(Series.values)
return pd.Series(res, index=Series.index)
def KAMA(Series, timeperiod=30):
res = talib.KAMA(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG(Series, timeperiod=14):
res = talib.LINEARREG(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG_ANGLE(Series, timeperiod=14):
res = talib.LINEARREG_ANGLE(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG_INTERCEPT(Series, timeperiod=14):
res = talib.LINEARREG_INTERCEPT(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
def LINEARREG_SLOPE(Series, timeperiod=14):
res = talib.LINEARREG_SLOPE(Series.values, timeperiod)
return pd.Series(res, index=Series.index)
# def MA(Series,):
# deprecated: conflicts with QA's own MA function
# def MACD(Series):
# deprecated: conflicts with QA's own MACD function
def MACDEXT(Series, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0):
macd, macdsignal, macdhist = talib.MACDEXT(
Series.values, fastperiod, fastmatype, slowperiod, slowmatype, signalperiod, signalmatype)
return pd.Series(macd, index=Series.index), pd.Series(macdsignal, index=Series.index), pd.Series(macdhist, index=Series.index)
def MACDFIX(Series, timeperiod=9):
macd, macdsignal, macdhist = talib.MACDFIX(Series.values, timeperiod)
return pd.Series(macd, index=Series.index), pd.Series(macdsignal, index=Series.index), pd.Series(macdhist, index=Series.index)
def MAMA(Series, fastlimit=0.5, slowlimit=0.05):
mama, fama = talib.MAMA(Series.values, fastlimit, slowlimit)
return pd.Series(mama, index=Series.index), pd.Series(fama, index=Series.index)
# # MAVP - Moving average with variable period
# real = talib.MAVP(close, periods, minperiod=2, maxperiod=30, matype=0)
# # MIDPOINT - MidPoint over period
# real = talib.MIDPOINT(close, timeperiod=14)
# # MIDPRICE - Midpoint Price over period
# real = talib.MIDPRICE(high, low, timeperiod=14)
# # SAREXT - Parabolic SAR - Extended
# real = SAREXT(high, low, startvalue=0, offsetonreverse=0, accelerationinitlong=0,
# accelerationlong=0, accelerationmaxlong=0, accelerationinitshort=0, accelerationshort=0, accelerationmaxshort=0)
# # T3 - Triple Exponential Moving Average (T3)
# real = T3(close, timeperiod=5, vfactor=0)
# # TEMA - Triple Exponential Moving Average
# real = TEMA(close, timeperiod=30)
# # TRIMA - Triangular Moving Average
# real = TRIMA(close, timeperiod=30)
# # WMA - Weighted Moving Average
# real = WMA(close, timeperiod=30)
def SMA(Series, timeperiod=30):
return pd.Series(talib.SMA(Series.values, timeperiod), index=Series.index)
def STDDEV(Series, timeperiod=5, nbdev=1):
return pd.Series(talib.STDDEV(Series.values, timeperiod, nbdev), index=Series.index)
def STOCHRSI(Series, timeperiod=14, fastk_period=5, fastd_period=3, fastd_matype=0):
fastk, fastd = talib.STOCHRSI(
Series.values, timeperiod, fastk_period, fastd_period, fastd_matype)
return pd.Series(fastk, index=Series.index), pd.Series(fastd, index=Series.index)
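# Usage sketch (assumes TA-Lib is installed; the price data below is made up):
#
# import pandas as pd
# close = pd.Series([10.0, 10.5, 10.2, 10.8, 11.1, 10.9, 11.3],
#                   index=pd.date_range('2021-01-01', periods=7))
# sma = SMA(close, timeperiod=3)              # Series aligned with `close`
# up, mid, low = BBANDS(close, timeperiod=3)  # three aligned Series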
|
mit
|
Supermem/ibis
|
ibis/impala/client.py
|
6
|
48961
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from posixpath import join as pjoin
import re
import six
import threading
import weakref
import hdfs
import ibis.common as com
from ibis.config import options
from ibis.client import (Query, AsyncQuery, Database,
DatabaseEntity, SQLClient)
from ibis.compat import lzip
from ibis.filesystems import HDFS, WebHDFS
from ibis.impala import udf, ddl
from ibis.impala.compat import impyla, ImpylaError, HS2Error
from ibis.impala.compiler import build_ast
from ibis.sql.compiler import DDL
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis.util as util
if six.PY2:
import Queue as queue
else:
import queue
class ImpalaDatabase(Database):
def list_udfs(self, like=None):
return self.client.list_udfs(like=self._qualify_like(like),
database=self.name)
def list_udas(self, like=None):
return self.client.list_udas(like=self._qualify_like(like),
database=self.name)
class ImpalaConnection(object):
"""
Database connection wrapper
"""
def __init__(self, pool_size=8, database='default', **params):
self.params = params
self.codegen_disabled = False
self.database = database
self.lock = threading.Lock()
self.connection_pool = queue.Queue(pool_size)
self.connection_pool_size = 0
self.max_pool_size = pool_size
self._connections = weakref.WeakValueDictionary()
self.ping()
def close(self):
"""
Close all open Impyla sessions
"""
for k, con in self._connections.items():
con.close()
def set_database(self, name):
self.database = name
def disable_codegen(self, disabled=True):
self.codegen_disabled = disabled
def execute(self, query, async=False):
if isinstance(query, DDL):
query = query.compile()
cursor = self._get_cursor()
self.log(query)
try:
cursor.execute(query, async=async)
except:
cursor.release()
self.error('Exception caused by {0}'.format(query))
raise
return cursor
def log(self, msg):
if options.verbose:
(options.verbose_log or to_stdout)(msg)
def error(self, msg):
self.log(msg)
def fetchall(self, query):
with self.execute(query) as cur:
results = cur.fetchall()
return results
def _get_cursor(self):
try:
cur = self.connection_pool.get(False)
if cur.database != self.database:
cur = self._new_cursor()
if cur.codegen_disabled != self.codegen_disabled:
cur.disable_codegen(self.codegen_disabled)
return cur
except queue.Empty:
if self.connection_pool_size < self.max_pool_size:
cursor = self._new_cursor()
self.connection_pool_size += 1
return cursor
else:
raise com.InternalError('Too many concurrent / hung queries')
def _new_cursor(self):
params = self.params.copy()
con = impyla.connect(database=self.database, **params)
self._connections[id(con)] = con
# make sure the connection works
cursor = con.cursor()
cursor.ping()
wrapper = ImpalaCursor(cursor, self, con, self.database)
if self.codegen_disabled:
wrapper.disable_codegen(self.codegen_disabled)
return wrapper
def ping(self):
self._new_cursor()
class ImpalaCursor(object):
def __init__(self, cursor, con, impyla_con, database,
codegen_disabled=False):
self.cursor = cursor
self.con = con
self.impyla_con = impyla_con
self.database = database
self.codegen_disabled = codegen_disabled
def __del__(self):
self._close_cursor()
def _close_cursor(self):
try:
self.cursor.close()
except HS2Error as e:
# connection was closed elsewhere
if 'invalid session' not in e.args[0].lower():
raise
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.release()
def disable_codegen(self, disabled=True):
self.codegen_disabled = disabled
query = ('SET disable_codegen={0}'
.format('true' if disabled else 'false'))
self.cursor.execute(query)
@property
def description(self):
return self.cursor.description
def release(self):
self.con.connection_pool.put(self)
def execute(self, stmt, async=False):
if async:
self.cursor.execute_async(stmt)
else:
self.cursor.execute(stmt)
def is_finished(self):
return not self.is_executing()
def is_executing(self):
return self.cursor.is_executing()
def cancel(self):
self.cursor.cancel_operation()
def fetchall(self):
return self.cursor.fetchall()
class ImpalaQuery(Query):
def _db_type_to_dtype(self, db_type):
return _HS2_TTypeId_to_dtype[db_type]
class ImpalaAsyncQuery(ImpalaQuery, AsyncQuery):
def __init__(self, client, ddl):
super(ImpalaAsyncQuery, self).__init__(client, ddl)
self._cursor = None
self._exception = None
self._execute_thread = None
self._execute_complete = False
self._operation_active = False
def __del__(self):
if self._cursor is not None:
self._cursor.release()
def execute(self):
if self._operation_active:
raise com.IbisError('operation already active')
con = self.client.con
# XXX: there is codegen overhead somewhere which causes execute_async
# to block, unfortunately. This threading hack works around it
def _async_execute():
try:
self._cursor = con.execute(self.compiled_ddl, async=True)
except Exception as e:
self._exception = e
self._execute_complete = True
self._execute_complete = False
self._operation_active = True
self._execute_thread = threading.Thread(target=_async_execute)
self._execute_thread.start()
return self
def _wait_execute(self):
if not self._operation_active:
raise com.IbisError('No active query')
if self._execute_thread.is_alive():
self._execute_thread.join()
elif self._exception is not None:
raise self._exception
def is_finished(self):
"""
Return True if the operation is finished
"""
from impala.error import ProgrammingError
self._wait_execute()
try:
return self._cursor.is_finished()
except ProgrammingError as e:
if 'state is not available' in e.args[0]:
return True
raise
def cancel(self):
"""
Cancel the query (or attempt to)
"""
self._wait_execute()
return self._cursor.cancel()
def status(self):
"""
Retrieve Impala query status
"""
self._wait_execute()
from impala.hiveserver2 import get_operation_status
cur = self._cursor
handle = cur._last_operation_handle
return get_operation_status(cur.service, handle)
def wait(self, progress_bar=True):
raise NotImplementedError
def get_result(self):
"""
Presuming the operation is completed, return the cursor result as would
be returned by the synchronous query API
"""
self._wait_execute()
result = self._fetch_from_cursor(self._cursor)
return self._wrap_result(result)
_HS2_TTypeId_to_dtype = {
'BOOLEAN': 'bool',
'TINYINT': 'int8',
'SMALLINT': 'int16',
'INT': 'int32',
'BIGINT': 'int64',
'TIMESTAMP': 'datetime64[ns]',
'FLOAT': 'float32',
'DOUBLE': 'float64',
'STRING': 'string',
'DECIMAL': 'object',
'BINARY': 'string',
'VARCHAR': 'string',
'CHAR': 'string'
}
class ImpalaClient(SQLClient):
"""
An Ibis client interface that uses Impala
"""
database_class = ImpalaDatabase
sync_query = ImpalaQuery
async_query = ImpalaAsyncQuery
def __init__(self, con, hdfs_client=None, **params):
self.con = con
if isinstance(hdfs_client, hdfs.Client):
hdfs_client = WebHDFS(hdfs_client)
elif hdfs_client is not None and not isinstance(hdfs_client, HDFS):
raise TypeError(hdfs_client)
self._hdfs = hdfs_client
self._temp_objects = weakref.WeakValueDictionary()
self._ensure_temp_db_exists()
def _build_ast(self, expr):
return build_ast(expr)
@property
def hdfs(self):
if self._hdfs is None:
raise com.IbisError('No HDFS connection; must pass connection '
'using the hdfs_client argument to '
'ibis.make_client')
return self._hdfs
@property
def _table_expr_klass(self):
return ImpalaTable
def close(self):
"""
Close Impala connection and drop any temporary objects
"""
for k, v in self._temp_objects.items():
try:
v.drop()
except HS2Error:
pass
self.con.close()
def disable_codegen(self, disabled=True):
"""
Turn off or on LLVM codegen in Impala query execution
Parameters
----------
disabled : boolean, default True
To disable codegen, pass with no argument or True. To enable codegen,
pass False
"""
self.con.disable_codegen(disabled)
def log(self, msg):
if options.verbose:
(options.verbose_log or to_stdout)(msg)
def _fully_qualified_name(self, name, database):
if ddl._is_fully_qualified(name):
return name
database = database or self.current_database
return '{0}.`{1}`'.format(database, name)
def list_tables(self, like=None, database=None):
"""
List tables in the current (or indicated) database. Like the SHOW
TABLES command in the impala-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all tables starting with 'foo'
database : string, default None
If not passed, uses the current/default database
Returns
-------
tables : list of strings
"""
statement = 'SHOW TABLES'
if database:
statement += ' IN {0}'.format(database)
if like:
m = ddl.fully_qualified_re.match(like)
if m:
database, quoted, unquoted = m.groups()
like = quoted or unquoted
return self.list_tables(like=like, database=database)
statement += " LIKE '{0}'".format(like)
with self._execute(statement, results=True) as cur:
result = self._get_list(cur)
return result
def _get_list(self, cur):
tuples = cur.fetchall()
if len(tuples) > 0:
return list(lzip(*tuples)[0])
else:
return []
def set_database(self, name):
"""
Set the default database scope for client
"""
self.con.set_database(name)
def exists_database(self, name):
"""
Checks if a given database exists
Parameters
----------
name : string
Database name
Returns
-------
if_exists : boolean
"""
return len(self.list_databases(like=name)) > 0
def create_database(self, name, path=None, force=False):
"""
Create a new Impala database
Parameters
----------
name : string
Database name
path : string, default None
HDFS path where to store the database data; otherwise uses Impala
default
"""
if path:
# explicit mkdir ensures the user owns the dir rather than impala,
# which is easier for manual cleanup, if necessary
self.hdfs.mkdir(path)
statement = ddl.CreateDatabase(name, path=path, can_exist=force)
self._execute(statement)
def drop_database(self, name, force=False):
"""
Drop an Impala database
Parameters
----------
name : string
Database name
force : boolean, default False
If False and there are any tables in this database, raises an
IntegrityError
"""
if not force or self.exists_database(name):
tables = self.list_tables(database=name)
udfs = self.list_udfs(database=name)
udas = self.list_udas(database=name)
else:
tables = []
udfs = []
udas = []
if force:
for table in tables:
self.log('Dropping {0}'.format('{0}.{1}'.format(name, table)))
self.drop_table_or_view(table, database=name)
for func in udfs:
self.log('Dropping function {0}({1})'.format(func.name,
func.inputs))
self.drop_udf(func.name, input_types=func.inputs,
database=name, force=True)
for func in udas:
self.log('Dropping aggregate function {0}({1})'
.format(func.name, func.inputs))
self.drop_uda(func.name, input_types=func.inputs,
database=name, force=True)
else:
if len(tables) > 0 or len(udfs) > 0 or len(udas) > 0:
raise com.IntegrityError('Database {0} must be empty before '
'being dropped, or set '
'force=True'.format(name))
statement = ddl.DropDatabase(name, must_exist=not force)
self._execute(statement)
def list_databases(self, like=None):
"""
List databases in the Impala cluster. Like the SHOW DATABASES command
in the impala-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all tables starting with 'foo'
Returns
-------
databases : list of strings
"""
statement = 'SHOW DATABASES'
if like:
statement += " LIKE '{0}'".format(like)
with self._execute(statement, results=True) as cur:
results = self._get_list(cur)
return results
def get_partition_schema(self, table_name, database=None):
"""
For partitioned tables, return the schema (names and types) for the
partition columns
Parameters
----------
table_name : string
May be fully qualified
database : string, default None
Returns
-------
partition_schema : ibis Schema
"""
qualified_name = self._fully_qualified_name(table_name, database)
schema = self.get_schema(table_name, database=database)
name_to_type = dict(zip(schema.names, schema.types))
query = 'SHOW PARTITIONS {0}'.format(qualified_name)
result = self._execute_query(query)
partition_fields = []
for x in result.columns:
if x not in name_to_type:
break
partition_fields.append((x, name_to_type[x]))
pnames, ptypes = zip(*partition_fields)
return dt.Schema(pnames, ptypes)
def get_schema(self, table_name, database=None):
"""
Return a Schema object for the indicated table and database
Parameters
----------
table_name : string
May be fully qualified
database : string, default None
Returns
-------
schema : ibis Schema
"""
qualified_name = self._fully_qualified_name(table_name, database)
query = 'DESCRIBE {0}'.format(qualified_name)
tuples = self.con.fetchall(query)
names, types, comments = zip(*tuples)
ibis_types = []
for t in types:
t = t.lower()
t = udf._impala_to_ibis_type.get(t, t)
ibis_types.append(t)
names = [x.lower() for x in names]
return dt.Schema(names, ibis_types)
def exists_table(self, name, database=None):
"""
Determine if the indicated table or view exists
Parameters
----------
name : string
database : string, default None
Returns
-------
if_exists : boolean
"""
return len(self.list_tables(like=name, database=database)) > 0
def create_view(self, name, expr, database=None):
"""
Create an Impala view from a table expression
Parameters
----------
name : string
expr : ibis TableExpr
database : string, default None
"""
ast = self._build_ast(expr)
select = ast.queries[0]
statement = ddl.CreateView(name, select, database=database)
self._execute(statement)
def drop_view(self, name, database=None, force=False):
"""
Drop an Impala view
Parameters
----------
name : string
database : string, default None
force : boolean, default False
Database may throw exception if table does not exist
"""
statement = ddl.DropView(name, database=database,
must_exist=not force)
self._execute(statement)
def create_table(self, table_name, expr=None, schema=None, database=None,
format='parquet', force=False, external=False,
path=None, partition=None, like_parquet=None):
"""
Create a new table in Impala using an Ibis table expression
Parameters
----------
table_name : string
expr : TableExpr, optional
If passed, creates table from select statement results
schema : ibis.Schema, optional
Mutually exclusive with expr, creates an empty table with a
particular schema
database : string, default None (optional)
format : {'parquet'}
force : boolean, default False
Do not create table if table with indicated name already exists
external : boolean, default False
Create an external table; Impala will not delete the underlying data
when the table is dropped
path : string, default None
Specify the path where Impala reads and writes files for the table
partition : list of strings
Must pass a schema to use this. Cannot partition from an expression
(create-table-as-select)
like_parquet : string (HDFS path), optional
Can specify in lieu of a schema
Examples
--------
con.create_table('new_table_name', table_expr)
"""
if like_parquet is not None:
raise NotImplementedError
if expr is not None:
ast = self._build_ast(expr)
select = ast.queries[0]
if partition is not None:
# Fairly certain this is currently the case
raise ValueError('partition not supported with '
'create-table-as-select')
statement = ddl.CTAS(table_name, select,
database=database,
can_exist=force,
format=format,
external=external,
path=path)
elif schema is not None:
statement = ddl.CreateTableWithSchema(
table_name, schema, ddl.NoFormat(),
database=database,
format=format,
can_exist=force,
external=external,
path=path, partition=partition)
else:
raise com.IbisError('Must pass expr or schema')
self._execute(statement)
def pandas(self, df, name=None, database=None, persist=False):
"""
Create a (possibly temp) parquet table from a local pandas DataFrame.
"""
name, database = self._get_concrete_table_path(name, database,
persist=persist)
qualified_name = self._fully_qualified_name(name, database)
# write df to a temp CSV file on HDFS
temp_csv_hdfs_dir = pjoin(options.impala.temp_hdfs_path, util.guid())
buf = six.BytesIO()
df.to_csv(buf, header=False, index=False, na_rep='\\N')
self.hdfs.put(pjoin(temp_csv_hdfs_dir, '0.csv'), buf)
# define a temporary table using delimited data
schema = pandas_to_ibis_schema(df)
table = self.delimited_file(
temp_csv_hdfs_dir, schema,
name='ibis_tmp_pandas_{0}'.format(util.guid()), database=database,
external=True, persist=False)
# CTAS into Parquet
self.create_table(name, expr=table, database=database,
format='parquet', force=False)
# cleanup
self.hdfs.delete(temp_csv_hdfs_dir, recursive=True)
return self._wrap_new_table(qualified_name, persist)
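# Hedged usage sketch (names are illustrative, not from the original source):
# given an existing client `con` and a local DataFrame `df`, the call below
# stages the frame as CSV on HDFS, CTAS-es it into a Parquet table and
# returns an ImpalaTable; with persist=False the table is dropped when the
# returned object is garbage collected.
#
# tmp = con.pandas(df, name='scratch_df', database='analysis', persist=False)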
def avro_file(self, hdfs_dir, avro_schema, name=None, database=None,
external=True, persist=False):
"""
Create a (possibly temporary) table to read a collection of Avro data.
Parameters
----------
hdfs_dir : string
Absolute HDFS path to directory containing avro files
avro_schema : dict
The Avro schema for the data as a Python dict
name : string, default None
database : string, default None
external : boolean, default True
persist : boolean, default False
Returns
-------
avro_table : ImpalaTable
"""
name, database = self._get_concrete_table_path(name, database,
persist=persist)
qualified_name = self._fully_qualified_name(name, database)
stmt = ddl.CreateTableAvro(name, hdfs_dir, avro_schema,
database=database,
external=external)
self._execute(stmt)
return self._wrap_new_table(qualified_name, persist)
def delimited_file(self, hdfs_dir, schema, name=None, database=None,
delimiter=',', escapechar=None, lineterminator=None,
external=True, persist=False):
"""
Interpret delimited text files (CSV / TSV / etc.) as an Ibis table. See
`parquet_file` for more exposition on what happens under the hood.
Parameters
----------
hdfs_dir : string
HDFS directory name containing delimited text files
schema : ibis Schema
name : string, default None
Name for the temporary or persistent table; otherwise a random one is
generated
database : string
Database to create the (possibly temporary) table in
delimiter : length-1 string, default ','
Pass None if there is no delimiter
escapechar : length-1 string
Character used to escape special characters
lineterminator : length-1 string
Character used to delimit lines
external : boolean, default True
Create table as EXTERNAL (data will not be deleted on drop). Note that
if persist=False and external=False, whatever data you reference will
be deleted
persist : boolean, default False
If True, do not delete the table upon garbage collection of ibis
table object
Returns
-------
delimited_table : ImpalaTable
"""
name, database = self._get_concrete_table_path(name, database,
persist=persist)
qualified_name = self._fully_qualified_name(name, database)
stmt = ddl.CreateTableDelimited(name, hdfs_dir, schema,
database=database,
delimiter=delimiter,
external=external,
lineterminator=lineterminator,
escapechar=escapechar)
self._execute(stmt)
return self._wrap_new_table(qualified_name, persist)
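# Hedged usage sketch (paths and schema are hypothetical; assumes the
# top-level ibis.schema helper is available):
#
# schema = ibis.schema([('ts', 'timestamp'), ('value', 'double')])
# t = con.delimited_file('/user/ibis/csv_input', schema,
#                        delimiter=',', external=True, persist=False)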
def parquet_file(self, hdfs_dir, schema=None, name=None, database=None,
external=True, like_file=None, like_table=None,
persist=False):
"""
Make indicated parquet file in HDFS available as an Ibis table.
The table created can be optionally named and persisted, otherwise a
unique name will be generated. Temporarily, for any non-persistent
external table created by Ibis we will attempt to drop it when the
underlying object is garbage collected (or the Python interpreter shuts
down normally).
Parameters
----------
hdfs_dir : string
Path in HDFS
schema : ibis Schema
If no schema is provided and neither of the like_* arguments is passed,
one will be inferred from one of the parquet files in the directory.
like_file : string
Absolute path to Parquet file in HDFS to use for schema
definitions. An alternative to having to supply an explicit schema
like_table : string
Fully scoped and escaped string to an Impala table whose schema we
will use for the newly created table.
name : string, optional
random unique name generated otherwise
database : string, optional
Database to create the (possibly temporary) table in
external : boolean, default True
If a table is external, the referenced data will not be deleted when
the table is dropped in Impala. Otherwise (external=False) Impala
takes ownership of the Parquet file.
persist : boolean, default False
Do not drop the table upon Ibis garbage collection / interpreter
shutdown
Returns
-------
parquet_table : ImpalaTable
"""
name, database = self._get_concrete_table_path(name, database,
persist=persist)
# If no schema provided, need to find some absolute path to a file in
# the HDFS directory
if like_file is None and like_table is None and schema is None:
file_name = self.hdfs._find_any_file(hdfs_dir)
like_file = pjoin(hdfs_dir, file_name)
qualified_name = self._fully_qualified_name(name, database)
stmt = ddl.CreateTableParquet(name, hdfs_dir,
schema=schema,
database=database,
example_file=like_file,
example_table=like_table,
external=external,
can_exist=False)
self._execute(stmt)
return self._wrap_new_table(qualified_name, persist)
def _get_concrete_table_path(self, name, database, persist=False):
if not persist:
if name is None:
name = util.guid()
if database is None:
self._ensure_temp_db_exists()
database = options.impala.temp_db
return name, database
else:
if name is None:
raise com.IbisError('Must pass table name if persist=True')
return name, database
def _ensure_temp_db_exists(self):
# TODO: session memoize to avoid unnecessary `SHOW DATABASES` calls
name, path = options.impala.temp_db, options.impala.temp_hdfs_path
if not self.exists_database(name):
self.create_database(name, path=path, force=True)
def _wrap_new_table(self, qualified_name, persist):
if persist:
t = self.table(qualified_name)
else:
schema = self._get_table_schema(qualified_name)
node = ImpalaTemporaryTable(qualified_name, schema, self)
t = self._table_expr_klass(node)
# Compute number of rows in table for better default query planning
cardinality = t.count().execute()
set_card = ("alter table {0} set tblproperties('numRows'='{1}', "
"'STATS_GENERATED_VIA_STATS_TASK' = 'true')"
.format(qualified_name, cardinality))
self._execute(set_card)
self._temp_objects[id(t)] = t
return t
def text_file(self, hdfs_path, column_name='value'):
"""
Interpret text data as a table with a single string column.
Parameters
----------
hdfs_path : string
column_name : string, default 'value'
Returns
-------
text_table : TableExpr
"""
pass
def insert(self, table_name, expr, database=None, overwrite=False,
validate=True):
"""
Insert into existing table
Parameters
----------
table_name : string
expr : TableExpr
database : string, default None
overwrite : boolean, default False
If True, will replace existing contents of table
validate : boolean, default True
If True, do more rigorous validation that schema of table being
inserted is compatible with the existing table
Examples
--------
con.insert('my_table', table_expr)
# Completely overwrite contents
con.insert('my_table', table_expr, overwrite=True)
"""
if validate:
existing_schema = self.get_schema(table_name, database=database)
insert_schema = expr.schema()
if not insert_schema.equals(existing_schema):
_validate_compatible(insert_schema, existing_schema)
ast = self._build_ast(expr)
select = ast.queries[0]
statement = ddl.InsertSelect(table_name, select,
database=database,
overwrite=overwrite)
self._execute(statement)
def drop_table(self, table_name, database=None, force=False):
"""
Drop an Impala table
Parameters
----------
table_name : string
database : string, default None (optional)
force : boolean, default False
Database may throw exception if table does not exist
Examples
--------
con.drop_table('my_table', database='operations', force=True)
"""
statement = ddl.DropTable(table_name, database=database,
must_exist=not force)
self._execute(statement)
def truncate_table(self, table_name, database=None):
"""
Delete all rows from, but do not drop, an existing table
Parameters
----------
table_name : string
database : string, default None (optional)
"""
statement = ddl.TruncateTable(table_name, database=database)
self._execute(statement)
def drop_table_or_view(self, name, database=None, force=False):
"""
Attempt to drop a relation that may be a view or table
"""
try:
self.drop_table(name, database=database)
except Exception as e:
try:
self.drop_view(name, database=database)
except:
raise e
def cache_table(self, table_name, database=None, pool='default'):
"""
Caches a table in cluster memory in the given pool.
Parameters
----------
table_name : string
database : string default None (optional)
pool : string, default 'default'
The name of the pool in which to cache the table
Examples
--------
con.cache_table('my_table', database='operations', pool='op_4GB_pool')
"""
statement = ddl.CacheTable(table_name, database=database, pool=pool)
self._execute(statement)
def _get_table_schema(self, tname):
query = 'SELECT * FROM {0} LIMIT 0'.format(tname)
return self._get_schema_using_query(query)
def _get_schema_using_query(self, query):
with self._execute(query, results=True) as cur:
# resets the state of the cursor and closes operation
cur.fetchall()
names, ibis_types = self._adapt_types(cur.description)
# per #321; most Impala tables will be lower case already, but Avro
# data, depending on the version of Impala, might have field names in
# the metastore cased according to the explicit case in the declared
# avro schema. This is very annoying, so it's easier to just conform on
# all lowercase fields from Impala.
names = [x.lower() for x in names]
return dt.Schema(names, ibis_types)
def create_function(self, func, name=None, database=None):
"""
Creates a function within Impala
Parameters
----------
func : ImpalaUDF or ImpalaUDA
Created with wrap_udf or wrap_uda
name : string (optional)
database : string (optional)
"""
if name is None:
name = func.name
database = database or self.current_database
if isinstance(func, udf.ImpalaUDF):
stmt = ddl.CreateFunction(func.lib_path, func.so_symbol,
func.input_type,
func.output,
name, database)
elif isinstance(func, udf.ImpalaUDA):
stmt = ddl.CreateAggregateFunction(func.lib_path,
func.input_type,
func.output,
func.update_fn,
func.init_fn,
func.merge_fn,
func.serialize_fn,
func.finalize_fn,
name, database)
else:
raise TypeError(func)
self._execute(stmt)
def drop_udf(self, name, input_types=None, database=None, force=False,
aggregate=False):
"""
Drops a UDF
If only name is given, this will search
for the relevant UDF and drop it.
To delete an overloaded UDF, give only a name and force=True
Parameters
----------
name : string
input_types : list of strings (optional)
force : boolean, default False
Must be set to True to drop overloaded UDFs
database : string, default None
aggregate : boolean, default False
"""
if not input_types:
if not database:
database = self.current_database
result = self.list_udfs(database=database, like=name)
if len(result) > 1:
if force:
for func in result:
self._drop_single_function(func.name, func.inputs,
database=database,
aggregate=aggregate)
return
else:
raise Exception("More than one function " +
"with {0} found.".format(name) +
"Please specify force=True")
elif len(result) == 1:
func = result.pop()
self._drop_single_function(func.name, func.inputs,
database=database,
aggregate=aggregate)
return
else:
raise Exception("No function found with name {0}"
.format(name))
self._drop_single_function(name, input_types, database=database,
aggregate=aggregate)
def drop_uda(self, name, input_types=None, database=None, force=False):
"""
Drop aggregate function. See drop_udf for more information on the
parameters.
"""
return self.drop_udf(name, input_types=input_types, database=database,
force=force)
def _drop_single_function(self, name, input_types, database=None,
aggregate=False):
stmt = ddl.DropFunction(name, input_types, must_exist=False,
aggregate=aggregate, database=database)
self._execute(stmt)
def _drop_all_functions(self, database):
udfs = self.list_udfs(database=database)
for fnct in udfs:
stmt = ddl.DropFunction(fnct.name, fnct.inputs, must_exist=False,
aggregate=False, database=database)
self._execute(stmt)
udafs = self.list_udas(database=database)
for udaf in udafs:
stmt = ddl.DropFunction(udaf.name, udaf.inputs, must_exist=False,
aggregate=True, database=database)
self._execute(stmt)
def list_udfs(self, database=None, like=None):
"""
Lists all UDFs associated with given database
Parameters
----------
database : string
like : string for searching (optional)
"""
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=False)
with self._execute(statement, results=True) as cur:
result = self._get_udfs(cur, udf.ImpalaUDF)
return result
def list_udas(self, database=None, like=None):
"""
Lists all UDAFs associated with a given database
Parameters
----------
database : string
like : string for searching (optional)
"""
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=True)
with self._execute(statement, results=True) as cur:
result = self._get_udfs(cur, udf.ImpalaUDA)
return result
def _get_udfs(self, cur, klass):
from ibis.expr.rules import varargs
from ibis.expr.datatypes import validate_type
def _to_type(x):
ibis_type = udf._impala_type_to_ibis(x.lower())
return validate_type(ibis_type)
tuples = cur.fetchall()
if len(tuples) > 0:
result = []
for out_type, sig in tuples:
name, types = _split_signature(sig)
types = _type_parser(types).types
inputs = []
for arg in types:
argm = _arg_type.match(arg)
var, simple = argm.groups()
if simple:
t = _to_type(simple)
inputs.append(t)
else:
t = _to_type(var)
inputs = varargs(t)
# TODO
# inputs.append(varargs(t))
break
output = udf._impala_type_to_ibis(out_type.lower())
result.append(klass(inputs, output, name=name))
return result
else:
return []
def exists_udf(self, name, database=None):
"""
Checks if a given UDF exists within a specified database
Parameters
----------
name : string, UDF name
database : string, database name
Returns
-------
if_exists : boolean
"""
return len(self.list_udfs(database=database, like=name)) > 0
def exists_uda(self, name, database=None):
"""
Checks if a given UDAF exists within a specified database
Parameters
----------
name : string, UDAF name
database : string, database name
Returns
-------
if_exists : boolean
"""
return len(self.list_udas(database=database, like=name)) > 0
def _adapt_types(self, descr):
names = []
adapted_types = []
for col in descr:
names.append(col[0])
impala_typename = col[1]
typename = udf._impala_to_ibis_type[impala_typename.lower()]
if typename == 'decimal':
precision, scale = col[4:6]
adapted_types.append(dt.Decimal(precision, scale))
else:
adapted_types.append(typename)
return names, adapted_types
def _set_limit(query, k):
limited_query = '{0}\nLIMIT {1}'.format(query, k)
return limited_query
def to_stdout(x):
print(x)
# ----------------------------------------------------------------------
# ORM-ish usability layer
class ScalarFunction(DatabaseEntity):
def drop(self):
pass
class AggregateFunction(DatabaseEntity):
def drop(self):
pass
class ImpalaTable(ir.TableExpr, DatabaseEntity):
"""
References a physical table in the Impala-Hive metastore
"""
@property
def _qualified_name(self):
return self.op().args[0]
@property
def _unqualified_name(self):
return self._match_name()[1]
@property
def _client(self):
return self.op().args[2]
def _match_name(self):
m = ddl.fully_qualified_re.match(self._qualified_name)
if not m:
raise com.IbisError('Cannot determine database name from {0}'
.format(self._qualified_name))
db, quoted, unquoted = m.groups()
return db, quoted or unquoted
@property
def _database(self):
return self._match_name()[0]
def compute_stats(self):
"""
Invoke Impala COMPUTE STATS command to compute column, table, and
partition statistics. No return value.
"""
stmt = 'COMPUTE STATS {0}'.format(self._qualified_name)
self._client._execute(stmt)
def drop(self):
"""
Drop the table from the database
"""
self._client.drop_table_or_view(self._qualified_name)
def insert(self, expr, overwrite=False, validate=True):
"""
Insert into Impala table. Wraps ImpalaClient.insert
Parameters
----------
expr : TableExpr
overwrite : boolean, default False
If True, will replace existing contents of table
validate : boolean, default True
If True, do more rigorous validation that schema of table being
inserted is compatible with the existing table
Examples
--------
t.insert(table_expr)
# Completely overwrite contents
t.insert(table_expr, overwrite=True)
"""
self._client.insert(self._qualified_name, expr, overwrite=overwrite,
validate=validate)
def rename(self, new_name, database=None):
"""
Rename table inside Impala.
Beware: mutates table expression in place.
Parameters
----------
new_name : string
database : string
"""
m = ddl.fully_qualified_re.match(new_name)
if not m and database is None:
database = self._database
statement = ddl.RenameTable(self._qualified_name, new_name,
new_database=database)
self._client._execute(statement)
# HACK. Not sure about the best API here...
op = self.op().change_name(statement.new_qualified_name)
self._arg = op
class ImpalaTemporaryTable(ops.DatabaseTable):
def __del__(self):
try:
self.drop()
except com.IbisError:
pass
def drop(self):
try:
self.source.drop_table(self.name)
except ImpylaError:
# database might have been dropped
pass
def pandas_col_to_ibis_type(col):
import pandas.core.common as pdcom
import ibis.expr.datatypes as dt
import numpy as np
dty = col.dtype
# datetime types
if pdcom.is_datetime64_dtype(dty):
if pdcom.is_datetime64_ns_dtype(dty):
return 'timestamp'
else:
raise com.IbisTypeError("Column {0} has dtype {1}, which is "
"datetime64-like but does "
"not use nanosecond units"
.format(col.name, dty))
if pdcom.is_timedelta64_dtype(dty):
print("Warning: encoding a timedelta64 as an int64")
return 'int64'
if pdcom.is_categorical_dtype(dty):
return dt.Category(len(col.cat.categories))
if pdcom.is_bool_dtype(dty):
return 'boolean'
# simple numerical types
if issubclass(dty.type, np.int8):
return 'int8'
if issubclass(dty.type, np.int16):
return 'int16'
if issubclass(dty.type, np.int32):
return 'int32'
if issubclass(dty.type, np.int64):
return 'int64'
if issubclass(dty.type, np.float32):
return 'float'
if issubclass(dty.type, np.float64):
return 'double'
if issubclass(dty.type, np.uint8):
return 'int16'
if issubclass(dty.type, np.uint16):
return 'int32'
if issubclass(dty.type, np.uint32):
return 'int64'
if issubclass(dty.type, np.uint64):
raise com.IbisTypeError("Column {0} is an unsigned int64"
.format(col.name))
if pdcom.is_object_dtype(dty):
# TODO: overly broad?
return 'string'
raise com.IbisTypeError("Column {0} is dtype {1}"
.format(col.name, dty))
def pandas_to_ibis_schema(frame):
from ibis.expr.api import schema
# no analog for decimal in pandas
pairs = []
for col_name in frame:
ibis_type = pandas_col_to_ibis_type(frame[col_name])
pairs.append((col_name, ibis_type))
return schema(pairs)
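# Illustrative sketch of the dtype mapping above (the DataFrame is made up):
#
# df = pd.DataFrame({'a': np.arange(3, dtype='int32'),
#                    'b': np.arange(3, dtype='float64'),
#                    'c': ['x', 'y', 'z']})
# pandas_to_ibis_schema(df)
# # -> schema with [('a', 'int32'), ('b', 'double'), ('c', 'string')]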
def _validate_compatible(from_schema, to_schema):
if from_schema.names != to_schema.names:
raise com.IbisInputError('Schemas have different names')
for lt, rt in zip(from_schema.types, to_schema.types):
if not rt.can_implicit_cast(lt):
raise com.IbisInputError('Cannot safely cast {0!r} to {1!r}'
.format(lt, rt))
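# Illustrative note (behaviour assumed from can_implicit_cast, not verified
# here): a widening insert such as int32 -> int64 would pass this check,
# while an incompatible pair such as string -> int64 would raise
# IbisInputError.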
def _split_signature(x):
name, rest = x.split('(', 1)
return name, rest[:-1]
_arg_type = re.compile(r'(.*)\.\.\.|([^\.]*)')
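# Example matches for the pattern above: 'int...' is captured by the first
# group ('int', a varargs type), while a plain 'string' argument is captured
# by the second group.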
class _type_parser(object):
NORMAL, IN_PAREN = 0, 1
def __init__(self, value):
self.value = value
self.state = self.NORMAL
self.buf = six.StringIO()
self.types = []
for c in value:
self._step(c)
self._push()
def _push(self):
val = self.buf.getvalue().strip()
if val:
self.types.append(val)
self.buf = six.StringIO()
def _step(self, c):
if self.state == self.NORMAL:
if c == '(':
self.state = self.IN_PAREN
elif c == ',':
self._push()
return
elif self.state == self.IN_PAREN:
if c == ')':
self.state = self.NORMAL
self.buf.write(c)
|
apache-2.0
|
FluidityStokes/fluidity
|
tests/mms_rans_p2p1_cv_keps/function_printer.py
|
10
|
1113
|
from mms_keps_p1p1_bouss_tools import *
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
import sys
'''
run using:
python3 function_printer.py AA BB CC DD .. n_rows
where:
AA, BB, CC, DD are names of functions in mms_keps_p1p1_bouss_tools.py (any number can be entered)
n_rows is the number of rows to display the functions on
'''
functions = []
for arg in sys.argv[1:-1]:
functions.append(arg)
n_rows = int(sys.argv[-1])
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=0.2, hspace=0.2)
res = 50
X = linspace(0.0, pi, res)
Y = linspace(0.0, pi, res)
x = [0,0]
data = empty([len(functions), res, res])
for z, function in enumerate(functions):
for j, x[0] in enumerate(X):
for i, x[1] in enumerate(Y):
data[z,i,j] = eval(function + '(x)')
plt.subplot(n_rows, len(functions)//n_rows + 1, z+1)
CS = plt.contour(X, Y, data[z])
plt.clabel(CS, inline=1, fontsize=10)
plt.title(functions[z])
plt.show()
|
lgpl-2.1
|
levythu/swift
|
swift/common/middleware/xprofile.py
|
36
|
9905
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Profiling middleware for Swift Servers.
The current implementation is based on the eventlet-aware profiler. (In the
future, more profilers could be added to collect more data for analysis.)
It profiles all incoming requests and accumulates CPU timing statistics
for performance tuning and optimization. A mini web UI is also
provided for profiling data analysis. It can be accessed from the URLs
below.
Index page for browsing profile data::
http://SERVER_IP:PORT/__profile__
List all profiles to return profile ids in json format::
http://SERVER_IP:PORT/__profile__/
http://SERVER_IP:PORT/__profile__/all
Retrieve specific profile data in different formats::
http://SERVER_IP:PORT/__profile__/PROFILE_ID?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/current?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/all?format=[default|json|csv|ods]
Retrieve metrics from specific function in json format::
http://SERVER_IP:PORT/__profile__/PROFILE_ID/NFL?format=json
http://SERVER_IP:PORT/__profile__/current/NFL?format=json
http://SERVER_IP:PORT/__profile__/all/NFL?format=json
NFL is defined by concatenation of file name, function name and the first
line number.
e.g.::
account.py:50(GETorHEAD)
or with full path:
opt/stack/swift/swift/proxy/controllers/account.py:50(GETorHEAD)
A list of URL examples:
http://localhost:8080/__profile__ (proxy server)
http://localhost:6000/__profile__/all (object server)
http://localhost:6001/__profile__/current (container server)
http://localhost:6002/__profile__/12345?format=json (account server)
The profiling middleware can be configured in paste file for WSGI servers such
as proxy, account, container and object servers. Please refer to the sample
configuration files in etc directory.
The profiling data is provided in four formats: binary (by default),
json, csv and an ODF spreadsheet, which requires installing the odfpy library:
sudo pip install odfpy
There is also a simple visualization capability which is enabled by the
matplotlib toolkit; matplotlib must also be installed if you want to
visualize the statistics:
sudo apt-get install python-matplotlib
"""
import os
import sys
import time
from eventlet import greenthread, GreenPool, patcher
import eventlet.green.profile as eprofile
from swift import gettext_ as _
from swift.common.utils import get_logger, config_true_value
from swift.common.swob import Request
from x_profile.exceptions import NotFoundException, MethodNotAllowed,\
ProfileException
from x_profile.html_viewer import HTMLViewer
from x_profile.profile_model import ProfileLog
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
text_type = str
else:
text_type = unicode
def bytes_(s, encoding='utf-8', errors='strict'):
if isinstance(s, text_type): # pragma: no cover
return s.encode(encoding, errors)
return s
try:
from urllib.parse import parse_qs
except ImportError:
try:
from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
DEFAULT_PROFILE_PREFIX = '/tmp/log/swift/profile/default.profile'
# unwind the iterator; it may call start_response, do lots of work, etc
PROFILE_EXEC_EAGER = """
app_iter = self.app(environ, start_response)
app_iter_ = list(app_iter)
if hasattr(app_iter, 'close'):
app_iter.close()
"""
# don't unwind the iterator (don't consume resources)
PROFILE_EXEC_LAZY = """
app_iter_ = self.app(environ, start_response)
"""
thread = patcher.original('thread') # non-monkeypatched module needed
# This monkey patch fixes a problem with the eventlet profile tool,
# which cannot accumulate profiling results across multiple calls
# of runcall and runctx.
def new_setup(self):
self._has_setup = True
self.cur = None
self.timings = {}
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.simulate_call("profiler")
def new_runctx(self, cmd, globals, locals):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runctx(self, cmd, globals, locals)
finally:
self.TallyTimings()
def new_runcall(self, func, *args, **kw):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runcall(self, func, *args, **kw)
finally:
self.TallyTimings()
class ProfileMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route='profile')
self.log_filename_prefix = conf.get('log_filename_prefix',
DEFAULT_PROFILE_PREFIX)
dirname = os.path.dirname(self.log_filename_prefix)
# Note: this may fail with a permission error; it is better to
# create the directory and grant access to the current
# user in advance.
if not os.path.exists(dirname):
os.makedirs(dirname)
self.dump_interval = float(conf.get('dump_interval', 5.0))
self.dump_timestamp = config_true_value(conf.get(
'dump_timestamp', 'no'))
self.flush_at_shutdown = config_true_value(conf.get(
'flush_at_shutdown', 'no'))
self.path = conf.get('path', '__profile__').replace('/', '')
self.unwind = config_true_value(conf.get('unwind', 'no'))
self.profile_module = conf.get('profile_module',
'eventlet.green.profile')
self.profiler = get_profiler(self.profile_module)
self.profile_log = ProfileLog(self.log_filename_prefix,
self.dump_timestamp)
self.viewer = HTMLViewer(self.path, self.profile_module,
self.profile_log)
self.dump_pool = GreenPool(1000)
self.last_dump_at = None
def __del__(self):
if self.flush_at_shutdown:
self.profile_log.clear(str(os.getpid()))
def _combine_body_qs(self, request):
wsgi_input = request.environ['wsgi.input']
query_dict = request.params
qs_in_body = wsgi_input.read()
query_dict.update(parse_qs(qs_in_body, keep_blank_values=True,
strict_parsing=False))
return query_dict
def dump_checkpoint(self):
current_time = time.time()
if self.last_dump_at is None or self.last_dump_at +\
self.dump_interval < current_time:
self.dump_pool.spawn_n(self.profile_log.dump_profile,
self.profiler, os.getpid())
self.last_dump_at = current_time
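# For illustration (descriptive note, not part of the original code): with
# the default dump_interval of 5.0 seconds, dump_checkpoint() spawns at most
# one profile dump per 5-second window, however many requests arrive in
# between.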
def __call__(self, environ, start_response):
request = Request(environ)
path_entry = request.path_info.split('/')
# hijack the favicon request sent by the browser so that it doesn't
# invoke the profiling hook and contaminate the data.
if path_entry[1] == 'favicon.ico':
start_response('200 OK', [])
return ''
elif path_entry[1] == self.path:
try:
self.dump_checkpoint()
query_dict = self._combine_body_qs(request)
content, headers = self.viewer.render(request.url,
request.method,
path_entry,
query_dict,
self.renew_profile)
start_response('200 OK', headers)
return [bytes_(content)]
except MethodNotAllowed as mx:
start_response('405 Method Not Allowed', [])
return '%s' % mx
except NotFoundException as nx:
start_response('404 Not Found', [])
return '%s' % nx
except ProfileException as pf:
start_response('500 Internal Server Error', [])
return '%s' % pf
except Exception as ex:
start_response('500 Internal Server Error', [])
return _('Error on render profiling results: %s') % ex
else:
_locals = locals()
code = self.unwind and PROFILE_EXEC_EAGER or\
PROFILE_EXEC_LAZY
self.profiler.runctx(code, globals(), _locals)
app_iter = _locals['app_iter_']
self.dump_checkpoint()
return app_iter
def renew_profile(self):
self.profiler = get_profiler(self.profile_module)
def get_profiler(profile_module):
if profile_module == 'eventlet.green.profile':
eprofile.Profile._setup = new_setup
eprofile.Profile.runctx = new_runctx
eprofile.Profile.runcall = new_runcall
# use __import__ so the profile module can be imported by its dotted name
# (this also works on Python 2.6)
__import__(profile_module)
return sys.modules[profile_module].Profile()
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def profile_filter(app):
return ProfileMiddleware(app, conf)
return profile_filter
|
apache-2.0
|
Akshay0724/scikit-learn
|
sklearn/utils/estimator_checks.py
|
7
|
63731
|
from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_estimators_fit_returns_self
# Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_no_nan(name, Estimator):
# Checks that the Estimator raises an error when the targets contain NaN or inf.
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
yield check_get_params_invariance
yield check_dict_unchanged
yield check_no_fit_attributes_set_in_init
yield check_dont_overwrite_parameters
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
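Examples
--------
A minimal usage sketch; ``LogisticRegression`` is only an illustrative
choice of estimator class::

    from sklearn.linear_model import LogisticRegression
    from sklearn.utils.estimator_checks import check_estimator
    check_estimator(LogisticRegression)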
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
try:
check(name, Estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
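# For illustration (an assumed usage example, not part of the tests):
# np.asarray(NotAnArray(np.arange(3))) calls __array__ above and yields
# array([0, 1, 2]); this is how the *_data_not_an_array checks exercise
# array-like inputs that are not ndarrays themselves.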
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
with ignore_warnings(category=DeprecationWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_pandas_series(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_list(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, Estimator):
# SpectralCoclustering raises:
#   ValueError: Found array with 0 feature(s) (shape=(23, 0))
#   while a minimum of 1 is required.
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
# should be just `estimator.fit(X, y)`
# after merging #6141
if name in ['SpectralBiclustering']:
estimator.fit(X)
else:
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
return not (attr.startswith('_') or attr.endswith('_'))
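# For illustration: is_public_parameter('alpha') is True (a constructor
# parameter), while is_public_parameter('coef_') and
# is_public_parameter('_cache') are False (estimated / private attributes).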
def check_dont_overwrite_parameters(name, Estimator):
# check that fit method only changes or sets private attributes
if hasattr(Estimator.__init__, "deprecated_original"):
# to not check deprecated classes
return
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert_true(not attrs_added_by_fit,
('Estimator adds public attribute(s) during'
' the fit method.'
' Estimators are only allowed to add private attributes'
' that start with _ or end'
' with _, but %s added' % ', '.join(attrs_added_by_fit)))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert_true(not attrs_changed_by_fit,
('Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes that start'
' or end with _, but'
' %s changed' % ', '.join(attrs_changed_by_fit)))
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
# check fitting a 2d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
# check fitting a 2d array with only one feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
# check fitting a 1d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings(category=DeprecationWarning)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
@ignore_warnings(category=DeprecationWarning)
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with ignore_warnings(category=DeprecationWarning):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
# Checks that the Estimator raises an error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't handle feature values less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if Estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check that an error is raised if the number of features changes
# between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
if not hasattr(alg, 'partial_fit'):
# check again as for mlp this depends on algorithm
return
set_testing_parameters(alg)
try:
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# the one-vs-one decision_function of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_array_almost_equal(y_log_prob, np.log(y_prob), 8)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=DeprecationWarning)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=DeprecationWarning)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
@ignore_warnings(category=DeprecationWarning)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks that regressors which still expose decision_function or
# predict_proba raise a DeprecationWarning when these are called
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, the default n_iter is likely
# too small for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
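# Numerical illustration of the manual reweighting above (not part of the
# test): with y = [1, 1, 1, -1, -1] we have n_samples = 5, n_classes = 2,
# three +1 labels and two -1 labels, so class_weight[1] = 5 / (3 * 2) = 0.833...
# and class_weight[-1] = 5 / (2 * 2) = 1.25, which is exactly what the
# 'balanced' heuristic computes.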
@ignore_warnings(category=DeprecationWarning)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties: they can be listed in dir()
# while hasattr returns False, as long as the property
# getter raises an AttributeError
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# The getattr below strips a possible deprecation decorator from __init__.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
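# For illustration: for a signature like __init__(self, alpha=1.0, **kwargs)
# only ``alpha`` passes param_filter; ``self`` and the VAR_KEYWORD entry
# are dropped.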
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
# MultiTask estimators raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
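# For illustration: multioutput_estimator_convert_y_2d('MultiTaskLasso',
# np.array([0, 1, 2])) returns [[0], [1], [2]] (shape (3, 1)); for any other
# name the y is returned unchanged.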
@ignore_warnings(category=DeprecationWarning)
def check_non_transformer_estimators_n_iter(name, Estimator):
# Test that estimators that are not transformers and have a
# max_iter parameter set the n_iter_ attribute to at least 1.
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
# LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(name, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
# which doesn't return a n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_transformer_n_iter(name, Estimator):
# Test that transformers with a max_iter parameter set the
# n_iter_ attribute to at least 1.
estimator = Estimator()
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_get_params_invariance(name, estimator):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
|
bsd-3-clause
|
tdhopper/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
98
|
20870
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# The principle of LARS is to keep the covariances tied and decreasing;
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
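# Illustrative helper (assumption, not used by the tests above): the Lasso
# objective the comparison relies on,
# (1. / (2 * n_samples)) * ||y - X w||_2 ** 2 + alpha * ||w||_1
def _lasso_objective(X, y, coef, alpha):
    X, y = np.asarray(X, dtype=float), np.asarray(y, dtype=float)
    n_samples = X.shape[0]
    return (linalg.norm(y - np.dot(X, coef)) ** 2 / (2. * n_samples)
            + alpha * linalg.norm(coef, 1))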
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
    # property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
    # now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
    # This test is basically a copy of the above with the additional positive
    # option. However, for the middle part (the comparison of coefficient
    # values for a range of alphas) we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
|
bsd-3-clause
|
bmmalone/pymisc-utils
|
tests/ml_utils_test.py
|
1
|
1025
|
""" Tests for the `pyllars.ml_utils module.
"""
import pytest
import pyllars.ml_utils as ml_utils
import numpy as np
import pandas as pd
@pytest.fixture
def colors():
""" Create an array with different color names
"""
colors = np.array([
'Green',
'Green',
'Yellow',
'Green',
'Yellow',
'Green',
'Red',
'Red',
'Red'
])
return colors
def test_get_cv_folds_array(colors):
""" Test splitting into folds with a numpy array
"""
expected_output = np.array([1, 1, 0, 0, 1, 0, 0, 1, 0])
actual_output = ml_utils.get_cv_folds(colors, num_splits=2)
np.testing.assert_array_equal(expected_output, actual_output)
def test_get_cv_folds_series(colors):
""" Test splitting into folds with a pandas series
"""
expected_output = pd.Series([1, 1, 0, 0, 1, 0, 0, 1, 0])
actual_output = ml_utils.get_cv_folds(colors, num_splits=2)
np.testing.assert_array_equal(expected_output, actual_output)
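def test_get_cv_folds_usage(colors):
    """ Illustrative sketch (assumption, not in the original suite): the fold
    ids returned by get_cv_folds can be turned into train/validation masks.
    """
    folds = np.asarray(ml_utils.get_cv_folds(colors, num_splits=2))
    for fold in np.unique(folds):
        val_mask = folds == fold
        train_mask = ~val_mask
        assert val_mask.any() and train_mask.any()
        assert val_mask.sum() + train_mask.sum() == len(colors)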
|
mit
|
FederatedAI/FATE
|
examples/benchmark_quality/homo_nn/local-homo_nn.py
|
1
|
3195
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pathlib
import pandas
from pipeline.utils.tools import JobConfig
from tensorflow.keras import Sequential
from tensorflow.keras import optimizers
import tensorflow.keras.layers
from tensorflow.keras.utils import to_categorical
dataset = {
"vehicle": {
"guest": "examples/data/vehicle_scale_homo_guest.csv",
"host": "examples/data/vehicle_scale_homo_host.csv",
},
"breast": {
"guest": "examples/data/breast_homo_guest.csv",
"host": "examples/data/breast_homo_host.csv",
},
}
def main(config="../../config.yaml", param="param_conf.yaml"):
if isinstance(param, str):
param = JobConfig.load_from_file(param)
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
epoch = param["epoch"]
lr = param["lr"]
batch_size = param.get("batch_size", -1)
optimizer_name = param.get("optimizer", "Adam")
loss = param.get("loss", "categorical_crossentropy")
metrics = param.get("metrics", ["accuracy"])
layers = param["layers"]
is_multy = param["is_multy"]
data = dataset[param.get("dataset", "vehicle")]
model = Sequential()
for layer_config in layers:
layer = getattr(tensorflow.keras.layers, layer_config["name"])
layer_params = layer_config["params"]
model.add(layer(**layer_params))
model.compile(
optimizer=getattr(optimizers, optimizer_name)(learning_rate=lr),
loss=loss,
metrics=metrics,
)
data_path = pathlib.Path(data_base_dir)
data_with_label = pandas.concat(
[
pandas.read_csv(data_path.joinpath(data["guest"]), index_col=0),
pandas.read_csv(data_path.joinpath(data["host"]), index_col=0),
]
).values
data = data_with_label[:, 1:]
if is_multy:
labels = to_categorical(data_with_label[:, 0])
else:
labels = data_with_label[:, 0]
if batch_size < 0:
batch_size = len(data_with_label)
model.fit(data, labels, epochs=epoch, batch_size=batch_size)
evaluate = model.evaluate(data, labels)
metric_summary = {"accuracy": evaluate[1]}
data_summary = {}
return data_summary, metric_summary
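# Illustrative sketch (assumption): the shape of the ``param`` mapping that
# ``main`` reads above. Layer names and params follow tf.keras conventions;
# the concrete values are only an example, not the benchmark's official config.
_EXAMPLE_PARAM = {
    "dataset": "vehicle",
    "epoch": 50,
    "lr": 0.01,
    "batch_size": -1,  # negative means "use the full dataset as one batch"
    "optimizer": "Adam",
    "loss": "categorical_crossentropy",
    "metrics": ["accuracy"],
    "is_multy": True,
    "layers": [
        {"name": "Dense", "params": {"units": 32, "activation": "relu"}},
        {"name": "Dense", "params": {"units": 4, "activation": "softmax"}},
    ],
}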
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str, help="config file for params")
args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
|
apache-2.0
|
jeffery-do/Vizdoombot
|
doom/lib/python3.5/site-packages/matplotlib/backends/backend_qt4agg.py
|
8
|
2205
|
"""
Render to qt from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os # not used
import sys
import ctypes
import warnings
import matplotlib
from matplotlib.figure import Figure
from .backend_qt5agg import FigureCanvasQTAggBase as _FigureCanvasQTAggBase
from .backend_agg import FigureCanvasAgg
from .backend_qt4 import QtCore
from .backend_qt4 import FigureManagerQT
from .backend_qt4 import FigureCanvasQT
from .backend_qt4 import NavigationToolbar2QT
##### not used
from .backend_qt4 import show
from .backend_qt4 import draw_if_interactive
from .backend_qt4 import backend_version
######
DEBUG = False
_decref = ctypes.pythonapi.Py_DecRef
_decref.argtypes = [ctypes.py_object]
_decref.restype = None
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG:
print('backend_qt4agg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num)
class FigureCanvasQTAggBase(_FigureCanvasQTAggBase):
def __init__(self, figure):
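        # Flag read by the shared Qt5Agg drawing base class to coalesce
        # deferred (idle) draw requests.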
self._agg_draw_pending = False
class FigureCanvasQTAgg(FigureCanvasQTAggBase,
FigureCanvasQT, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQtAgg: ', figure)
FigureCanvasQT.__init__(self, figure)
FigureCanvasQTAggBase.__init__(self, figure)
FigureCanvasAgg.__init__(self, figure)
self._drawRect = None
self.blitbox = None
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
FigureCanvas = FigureCanvasQTAgg
FigureManager = FigureManagerQT
|
mit
|
gpersistence/tstop
|
scripts/plots/ucr_csv_results.py
|
1
|
2857
|
#TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import argparse
import csv
import numpy as np
import matplotlib.pyplot as plt
infile = sys.argv[1]
full_data = []
keys = None
with open(infile, 'rb') as csvfile :
values = csv.reader(csvfile, delimiter=',')
for row in values :
        if keys is None:
keys = [v for v in row if v != '']
else :
data = dict(zip(keys, row))
full_data.append(data)
plot_data = [(data['Dataset'],
data['Data Type'],
float(data['RBF'][0:-1])/100.0 if data['RBF'] != '' else None,
float(data['Window Size 10'][0:-1])/100.0 if data['Window Size 10'] != '' else None,
float(data['Window Size 20'][0:-1])/100.0 if data['Window Size 20'] != '' else None,
float(data['Window Size 30'][0:-1])/100.0 if data['Window Size 30'] != '' else None,
) for data in full_data]
fig = plt.figure()
ax = fig.add_axes([0.1, 0.3, 0.8, 0.65])
plot_data = [p for p in plot_data if p[2] != None and p[3] != None and p[4] != None and p[5] != None]
plot_data.sort(key=(lambda x: "%s %0.2f" % (x[1], max(x[2:]))))
rbf_xs = [x*9 for x in range(len(plot_data))]
pk_xs = [x*9+3 for x in range(len(plot_data))]
rbf = [p[2] for p in plot_data]
win = [[], [], []]
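# For each dataset, sort the three window-size accuracies and store them as
# (bottom, height) segments so the bars can be stacked in increasing order.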
for p in plot_data:
order = zip(p[3:], [0,1,2])
order.sort()
win[order[0][1]].append((0.0, order[0][0]))
win[order[1][1]].append((order[0][0], order[1][0] - order[0][0]))
win[order[2][1]].append((order[1][0], order[2][0] - order[1][0]))
plots = []
plots.append(ax.bar(left=rbf_xs, height=rbf, width=3, color="#DB7D2B"))
plots.append(ax.bar(left=pk_xs, height=[w[1] for w in win[0]], width=3, bottom=[w[0] for w in win[0]], color="#a8ddb5"))
plots.append(ax.bar(left=pk_xs, height=[w[1] for w in win[1]], width=3, bottom=[w[0] for w in win[1]], color="#7bccc4"))
plots.append(ax.bar(left=pk_xs, height=[w[1] for w in win[2]], width=3, bottom=[w[0] for w in win[2]], color="#4eb3d3"))
ax.set_ylabel("Accuracy")
ax.set_xticks(pk_xs)
ax.set_xticklabels([p[0] for p in plot_data], rotation=90)
plt.legend(plots, ("RBF Kernel", "Window Size 10", "Window Size 20", "Window Size 30"), ncol=4).draggable()
plt.show()
|
gpl-3.0
|
kaz-Anova/ensemble_amazon
|
amazon_main_xgboost_count.py
|
1
|
5957
|
""" Amazon Access Challenge Code for ensemble
Marios Michaildis script for Amazon .
Uses counts as features and xgboost
based on Paul Duan's Script.
"""
from __future__ import division
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
import XGBoostClassifier as xg
from sklearn.cross_validation import StratifiedKFold
import pandas as pd
SEED = 42 # always use a seed for randomized procedures
def load_datacount(tr,te):
    # we will use pandas
train = pd.read_csv(tr, sep=',',quotechar='"')
test = pd.read_csv(te, sep=',',quotechar='"')
label= np.array(train['ACTION']).astype(float)
train.drop('ACTION', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
test.drop('ROLE_CODE', axis=1, inplace=True)
train.drop('ROLE_CODE', axis=1, inplace=True)
train_s = train
test_s = test
result = pd.concat([test_s,train_s])
headers=[f for f in result.columns]
for i in range(train_s.shape[1]):
print headers[i], len(np.unique(result[headers[i]]))
cnt = result[headers[i]].value_counts().to_dict()
#cnt = dict((k, -1) if v < 3 else (k,v) for k, v in cnt.items() ) # if u want to encode rare values as "special"
train_s[headers[i]].replace(cnt, inplace=True)
test_s[headers[i]].replace(cnt, inplace=True)
train = np.array(train_s).astype(float)
test = np.array(test_s).astype(float)
print train.shape
print test.shape
return label, train,test
def save_results(predictions, filename):
"""Given a vector of predictions, save results in CSV format."""
with open(filename, 'w') as f:
f.write("id,ACTION\n")
for i, pred in enumerate(predictions):
f.write("%d,%f\n" % (i + 1, pred))
def bagged_set(X_t,y_c,model, seed, estimators, xt, update_seed=True):
# create array object to hold predictions
baggedpred=[ 0.0 for d in range(0, (xt.shape[0]))]
#loop for as many times as we want bags
for n in range (0, estimators):
        # shuffle first; aids in increasing variance and forces different results
#X_t,y_c=shuffle(Xs,ys, random_state=seed+n)
if update_seed: # update seed if requested, to give a slightly different model
model.set_params(random_state=seed + n)
        model.fit(X_t, y_c)  # fit the model
preds=model.predict_proba(xt)[:,1] # predict probabilities
# update bag's array
for j in range (0, (xt.shape[0])):
baggedpred[j]+=preds[j]
# divide with number of bags to create an average estimate
for j in range (0, len(baggedpred)):
baggedpred[j]/=float(estimators)
# return probabilities
return np.array(baggedpred)
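# Illustrative usage sketch (assumption, not part of the original pipeline):
# bagged_set works with any sklearn-style model exposing random_state and
# predict_proba; RandomForestClassifier is used here only as an example.
def example_bagged_random_forest(X_train, y_train, X_cv, estimators=20):
    from sklearn.ensemble import RandomForestClassifier
    rf = RandomForestClassifier(n_estimators=200, random_state=SEED)
    return bagged_set(X_train, y_train, rf, SEED, estimators, X_cv)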
# using numpy to print results
def printfilcsve(X, filename):
np.savetxt(filename,X)
def main():
"""
Fit models and make predictions.
We'll use one-hot encoding to transform our categorical features
into binary features.
y and X will be numpy array objects.
"""
filename="main_xgboos_count" # nam prefix
#model = linear_model.LogisticRegression(C=3) # the classifier we'll use
model=xg.XGBoostClassifier(num_round=1000 ,nthread=25, eta=0.02, gamma=1,max_depth=20, min_child_weight=0.1, subsample=0.9,
colsample_bytree=0.5,objective='binary:logistic',seed=1)
# === load data in memory === #
print "loading data"
y, X,X_test = load_datacount('train.csv','test.csv')
    # === count encoding === #
    # the categorical IDs were already replaced by their counts (computed on
    # the concatenation of train and test) inside load_datacount above
    # if you want to create new features, you'll need to compute them
    # before the encoding, and append them to your dataset after
    # create arrays to hold cv and test predictions
train_stacker=[ 0.0 for k in range (0,(X.shape[0])) ]
# === training & metrics === #
mean_auc = 0.0
bagging=20 # number of models trained with different seeds
    n = 5  # number of folds in stratified cv
kfolder=StratifiedKFold(y, n_folds= n,shuffle=True, random_state=SEED)
i=0
for train_index, test_index in kfolder: # for each train and test pair of indices in the kfolder object
        # create training and validation sets
X_train, X_cv = X[train_index], X[test_index]
y_train, y_cv = np.array(y)[train_index], np.array(y)[test_index]
#print (" train size: %d. test size: %d, cols: %d " % ((X_train.shape[0]) ,(X_cv.shape[0]) ,(X_train.shape[1]) ))
# if you want to perform feature selection / hyperparameter
# optimization, this is where you want to do it
# train model and make predictions
preds=bagged_set(X_train,y_train,model, SEED , bagging, X_cv, update_seed=True)
# compute AUC metric for this CV fold
roc_auc = roc_auc_score(y_cv, preds)
print "AUC (fold %d/%d): %f" % (i + 1, n, roc_auc)
mean_auc += roc_auc
no=0
for real_index in test_index:
train_stacker[real_index]=(preds[no])
no+=1
i+=1
mean_auc/=n
print (" Average AUC: %f" % (mean_auc) )
print (" printing train datasets ")
printfilcsve(np.array(train_stacker), filename + ".train.csv")
# === Predictions === #
# When making predictions, retrain the model on the whole training set
preds=bagged_set(X, y,model, SEED, bagging, X_test, update_seed=True)
#create submission file
printfilcsve(np.array(preds), filename+ ".test.csv")
#save_results(preds, filename+"_submission_" +str(mean_auc) + ".csv")
if __name__ == '__main__':
main()
|
apache-2.0
|
frc1418/2014
|
driver_station/src/ui/graphwindow.py
|
1
|
3965
|
#
# This file is part of Team 1418 Dashboard
#
# Team 1418 Dashboard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# Team 1418 Dashboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Team 1418 Dashboard. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import time
import math
#from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
#from matplotlib.backends.backend_gtkcairo import FigureCanvasGTKCairo as FigureCanvas
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
from matplotlib.figure import Figure
from numpy import arange
import util
from widgets import (
network_tables,
)
import logging
logger = logging.getLogger(__name__)
class GraphPlot(object):
# glade file to load
ui_filename = "GraphPlot.ui"
# widgets to load from the glade file. Each one of these is added to 'self' after
# you call 'initialize_from_xml'
ui_widgets = [
'window',
'graphImage',
'toolbar'
]
HISTORY = 3
def __init__(self, NetworkTable):
util.initialize_from_xml(self)
self.dead = False
self.plots = []
self.count = 0
self.netTable = NetworkTable
self.netTable.PutBoolean('EnableTuning',True)
self.figure = Figure(figsize=(5,4), dpi=100)
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self.figure) # a gtk.DrawingArea
self.graphImage = util.replace_widget(self.graphImage, self.canvas)
self.toolbar = util.replace_widget(self.toolbar, NavigationToolbar(self.canvas, self.window))
self.window.show_all()
# listen to network tables variables
network_tables.attach_fn(self.netTable, "Catapult Values", self.on_update_CatapultValues, self.window)
network_tables.attach_fn(self.netTable, "EnableTuning", self.on_update_EnableTuning, self.window)
def on_update_EnableTuning(self, key, value):
if not self.dead and not value:
self.netTable.PutBoolean('EnableTuning', True)
def on_update_CatapultValues(self, key, value):
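        # 'Catapult Values' arrives over NetworkTables as a stringified Python
        # list; parse it back into a list of samples before plotting.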
arraybutitsastring = self.netTable.GetString('Catapult Values', key)
print(arraybutitsastring, 'String version')
array=eval(arraybutitsastring)
print(array, 'array version')
self.count += 1
step = 0.025
x = arange(0, len(array)*step, step)
plot = self.axes.plot(x, array, label=str(self.count))
# clear old things
if len(self.axes.lines) > self.HISTORY:
self.axes.lines.pop(0)
self.axes.legend()
self.canvas.draw()
def on_destroy(self, window):
self.dead = True
self.netTable.PutBoolean('EnableTuning',False)
class GraphOpener(object):
def __init__(self, NetworkTable):
self.graphPlot = None
self.netTable = NetworkTable
def show(self):
        if self.graphPlot is None:
self.graphPlot = GraphPlot(self.netTable)
self.graphPlot.window.connect("destroy", self.on_destroy)
def on_destroy(self, widget):
self.graphPlot=None
|
bsd-3-clause
|
allenlavoie/tensorflow
|
tensorflow/python/estimator/canned/linear_testing_utils.py
|
10
|
80463
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for testing linear estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
AGE_WEIGHT_NAME = 'linear/linear_model/age/weights'
HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'
OCCUPATION_WEIGHT_NAME = 'linear/linear_model/occupation/weights'
BIAS_NAME = 'linear/linear_model/bias_weights'
LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
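# Note: assert_close is consumed further below as a graph-mode assertion op,
# wrapped in ops.control_dependencies(...) inside the mocked optimizer.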
def save_variables_to_ckpt(model_dir):
init_all_op = [variables_lib.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
class CheckPartitionerVarHook(session_run_hook.SessionRunHook):
"""A `SessionRunHook` to check a partitioned variable."""
def __init__(self, test_case, var_name, var_dim, partitions):
self._test_case = test_case
self._var_name = var_name
self._var_dim = var_dim
self._partitions = partitions
def begin(self):
with variable_scope.variable_scope(
variable_scope.get_variable_scope()) as scope:
scope.reuse_variables()
partitioned_weight = variable_scope.get_variable(
self._var_name, shape=(self._var_dim, 1))
self._test_case.assertTrue(
isinstance(partitioned_weight, variables_lib.PartitionedVariable))
for part in partitioned_weight:
self._test_case.assertEqual(self._var_dim // self._partitions,
part.get_shape()[0])
class BaseLinearRegressorPartitionerTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def testPartitioner(self):
x_dim = 64
partitions = 4
def _partitioner(shape, dtype):
del dtype # unused; required by Fn signature.
# Only partition the embedding tensor.
return [partitions, 1] if shape[0] == x_dim else [1]
regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
partitioner=_partitioner,
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
def testDefaultPartitionerWithMultiplePsReplicas(self):
partitions = 2
# This results in weights larger than the default partition size of 64M,
# so partitioned weights are created (each weight uses 4 bytes).
x_dim = 32 << 20
class FakeRunConfig(run_config.RunConfig):
@property
def num_ps_replicas(self):
return partitions
# Mock the device setter as ps is not available on test machines.
with test.mock.patch.object(
estimator,
'_get_replica_device_setter',
return_value=lambda _: '/cpu:0'):
linear_regressor = self._linear_regressor_fn(
feature_columns=(
feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
config=FakeRunConfig(),
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaseLinearRegressorEvaluationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with ops.Graph().as_default():
variables_lib.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([2.0], name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables_lib.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([2.0], name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables_lib.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([2.0], name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
      # average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
x_dim = 3
label_dim = 2
with ops.Graph().as_default():
variables_lib.Variable(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([7.0, 8.0], name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column(
'age', shape=(x_dim,)),),
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is
# [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
# [3.0, 4.0]
# [5.0, 6.0]
# which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
def test_evaluation_for_multiple_feature_columns(self):
with ops.Graph().as_default():
variables_lib.Variable([[10.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
variables_lib.Variable([5.0], name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
batch_size = 2
feature_columns = [
feature_column_lib.numeric_column('age'),
feature_column_lib.numeric_column('height')
]
input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([20, 40]),
'height': np.array([4, 8])},
y=np.array([[213.], [421.]]),
batch_size=batch_size,
num_epochs=None,
shuffle=False)
est = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaseLinearRegressorPredictTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables_lib.Variable([[10.]], name='linear/linear_model/x/weights')
variables_lib.Variable([.2], name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x'),),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x * weight + bias = 2. * 10. + .2 = 20.2
self.assertAllClose([[20.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
x_dim = 4
feature_columns = (feature_column_lib.numeric_column('x', shape=(x_dim,)),)
with ops.Graph().as_default():
variables_lib.Variable( # shape=[x_dim, label_dimension]
[[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
name='linear/linear_model/x/weights')
variables_lib.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = x * weight + bias, shape=[batch_size, label_dimension]
self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
predicted_scores)
def testTwoFeatureColumns(self):
"""Tests predict with two feature columns."""
with ops.Graph().as_default():
variables_lib.Variable([[10.]], name='linear/linear_model/x0/weights')
variables_lib.Variable([[20.]], name='linear/linear_model/x1/weights')
variables_lib.Variable([.2], name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1')),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x0': np.array([[2.]]),
'x1': np.array([[3.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
self.assertAllClose([[80.2]], predicted_scores)
class BaseLinearRegressorIntegrationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1 dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaseLinearRegressorTrainingTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer_lib.Optimizer,
wraps=optimizer_lib.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
expected_global_step,
expected_age_weight=None,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertEqual(expected_age_weight,
checkpoint_utils.load_variable(self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([1], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create LinearRegressor.
label = 5.
age = 17
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testFromScratch(self):
# Create LinearRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=num_steps,
expected_age_weight=0.,
expected_bias=0.)
def testFromCheckpoint(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([bias], name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 10. + 5. = 175
# loss = (logits - label)^2 = (175 - 5)^2 = 28900
mock_optimizer = self._mock_optimizer(expected_loss=28900.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([bias], name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias
# logits[0] = 17 * 10. + 5. = 175
# logits[1] = 15 * 10. + 5. = 155
    # loss = sum((logits - label)^2) = (175 - 5)^2 + (155 - 3)^2 = 52004
mock_optimizer = self._mock_optimizer(expected_loss=52004.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
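# A minimal standalone sketch (not part of the original test suite): it
# re-derives, with plain numpy, the sum-of-squares expected loss quoted in the
# comments of the checkpoint-based regressor tests above.
def _expected_regressor_multibatch_loss():
  import numpy as np
  ages = np.array([17., 15.])
  labels = np.array([5., 3.])
  logits = ages * 10. + 5.               # checkpointed age_weight=10., bias=5.
  return np.sum((logits - labels) ** 2)  # (175 - 5)^2 + (155 - 3)^2 = 52004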
class BaseLinearClassifierTrainingTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return distribute_lib.increment_var(global_step)
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
return distribute_lib.increment_var(global_step)
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer_lib.Optimizer,
wraps=optimizer_lib.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_age_weight=None,
expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape for (name, shape) in
checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
checkpoint_utils.load_variable(
self._model_dir, ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, logits_dimension],
shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertAllEqual(expected_age_weight,
checkpoint_utils.load_variable(
self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
checkpoint_utils.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
# For binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
# For multi class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
    # For this particular test case, since the logits are all the same, the
    # formula 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
mock_optimizer = self._mock_optimizer(
expected_loss=-1 * math.log(1.0/n_classes))
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = 17 * age_weight + bias and label = 1
# so, loss = 1 * -log ( soft_max(logits)[1] )
if n_classes == 2:
expected_loss = 1.3133
else:
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
age_weight = [[2.0]]
bias = [-35.0]
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias
# logits[0] = 17 * 2. - 35. = -1.
# logits[1] = 18.5 * 2. - 35. = 2.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = [17, 18.5] * age_weight + bias and label = [1, 0]
# so, loss = 1 * -log ( soft_max(logits)[label] )
if n_classes == 2:
expected_loss = (1.3133 + 2.1269)
else:
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
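# A minimal standalone sketch (not part of the original test suite): it
# reproduces, with plain numpy, the expected-loss arithmetic used in the
# classifier training-test comments above (sigmoid cross-entropy for the
# binary case, softmax cross-entropy for the multi-class case).
def _expected_classifier_losses(age=17., label=1, n_classes=4):
  import numpy as np
  # Binary case: logit = age * 2. - 35. = -1., loss = -log(sigmoid(-1)).
  logit = age * 2. - 35.
  binary_loss = -np.log(1. / (1. + np.exp(-logit)))             # ~1.3133
  # Multi-class case: per-class weights 2. * range(n_classes), bias -35.
  logits = age * 2. * np.arange(n_classes, dtype=np.float64) - 35.
  softmax = np.exp(logits) / np.exp(logits).sum()
  multi_loss = -np.log(softmax[label])
  return binary_loss, multi_loss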
class BaseLinearClassifierEvaluationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
      # Binary classes: loss = sum(cross_entropy(41)) = 41.
expected_metrics = {
metric_keys.MetricKeys.LOSS: 41.,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 41.,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., 1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
expected_loss = 1.3133 * 2
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.25,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., 1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
# weights = [1., 2.]
expected_loss = 1.3133 * (1. + 2.)
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, 1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.1668,
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
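# A minimal standalone sketch (not part of the original test suite): the
# weighted-metric arithmetic from _test_evaluation_weights above, done with
# plain numpy for the binary case.
def _expected_weighted_binary_eval():
  import numpy as np
  labels = np.array([1., 0.])
  weights = np.array([1., 2.])
  logits = np.array([17., 18.]) * 2. - 35.          # (-1., 1.)
  sig = 1. / (1. + np.exp(-logits))
  per_example = -(labels * np.log(sig) + (1. - labels) * np.log(1. - sig))
  loss = np.sum(per_example * weights)              # 1.3133 * (1. + 2.)
  loss_mean = loss / weights.sum()
  label_mean = np.average(labels, weights=weights)
  prediction_mean = np.average(sig, weights=weights)
  return loss, loss_mean, label_mean, prediction_mean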
class BaseLinearClassifierPredictTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = np.asscalar(
np.reshape(np.array(age_weight) * age + bias, (1,)))
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
expected_predictions = {
'class_ids': [0],
'classes': [label_output_fn(0)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))
class_ids = onedim_logits.argmax()
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'classes': [label_output_fn(class_ids)],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredictions(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
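# A minimal standalone sketch (not part of the original test suite): how the
# prediction test above expands a single binary logit into two-class
# probabilities. softmax([0, z]) equals [1 - sigmoid(z), sigmoid(z)].
def _binary_logit_to_probabilities(scalar_logit):
  import numpy as np
  two_class_logits = np.array([0., scalar_logit])
  exps = np.exp(two_class_logits)
  return exps / exps.sum()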
class BaseLinearClassifierIntegrationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_classifier_fn(
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1 dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
class BaseLinearLogitFnTest(object):
def test_basic_logit_correctness(self):
"""linear_logit_fn simply wraps feature_column_lib.linear_model."""
age = feature_column_lib.numeric_column('age')
with ops.Graph().as_default():
logit_fn = linear._linear_logit_fn_builder(units=2, feature_columns=[age])
logits = logit_fn(features={'age': [[23.], [31.]]})
bias_var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
'linear_model/bias_weights')[0]
age_var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
'linear_model/age')[0]
with tf_session.Session() as sess:
sess.run([variables_lib.global_variables_initializer()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var.assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
sess.run(age_var.assign([[2.0, 3.0]]))
# [2 * 23 + 10, 3 * 23 + 5] = [56, 74].
# [2 * 31 + 10, 3 * 31 + 5] = [72, 98]
self.assertAllClose([[56., 74.], [72., 98.]], logits.eval())
def test_compute_fraction_of_zero(self):
"""Tests the calculation of sparsity."""
age = feature_column_lib.numeric_column('age')
occupation = feature_column_lib.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=5)
with ops.Graph().as_default():
cols_to_vars = {}
feature_column_lib.linear_model(
features={
'age': [[23.], [31.]],
'occupation': [['doctor'], ['engineer']]
},
feature_columns=[age, occupation],
units=3,
cols_to_vars=cols_to_vars)
cols_to_vars.pop('bias')
fraction_zero = linear._compute_fraction_of_zero(cols_to_vars)
age_var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
'linear_model/age')[0]
with tf_session.Session() as sess:
sess.run([variables_lib.global_variables_initializer()])
# Upon initialization, all variables will be zero.
self.assertAllClose(1, fraction_zero.eval())
sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
        # 1 of the 3 age weights is zero, and all of the 15 occupation weights (5 hash buckets
# x 3-dim output) are zero.
self.assertAllClose(16. / 18., fraction_zero.eval())
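# A minimal standalone sketch (not part of the original test suite): the
# 16/18 sparsity figure asserted above, computed directly with numpy. One of
# the three age weights is zero and all fifteen occupation weights (5 hash
# buckets x 3 output units) are zero.
def _expected_fraction_of_zero():
  import numpy as np
  age_weights = np.array([[2.0, 0.0, -1.0]])
  occupation_weights = np.zeros((5, 3))
  total = age_weights.size + occupation_weights.size
  zeros = (age_weights == 0.).sum() + (occupation_weights == 0.).sum()
  return float(zeros) / total   # 16. / 18.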
class BaseLinearWarmStartingTest(object):
def __init__(self, _linear_classifier_fn, _linear_regressor_fn):
self._linear_classifier_fn = _linear_classifier_fn
self._linear_regressor_fn = _linear_regressor_fn
def setUp(self):
# Create a directory to save our old checkpoint and vocabularies to.
self._ckpt_and_vocab_dir = tempfile.mkdtemp()
# Make a dummy input_fn.
def _input_fn():
features = {
'age': [[23.], [31.]],
'age_in_years': [[23.], [31.]],
'occupation': [['doctor'], ['consultant']]
}
return features, [0, 1]
self._input_fn = _input_fn
def tearDown(self):
# Clean up checkpoint / vocab dir.
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._ckpt_and_vocab_dir)
def test_classifier_basic_warm_starting(self):
"""Tests correctness of LinearClassifier default warm-start."""
age = feature_column_lib.numeric_column('age')
# Create a LinearClassifier and train to save a checkpoint.
linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD')
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
n_classes=4,
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
warm_start_from=linear_classifier.model_dir)
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_linear_classifier.get_variable_names():
self.assertAllClose(
linear_classifier.get_variable_value(variable_name),
warm_started_linear_classifier.get_variable_value(variable_name))
def test_regressor_basic_warm_starting(self):
"""Tests correctness of LinearRegressor default warm-start."""
age = feature_column_lib.numeric_column('age')
# Create a LinearRegressor and train to save a checkpoint.
linear_regressor = self._linear_regressor_fn(
feature_columns=[age],
model_dir=self._ckpt_and_vocab_dir,
optimizer='SGD')
linear_regressor.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearRegressor, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_regressor = self._linear_regressor_fn(
feature_columns=[age],
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
warm_start_from=linear_regressor.model_dir)
warm_started_linear_regressor.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_linear_regressor.get_variable_names():
self.assertAllClose(
linear_regressor.get_variable_value(variable_name),
warm_started_linear_regressor.get_variable_value(variable_name))
def test_warm_starting_selective_variables(self):
"""Tests selecting variables to warm-start."""
age = feature_column_lib.numeric_column('age')
# Create a LinearClassifier and train to save a checkpoint.
linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD')
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
n_classes=4,
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
# The provided regular expression will only warm-start the age variable
# and not the bias.
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=linear_classifier.model_dir,
vars_to_warm_start='.*(age).*'))
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
self.assertAllClose(
linear_classifier.get_variable_value(AGE_WEIGHT_NAME),
warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
# Bias should still be zero from initialization.
self.assertAllClose(
[0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
def test_warm_starting_with_vocab_remapping_and_partitioning(self):
"""Tests warm-starting with vocab remapping and partitioning."""
vocab_list = ['doctor', 'lawyer', 'consultant']
vocab_file = os.path.join(self._ckpt_and_vocab_dir, 'occupation_vocab')
with open(vocab_file, 'w') as f:
f.write('\n'.join(vocab_list))
occupation = feature_column_lib.categorical_column_with_vocabulary_file(
'occupation',
vocabulary_file=vocab_file,
vocabulary_size=len(vocab_list))
# Create a LinearClassifier and train to save a checkpoint.
partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2)
linear_classifier = self._linear_classifier_fn(
feature_columns=[occupation],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD',
partitioner=partitioner)
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change). Use a new FeatureColumn with a
# different vocabulary for occupation.
new_vocab_list = ['doctor', 'consultant', 'engineer']
new_vocab_file = os.path.join(self._ckpt_and_vocab_dir,
'new_occupation_vocab')
with open(new_vocab_file, 'w') as f:
f.write('\n'.join(new_vocab_list))
new_occupation = feature_column_lib.categorical_column_with_vocabulary_file(
'occupation',
vocabulary_file=new_vocab_file,
vocabulary_size=len(new_vocab_list))
# We can create our VocabInfo object from the new and old occupation
# FeatureColumn's.
occupation_vocab_info = estimator.VocabInfo(
new_vocab=new_occupation.vocabulary_file,
new_vocab_size=new_occupation.vocabulary_size,
num_oov_buckets=new_occupation.num_oov_buckets,
old_vocab=occupation.vocabulary_file,
old_vocab_size=occupation.vocabulary_size,
# Can't use constant_initializer with load_and_remap. In practice,
# use a truncated normal initializer.
backup_initializer=init_ops.random_uniform_initializer(
minval=0.39, maxval=0.39))
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[occupation],
n_classes=4,
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=linear_classifier.model_dir,
var_name_to_vocab_info={
OCCUPATION_WEIGHT_NAME: occupation_vocab_info
},
# Explicitly providing None here will only warm-start variables
# referenced in var_name_to_vocab_info (the bias will not be
# warm-started).
vars_to_warm_start=None),
partitioner=partitioner)
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# 'doctor' was ID-0 and still ID-0.
self.assertAllClose(
linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[0, :],
warm_started_linear_classifier.get_variable_value(
OCCUPATION_WEIGHT_NAME)[0, :])
# 'consultant' was ID-2 and now ID-1.
self.assertAllClose(
linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[2, :],
warm_started_linear_classifier.get_variable_value(
OCCUPATION_WEIGHT_NAME)[1, :])
# 'engineer' is a new entry and should be initialized with the
# backup_initializer in VocabInfo.
self.assertAllClose([0.39] * 4,
warm_started_linear_classifier.get_variable_value(
OCCUPATION_WEIGHT_NAME)[2, :])
# Bias should still be zero (from initialization logic).
self.assertAllClose(
[0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
def test_warm_starting_with_naming_change(self):
"""Tests warm-starting with a Tensor name remapping."""
age_in_years = feature_column_lib.numeric_column('age_in_years')
# Create a LinearClassifier and train to save a checkpoint.
linear_classifier = self._linear_classifier_fn(
feature_columns=[age_in_years],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD')
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[feature_column_lib.numeric_column('age')],
n_classes=4,
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
        # The 'age' variable corresponds to the 'age_in_years' variable in the
# previous model.
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=linear_classifier.model_dir,
var_name_to_prev_var_name={
AGE_WEIGHT_NAME: AGE_WEIGHT_NAME.replace('age', 'age_in_years')
}))
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
self.assertAllClose(
linear_classifier.get_variable_value(
AGE_WEIGHT_NAME.replace('age', 'age_in_years')),
warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
# The bias is also warm-started (with no name remapping).
self.assertAllClose(
linear_classifier.get_variable_value(BIAS_NAME),
warm_started_linear_classifier.get_variable_value(BIAS_NAME))
|
apache-2.0
|
gfetterman/bark
|
bark/tools/datsegment.py
|
2
|
7250
|
import os.path
import numpy as np
import bark
default_fftn = 512
default_step_ms = 1
default_min_syl = 30
default_min_silent = 20
default_threshold = 6
default_highcut = 6e3
default_lowcut = 2e3
def amplitude_stream(data, sr, fftn, step, lowcut, highcut):
    '''returns an iterator yielding the window-center time in seconds
    and the mean log spectral amplitude for each chunk'''
from scipy.signal import hamming
all_fft_freqs = np.fft.fftfreq(fftn, sr** -1)
fft_freqs = (all_fft_freqs >= lowcut) & (all_fft_freqs <= highcut)
window = hamming(fftn)
data = data.ravel()
for i in range(0, len(data) - fftn, step):
x = data[i:i + fftn] * window
fft = np.fft.fft(x, fftn)[fft_freqs]
time = (i + fftn / 2) / sr
yield time, np.mean(np.log(np.abs(fft)))
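# Illustrative usage sketch (not part of the original module). The synthetic
# tone and parameter values below are made up; they only show how the
# generator above is consumed, yielding (window-center time, mean log
# spectral amplitude) pairs.
def _demo_amplitude_stream(sr=30000, step=30):
    import numpy as np
    t = np.arange(sr) / float(sr)
    tone = np.sin(2 * np.pi * 4e3 * t).reshape(-1, 1)   # 1 s of a 4 kHz tone
    return list(amplitude_stream(tone, sr, fftn=default_fftn, step=step,
                                 lowcut=default_lowcut, highcut=default_highcut))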
def amplitude_stream_td(data, sr, fftn, step, lowcut, highcut):
' Time domain version of the amplitude stream'
datastream = bark.stream.Stream(data, sr)
amplitude = (datastream.butter(highpass=lowcut,
lowpass=highcut,
zerophase=False,
order=1).map(abs)
.bessel(lowpass=(step / sr)** -1).rechunk(step))
i = 0
for buffer in amplitude:
yield i / sr, np.log(buffer[0])
i += step
def first_pass(amp_stream, thresh):
'creates segments from all threshold crossings'
starts = []
stops = []
in_syl = False
for time, amp in amp_stream:
if not in_syl and amp >= thresh:
starts.append(time)
in_syl = True
if in_syl and amp < thresh:
stops.append(time)
in_syl = False
# in case the recording ends in a syllable add last point to stops
if in_syl:
stops.append(time)
return starts, stops
def second_pass(starts, stops, min_silent):
' If two syllables are within min_silent, join them'
i = 1
while i < len(starts):
if starts[i] - stops[i - 1] <= min_silent:
stops[i - 1] = stops[i]
del starts[i]
del stops[i]
else:
i += 1
def third_pass(starts, stops, min_syl):
    ' If a syllable is too short, remove it'
i = 0
while i < len(starts):
if stops[i] - starts[i] <= min_syl:
del starts[i]
del stops[i]
else:
i += 1
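# Illustrative sketch (not part of the original module): how the three passes
# combine on a tiny, made-up amplitude trace with a 1 ms step and a threshold
# of 1.0. Values are chosen only to show a short gap being bridged and a
# too-short syllable being dropped.
def _demo_segmentation_passes():
    amp_stream = [(0.000, 0.0), (0.001, 2.0), (0.002, 2.0), (0.003, 0.0),
                  (0.004, 2.0), (0.005, 2.0), (0.006, 0.0), (0.050, 2.0),
                  (0.051, 0.0)]
    starts, stops = first_pass(amp_stream, thresh=1.0)
    # starts == [0.001, 0.004, 0.050], stops == [0.003, 0.006, 0.051]
    second_pass(starts, stops, min_silent=0.002)  # bridges the 1 ms gap
    third_pass(starts, stops, min_syl=0.003)      # drops the 1 ms blip at 0.050
    return starts, stops                          # ([0.001], [0.006])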
def make_attrs(**kwargs):
attrs = kwargs.copy()
attrs['columns'] = {'start': {'units': 's'},
'stop': {'units': 's'},
'name': {'units': None}}
attrs['datatype'] = 2000
return attrs
def main(datname,
outfile=None,
fftn=default_fftn,
step_ms=default_step_ms,
min_syl_ms=default_min_syl,
min_silent_ms=default_min_silent,
thresh=default_threshold,
lowcut=default_lowcut,
highcut=default_highcut,
time_domain=False,
label=''):
from pandas import DataFrame
if not outfile:
outfile = os.path.splitext(datname)[0] + '.csv'
min_syl = min_syl_ms / 1000
min_silent = min_silent_ms / 1000
sampled = bark.read_sampled(datname)
assert sampled.data.shape[1] == 1
sr = sampled.sampling_rate
step = int((step_ms / 1000) * sr) # convert to samples
if time_domain:
amplitude_function = amplitude_stream_td
else:
amplitude_function = amplitude_stream
amp_stream = amplitude_function(sampled.data,
sr,
fftn,
step,
lowcut=lowcut,
highcut=highcut)
start, stop = first_pass(amp_stream, thresh)
second_pass(start, stop, min_silent)
third_pass(start, stop, min_syl)
attrs = make_attrs(segment_source=outfile,
segment_step_ms=step_ms,
segment_thresh=thresh,
segment_lowcut=lowcut,
segment_highcut=highcut,
segment_time_domain=time_domain,
segment_min_syl_ms=min_syl_ms,
segment_min_silent_ms=min_silent_ms)
bark.write_events(outfile,
DataFrame(dict(start=start,
stop=stop,
name=label)),
**attrs)
def _run():
''' Function for getting commandline args.'''
import argparse
p = argparse.ArgumentParser(description='''
Create a segment label file.
Uses method from Koumura & Okanoya 2016.
    First an amplitude envelope within a frequency band is computed.
    Then segments are built from its threshold crossings: any short silent gap
    is bridged, and any too-short syllable is removed.
''')
p.add_argument('dat', help='name of a sampled dataset')
p.add_argument('-o',
'--out',
                   help='name of output event dataset; \
                   defaults to the input file with a .csv extension.')
p.add_argument('-n',
'--fftn',
                   help='number of fft coefficients',
type=int,
default=default_fftn)
p.add_argument('-s',
'--step',
help='step size in milliseconds, default: {}'
.format(default_step_ms),
type=int,
default=default_step_ms)
p.add_argument('--min-syl',
help='minimum syllable length in ms, default: {}'
.format(default_min_syl),
type=int,
default=default_min_syl)
p.add_argument('--min-silent',
help='minimum silence length in ms, default: {}'
.format(default_min_silent),
type=int,
default=default_min_silent)
p.add_argument('-t',
'--threshold',
help='syllable threshold, default: {}'
.format(default_threshold),
default=default_threshold,
type=float)
p.add_argument('--lowfreq',
help='low frequency to use for amplitude, default: {}'
.format(default_lowcut),
default=default_lowcut,
type=float)
p.add_argument('--highfreq',
                   help='high frequency to use for amplitude, default: {}'
.format(default_highcut),
default=default_highcut,
type=float)
p.add_argument('--timedomain',
                   help='uses a time domain thresholding method instead of \
                   spectral; may be faster but less accurate',
action='store_true')
p.add_argument('--label',
help='label to give found segments, default is an empty string',
default='')
args = p.parse_args()
main(args.dat, args.out, args.fftn, args.step, args.min_syl,
args.min_silent, args.threshold, args.lowfreq, args.highfreq,
args.timedomain, args.label)
if __name__ == '__main__':
_run()
|
gpl-2.0
|
bernoullio/toolbox
|
tests/test_ikh.py
|
1
|
1192
|
import pytest
import pandas as pd
from ..forex_toolbox.indicators.ikh import *
def test_mark_signal():
data = pd.read_csv("fixtures/range_ikh.csv")
data = mark_signal(data)
print("Buy:")
print(data.loc[data['buy']==1])
print("Sell:")
print(data.loc[data['sell']==1])
def test_lines_data():
data = pd.read_csv("fixtures/ikh_price.csv")
total_periods = len(data)
ikh_lines = lines_data(data.price)
assert ikh_lines.base.iloc[-1] == pytest.approx(1.28555)
assert ikh_lines.turn.iloc[-1] == pytest.approx(1.28685)
assert ikh_lines.lag.iloc[0] == pytest.approx(1.3175)
assert ikh_lines.cloud1.iloc[-1] == pytest.approx(1.284875)
assert ikh_lines.cloud2.iloc[-1] == pytest.approx(1.28595)
assert set(ikh_lines.keys()) == set(['price', 'base', 'turn', 'lag', 'cloud1', 'cloud2'])
assert len(ikh_lines.base.dropna()) == total_periods - 26 + 1
assert len(ikh_lines.turn.dropna()) == total_periods - 9 + 1
assert len(ikh_lines.lag.dropna()) == total_periods - 26
assert len(ikh_lines.cloud1.dropna()) == total_periods - 52 + 1 # 26 periods ahead
    assert len(ikh_lines.cloud2.dropna()) == total_periods - 52 - 26 + 1
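# Illustrative note (not from the original tests): the dropna() lengths
# asserted above follow from the Ichimoku window sizes -- a rolling window of
# length n over N samples leaves N - n + 1 non-NaN values.
def _rolling_window_length_example(total_periods=100):
    import pandas as pd
    s = pd.Series(range(total_periods), dtype=float)
    assert s.rolling(window=26).mean().dropna().size == total_periods - 26 + 1
    assert s.rolling(window=9).mean().dropna().size == total_periods - 9 + 1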
|
mit
|
numenta/htmresearch
|
projects/combined_sequences/generate_plots.py
|
4
|
23002
|
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file plots the results obtained from combined_sequences.py.
"""
import cPickle
import matplotlib.pyplot as plt
from optparse import OptionParser
import os
import sys
from collections import defaultdict
import numpy
import matplotlib as mpl
import traceback
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
def plotOneInferenceRun(stats,
fields,
basename,
itemType="",
plotDir="plots",
ymax=100,
trialNumber=0):
"""
Plots individual inference runs.
"""
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plt.figure()
# plot request stats
for field in fields:
fieldKey = field[0] + " C0"
plt.plot(stats[fieldKey], marker='+', label=field[1])
# format
plt.legend(loc="upper right")
plt.xlabel("Input number")
plt.xticks(range(stats["numSteps"]))
plt.ylabel("Number of cells")
plt.ylim(-5, ymax)
plt.title("Activity while inferring {}".format(itemType))
# save
relPath = "{}_exp_{}.pdf".format(basename, trialNumber)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close()
def plotMultipleInferenceRun(stats,
fields,
basename,
plotDir="plots"):
"""
Plots individual inference runs.
"""
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plt.figure()
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
# plot request stats
for i, field in enumerate(fields):
fieldKey = field[0] + " C0"
trace = []
for s in stats:
trace += s[fieldKey]
plt.plot(trace, label=field[1], color=colorList[i])
# format
plt.legend(loc="upper right")
plt.xlabel("Input number")
plt.xticks(range(0, len(stats)*stats[0]["numSteps"]+1,5))
plt.ylabel("Number of cells")
plt.ylim(-5, 55)
plt.title("Inferring combined sensorimotor and temporal sequence stream")
# save
relPath = "{}_exp_combined.pdf".format(basename)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close()
def plotAccuracyDuringSensorimotorInference(resultsFig5B, title="", yaxis=""):
"""
Plot accuracy vs number of features
"""
# Read out results and get the ranges we want.
with open(resultsFig5B, "rb") as f:
results = cPickle.load(f)
objectRange = []
featureRange = []
for r in results:
if r["numObjects"] not in objectRange: objectRange.append(r["numObjects"])
if r["numFeatures"] not in featureRange: featureRange.append(r["numFeatures"])
objectRange.sort()
featureRange.sort()
print "objectRange=",objectRange
print "featureRange=",featureRange
########################################################################
#
# Accumulate the TM accuracies for each condition in a list and compute mean
# and stdeviations
# For L2 we average across all feature ranges
accuracies = defaultdict(list)
l2Accuracies = defaultdict(list)
for r in results:
accuracies[(r["numObjects"], r["numFeatures"])].append(r["objectCorrectSparsityTM"])
l2Accuracies[r["numObjects"]].append(r["objectAccuracyL2"])
# meanAccuracy[o,f] = accuracy of TM with o objects and f unique features.
meanAccuracy = numpy.zeros((max(objectRange)+1, max(featureRange) + 1))
stdev = numpy.zeros((max(objectRange)+1, max(featureRange) + 1))
meanL2Accuracy = numpy.zeros(max(objectRange)+1)
stdevL2 = numpy.zeros(max(objectRange)+1)
for o in objectRange:
for f in featureRange:
a = numpy.array(accuracies[(o, f)])
meanAccuracy[o, f] = 100.0*a.mean()
stdev[o, f] = 100.0*a.std()
# Accuracies for L2
a = numpy.array(l2Accuracies[o])
meanL2Accuracy[o] = 100.0*a.mean()
stdevL2[o] = 100.0*a.std()
# Create the plot.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "accuracy_during_sensorimotor_inference.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
legendList.append('Sequence layer, feature pool size: {}'.format(f))
plt.errorbar(objectRange, meanAccuracy[objectRange, f],
yerr=stdev[objectRange, f],
color=colorList[i])
plt.errorbar(objectRange, meanL2Accuracy[objectRange],
yerr=stdevL2[objectRange],
color=colorList[len(featureRange)])
legendList.append('Sensorimotor layer')
# format
plt.legend(legendList, bbox_to_anchor=(0., 0.6, 1., .102), loc="right", prop={'size':10})
plt.xlabel("Number of objects")
plt.ylim(-10.0, 110.0)
plt.ylabel(yaxis)
plt.title(title)
# save
plt.savefig(plotPath)
plt.close()
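# Illustrative sketch (not part of the original script): the accumulate-then-
# average pattern used above, expressed as a small helper. It groups repeated
# runs by a condition key and returns the mean and standard deviation (in
# percent) per condition.
def _meanAndStdByCondition(results, key="numObjects", value="objectAccuracyL2"):
  grouped = defaultdict(list)
  for r in results:
    grouped[r[key]].append(r[value])
  return dict((k, (100.0 * numpy.mean(v), 100.0 * numpy.std(v)))
              for k, v in grouped.items())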
def plotAccuracyDuringDecrementChange(results, title="", yaxis=""):
"""
Plot accuracy vs decrement value
"""
decrementRange = []
featureRange = []
for r in results:
if r["basalPredictedSegmentDecrement"] not in decrementRange:
decrementRange.append(r["basalPredictedSegmentDecrement"])
if r["numFeatures"] not in featureRange:
featureRange.append(r["numFeatures"])
decrementRange.sort()
featureRange.sort()
print decrementRange
print featureRange
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# accuracy[o,f] = accuracy with o objects in training
# and f unique features.
accuracy = numpy.zeros((len(featureRange), len(decrementRange)))
TMAccuracy = numpy.zeros((len(featureRange), len(decrementRange)))
totals = numpy.zeros((len(featureRange), len(decrementRange)))
for r in results:
dec = r["basalPredictedSegmentDecrement"]
nf = r["numFeatures"]
accuracy[featureRange.index(nf), decrementRange.index(dec)] += r["objectAccuracyL2"]
TMAccuracy[featureRange.index(nf), decrementRange.index(dec)] += r["sequenceCorrectClassificationsTM"]
totals[featureRange.index(nf), decrementRange.index(dec)] += 1
for i,f in enumerate(featureRange):
print i, f, accuracy[i] / totals[i]
print i, f, TMAccuracy[i] / totals[i]
print
# ########################################################################
# #
# # Create the plot.
# plt.figure()
# plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
# "plots", "accuracy_during_sensorimotor_inference.pdf")
#
# # Plot each curve
# legendList = []
# colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
#
# for i in range(len(featureRange)):
# f = featureRange[i]
# legendList.append('Sequence layer, feature pool size: {}'.format(f))
# plt.plot(objectRange, accuracy[objectRange, f], color=colorList[i])
#
# plt.plot(objectRange, [100] * len(objectRange),
# color=colorList[len(featureRange)])
# legendList.append('Sensorimotor layer')
#
# # format
# plt.legend(legendList, bbox_to_anchor=(0., 0.6, 1., .102), loc="right", prop={'size':10})
# plt.xlabel("Number of objects")
# plt.ylim(-10.0, 110.0)
# plt.ylabel(yaxis)
# plt.title(title)
#
# # save
# plt.savefig(plotPath)
# plt.close()
def plotAccuracyAndMCsDuringDecrementChange(results, title="", yaxis=""):
"""
Plot accuracy vs decrement value
"""
decrementRange = []
mcRange = []
for r in results:
if r["basalPredictedSegmentDecrement"] not in decrementRange:
decrementRange.append(r["basalPredictedSegmentDecrement"])
if r["inputSize"] not in mcRange:
mcRange.append(r["inputSize"])
decrementRange.sort()
mcRange.sort()
print decrementRange
print mcRange
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
# accuracy[o,f] = accuracy with o objects in training
# and f unique features.
accuracy = numpy.zeros((len(mcRange), len(decrementRange)))
TMAccuracy = numpy.zeros((len(mcRange), len(decrementRange)))
totals = numpy.zeros((len(mcRange), len(decrementRange)))
for r in results:
dec = r["basalPredictedSegmentDecrement"]
nf = r["inputSize"]
accuracy[mcRange.index(nf), decrementRange.index(dec)] += r["objectAccuracyL2"]
TMAccuracy[mcRange.index(nf), decrementRange.index(dec)] += r["sequenceCorrectClassificationsTM"]
totals[mcRange.index(nf), decrementRange.index(dec)] += 1
for i,f in enumerate(mcRange):
print i, f, accuracy[i] / totals[i]
print i, f, TMAccuracy[i] / totals[i]
print i, f, totals[i]
print
# ########################################################################
# #
# # Create the plot.
# plt.figure()
# plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
# "plots", "accuracy_during_sensorimotor_inference.pdf")
#
# # Plot each curve
# legendList = []
# colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
#
# for i in range(len(featureRange)):
# f = featureRange[i]
# legendList.append('Sequence layer, feature pool size: {}'.format(f))
# plt.plot(objectRange, accuracy[objectRange, f], color=colorList[i])
#
# plt.plot(objectRange, [100] * len(objectRange),
# color=colorList[len(featureRange)])
# legendList.append('Sensorimotor layer')
#
# # format
# plt.legend(legendList, bbox_to_anchor=(0., 0.6, 1., .102), loc="right", prop={'size':10})
# plt.xlabel("Number of objects")
# plt.ylim(-10.0, 110.0)
# plt.ylabel(yaxis)
# plt.title(title)
#
# # save
# plt.savefig(plotPath)
# plt.close()
def plotAccuracyDuringSequenceInference(dirName, title="", yaxis=""):
"""
Plot accuracy vs number of locations
"""
# Read in results file
with open(os.path.join(dirName,
"sequence_batch_high_dec_normal_features.pkl"), "rb") as f:
results = cPickle.load(f)
locationRange = []
featureRange = []
for r in results:
if r["numLocations"] not in locationRange: locationRange.append(r["numLocations"])
if r["numFeatures"] not in featureRange: featureRange.append(r["numFeatures"])
locationRange.sort()
featureRange.sort()
if 10 in featureRange: featureRange.remove(10)
print "locationRange=",locationRange
print "featureRange=",featureRange
########################################################################
#
  # Accumulate the L2 accuracies for each condition in a list and compute the
  # mean and standard deviations.
  # For TM we average across all feature ranges.
L2Accuracies = defaultdict(list)
TMAccuracies = defaultdict(list)
for r in results:
if r["numFeatures"] in featureRange:
L2Accuracies[(r["numLocations"], r["numFeatures"])].append(r["sequenceAccuracyL2"])
TMAccuracies[r["numLocations"]].append(r["sequenceCorrectSparsityTM"])
# meanAccuracy[o,f] = accuracy of TM with o objects and f unique features.
meanL2Accuracy = numpy.zeros((max(locationRange)+1, max(featureRange) + 1))
stdevL2 = numpy.zeros((max(locationRange)+1, max(featureRange) + 1))
meanTMAccuracy = numpy.zeros(max(locationRange)+1)
stdevTM = numpy.zeros(max(locationRange)+1)
for o in locationRange:
for f in featureRange:
a = numpy.array(L2Accuracies[(o, f)])
meanL2Accuracy[o, f] = 100.0*a.mean()
stdevL2[o, f] = 100.0*a.std()
# Accuracies for TM
a = numpy.array(TMAccuracies[o])
meanTMAccuracy[o] = 100.0*a.mean()
stdevTM[o] = 100.0*a.std()
########################################################################
#
# Create the plot.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "accuracy_during_sequence_inference.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
legendList.append('Sensorimotor layer, feature pool size: {}'.format(f))
plt.errorbar(locationRange, meanL2Accuracy[locationRange, f],
yerr=stdevL2[locationRange, f],
color=colorList[i])
plt.errorbar(locationRange, meanTMAccuracy[locationRange],
yerr=stdevTM[locationRange],
color=colorList[len(featureRange)])
legendList.append('Temporal sequence layer')
# format
plt.legend(legendList, bbox_to_anchor=(0., 0.65, 1., .102), loc="right", prop={'size':10})
plt.xlabel("Size of location pool")
# plt.xticks(range(0,max(locationRange)+1,10))
# plt.yticks(range(0,int(accuracy.max())+2,10))
plt.ylim(-10.0, 110.0)
plt.ylabel(yaxis)
plt.title(title)
# save
plt.savefig(plotPath)
plt.close()
def plotAccuracyVsSequencesDuringSequenceInference(dirName, title="", yaxis=""):
# Read in results file
with open(os.path.join(dirName, "sequences_range_2048_mcs.pkl"), "rb") as f:
results = cPickle.load(f)
sequenceRange = []
featureRange = []
for r in results:
if r["numSequences"] not in sequenceRange: sequenceRange.append(r["numSequences"])
if r["numFeatures"] not in featureRange: featureRange.append(r["numFeatures"])
sequenceRange.sort()
featureRange.sort()
if 10 in featureRange: featureRange.remove(10)
print "numSequences=",sequenceRange
print "featureRange=",featureRange
########################################################################
#
  # Accumulate the L2 accuracies for each condition in a list and compute the
  # mean and standard deviations.
  # For TM we average across all feature ranges.
L2Accuracies = defaultdict(list)
TMAccuracies = defaultdict(list)
for r in results:
if r["numFeatures"] in featureRange:
L2Accuracies[(r["numSequences"], r["numFeatures"])].append(r["sequenceAccuracyL2"])
TMAccuracies[r["numSequences"]].append(r["sequenceCorrectSparsityTM"])
# meanAccuracy[o,f] = accuracy of TM with o objects and f unique features.
meanL2Accuracy = numpy.zeros((max(sequenceRange)+1, max(featureRange) + 1))
stdevL2 = numpy.zeros((max(sequenceRange)+1, max(featureRange) + 1))
meanTMAccuracy = numpy.zeros(max(sequenceRange)+1)
stdevTM = numpy.zeros(max(sequenceRange)+1)
for o in sequenceRange:
for f in featureRange:
a = numpy.array(L2Accuracies[(o, f)])
meanL2Accuracy[o, f] = 100.0*a.mean()
stdevL2[o, f] = 100.0*a.std()
# Accuracies for TM
a = numpy.array(TMAccuracies[o])
meanTMAccuracy[o] = 100.0*a.mean()
stdevTM[o] = 100.0*a.std()
########################################################################
#
# Create the plot.
plt.figure()
plotPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"plots", "accuracy_vs_sequences_2048_mcs.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
legendList.append('Sensorimotor layer, feature pool size: {}'.format(f))
plt.errorbar(sequenceRange, meanL2Accuracy[sequenceRange, f],
yerr=stdevL2[sequenceRange, f],
color=colorList[i])
plt.errorbar(sequenceRange, meanTMAccuracy[sequenceRange],
yerr=stdevTM[sequenceRange],
color=colorList[len(featureRange)])
legendList.append('Temporal sequence layer')
# format
plt.legend(legendList, bbox_to_anchor=(0., 0.65, 1., .102),
loc="right", prop={'size':10})
plt.xlabel("Number of sequences")
# plt.xticks(range(0,max(locationRange)+1,10))
# plt.yticks(range(0,int(accuracy.max())+2,10))
plt.ylim(-10.0, 110.0)
plt.ylabel(yaxis)
plt.title(title)
# save
plt.savefig(plotPath)
plt.close()
def gen4(dirName):
"""Plots 4A and 4B"""
# Generate images similar to those used in the first plot for the section
# "Simulations with Pure Temporal Sequences"
try:
resultsFig4A = os.path.join(dirName, "pure_sequences_example.pkl")
with open(resultsFig4A, "rb") as f:
results = cPickle.load(f)
for trialNum, stat in enumerate(results["statistics"]):
plotOneInferenceRun(
stat,
itemType="a single sequence",
fields=[
("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
("TM NextPredicted", "Predicted cells in temporal sequence layer"),
("TM PredictedActive",
"Predicted active cells in temporal sequence layer"),
],
basename="pure_sequences",
trialNumber=trialNum,
plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)),
"detailed_plots")
)
print "Plots for Fig 4A generated in 'detailed_plots'"
except Exception, e:
print "\nCould not generate plots for Fig 4A: "
traceback.print_exc()
print
# Generate the second plot for the section "Simulations with Pure
# Temporal Sequences"
try:
plotAccuracyDuringSequenceInference(
dirName,
title="Relative performance of layers while inferring temporal sequences",
yaxis="Accuracy (%)")
print "Plots for Fig 4B generated in 'plots'"
except Exception, e:
print "\nCould not generate plots for Fig 4B: "
traceback.print_exc()
print
# Generate the accuracy vs number of sequences
try:
plotAccuracyVsSequencesDuringSequenceInference(
dirName,
title="Relative performance of layers while inferring temporal sequences",
yaxis="Accuracy (%)")
print "Plots for Fig 4C generated in 'plots'"
except Exception, e:
print "\nCould not generate plots for Fig 4C: "
traceback.print_exc()
print
def gen5(dirName):
# Generate images similar to the first plot for the section "Simulations with
# Sensorimotor Sequences"
try:
resultsFig5A = os.path.join(dirName, "sensorimotor_sequence_example.pkl")
with open(resultsFig5A, "rb") as f:
results = cPickle.load(f)
for trialNum, stat in enumerate(results["statistics"]):
plotOneInferenceRun(
stat,
itemType="a single object",
fields=[
("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
("TM NextPredicted", "Predicted cells in temporal sequence layer"),
("TM PredictedActive",
"Predicted active cells in temporal sequence layer"),
],
basename="sensorimotor_sequences",
trialNumber=trialNum,
ymax=50,
plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)),
"detailed_plots")
)
print "Plots for Fig 5A generated in 'detailed_plots'"
except Exception, e:
print "\nCould not generate plots for Fig 5A: "
traceback.print_exc()
print
# Generate the second plot for the section "Simulations with Sensorimotor
# Sequences"
try:
resultsFig5B = os.path.join(dirName, "sensorimotor_batch_results_more_objects.pkl")
plotAccuracyDuringSensorimotorInference(
resultsFig5B,
title="Relative performance of layers during sensorimotor inference",
yaxis="Accuracy (%)")
print "Plots for Fig 5B generated in 'plots'"
except Exception, e:
print "\nCould not generate plots for Fig 5B: "
traceback.print_exc()
print
def gen6(dirName):
# Generate a plot similar to one in the section "Simulations with Combined
# Sequences". Note that the dashed vertical lines and labels were added in
# manually.
try:
resultsFig6 = os.path.join(dirName, "combined_results.pkl")
# resultsFig6 = os.path.join(dirName, "superimposed_sequence_results.pkl")
if os.path.exists(resultsFig6):
with open(resultsFig6, "rb") as f:
results = cPickle.load(f)
plotMultipleInferenceRun(
results["statistics"][0:10],
fields=[
("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
("TM PredictedActive",
"Predicted active cells in temporal sequence layer"),
],
basename=results["name"],
plotDir=os.path.join(dirName, "plots")
)
print "Plots for Fig 6 generated in 'plots'"
except Exception, e:
print "\nCould not generate plots for Fig 6: "
traceback.print_exc()
print
if __name__ == "__main__":
dirName = os.path.dirname(os.path.realpath(__file__))
parser = OptionParser("python %prog [-h]\n\n"
"Regenerate the plots for every figure, if the "
"appropriate pkl file exists.")
options, args = parser.parse_args(sys.argv[1:])
gen4(dirName)
# gen5(dirName)
# gen6(dirName)
# Generate performance as a function of decrements
# try:
# for fn in [
# # "superimposed_more_increments_500_features.pkl",
# "superimposed_pool_increments_varying_features.pkl",
# "superimposed_more_increments_1000_features.pkl",
# "superimposed_more_increments_varying_features.pkl",
# "superimposed_more_increments_50_features.pkl",
# "superimposed_smaller_mcs.pkl",
# ]:
# # resultsFile = os.path.join(dirName, "superimposed_pool_increments_stripped.pkl")
# resultsFile = os.path.join(dirName, fn)
# print "\n\nFile: ",fn
#
# # Analyze results
# with open(resultsFile, "rb") as f:
# results = cPickle.load(f)
#
# plotAccuracyDuringDecrementChange(results)
#
# # print "Plots for decrements generated in 'plots'"
# except Exception, e:
# print "\nCould not generate plots for decrements: "
# traceback.print_exc()
# print
# Generate performance as a function of minicolumns
# try:
# for fn in [
# "superimposed_range_of_mcs.pkl",
# ]:
# resultsFile = os.path.join(dirName, fn)
# print "\n\nFile: ",fn
#
# # Analyze results
# with open(resultsFile, "rb") as f:
# results = cPickle.load(f)
#
# plotAccuracyAndMCsDuringDecrementChange(results)
#
# # print "Plots for decrements generated in 'plots'"
# except Exception, e:
# print "\nCould not generate plots for decrements: "
# traceback.print_exc()
# print
|
agpl-3.0
|
joshloyal/scikit-learn
|
sklearn/metrics/cluster/tests/test_supervised.py
|
34
|
10313
|
import numpy as np
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import fowlkes_mallows_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.utils.testing import (
assert_equal, assert_almost_equal, assert_raise_message,
)
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
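# Illustrative sketch (values assumed): for the adjusted metrics, scores on
# random labelings should hover around zero, e.g.
#   scores = uniform_labelings_scores(adjusted_rand_score, n_samples=100,
#                                     k_range=[2, 10])
#   scores.mean()  # expected to be close to 0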
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# with provided sparse contingency
C = contingency_matrix(labels_a, labels_b, sparse=True)
mi = mutual_info_score(labels_a, labels_b, contingency=C)
assert_almost_equal(mi, 0.41022, 5)
# with provided dense contingency
C = contingency_matrix(labels_a, labels_b)
mi = mutual_info_score(labels_a, labels_b, contingency=C)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
n_samples = C.sum()
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_expected_mutual_info_overflow():
# Test for regression where contingency cell exceeds 2**16
# leading to overflow in np.outer, resulting in EMI > 1
assert expected_mutual_information(np.array([[70000]]), 70000) <= 1
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_contingency_matrix_sparse():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C_sparse = contingency_matrix(labels_a, labels_b, sparse=True).toarray()
assert_array_almost_equal(C, C_sparse)
C_sparse = assert_raise_message(ValueError,
"Cannot set 'eps' when sparse=True",
contingency_matrix, labels_a, labels_b,
eps=1e-10, sparse=True)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = (np.ones(i, dtype=np.int),
np.arange(i, dtype=np.int))
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = (random_state.randint(0, 10, i),
random_state.randint(0, 10, i))
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
def test_fowlkes_mallows_score():
# General case
score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 2, 2])
assert_almost_equal(score, 4. / np.sqrt(12. * 6.))
# Perfect match but where the label names changed
perfect_score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0])
assert_almost_equal(perfect_score, 1.)
# Worst case
worst_score = fowlkes_mallows_score([0, 0, 0, 0, 0, 0],
[0, 1, 2, 3, 4, 5])
assert_almost_equal(worst_score, 0.)
def test_fowlkes_mallows_score_properties():
# handcrafted example
labels_a = np.array([0, 0, 0, 1, 1, 2])
labels_b = np.array([1, 1, 2, 2, 0, 0])
expected = 1. / np.sqrt((1. + 3.) * (1. + 2.))
# FMI = TP / sqrt((TP + FP) * (TP + FN))
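    # For these labels exactly 1 pair is grouped together in both clusterings,
    # 3 further pairs are grouped only in labels_a and 2 only in labels_b, so
    # the product under the square root is (1 + 3) * (1 + 2) = 12.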
score_original = fowlkes_mallows_score(labels_a, labels_b)
assert_almost_equal(score_original, expected)
    # symmetric property
    score_symmetric = fowlkes_mallows_score(labels_b, labels_a)
    assert_almost_equal(score_symmetric, expected)
# permutation property
score_permuted = fowlkes_mallows_score((labels_a + 1) % 3, labels_b)
assert_almost_equal(score_permuted, expected)
    # symmetric and permutation (both together)
score_both = fowlkes_mallows_score(labels_b, (labels_a + 2) % 3)
assert_almost_equal(score_both, expected)
|
bsd-3-clause
|
metaml/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py
|
69
|
16818
|
"""
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
  Z = texmanager.get_rgba(s, fontsize=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
    # caches of rendered output (rgba / grey arrays, postscript fragments)
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
                    if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
            raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
            raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
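                # Worked example (illustrative numbers): a glyph edge pixel
                # rendered at 40% ink coverage over the white background comes
                # back with red ~= 0.6, so alpha = 1 - 0.6 = 0.4, which is the
                # coverage handed to the rgba compositing in get_rgba.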
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
|
agpl-3.0
|
ky822/Data_Bootcamp
|
Code/Python/radar_chart_tweaks.py
|
1
|
6408
|
"""
Playing around with radar chart code from the Matplotlib examples.
From: http://matplotlib.org/examples/api/radar_chart.html
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def radar_factory(num_vars, frame='circle'):
"""
Create a radar chart with `num_vars` axes.
This function creates a RadarAxes projection and registers it.
Parameters
----------
num_vars : int
Number of variables for radar chart.
frame : {'circle' | 'polygon'}
Shape of frame surrounding axes.
"""
# calculate evenly-spaced axis angles
theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)
# rotate theta such that the first axis is at the top
theta += np.pi/2
def draw_poly_patch(self):
verts = unit_poly_verts(theta)
return plt.Polygon(verts, closed=True, edgecolor='k')
def draw_circle_patch(self):
# unit circle centered on (0.5, 0.5)
return plt.Circle((0.5, 0.5), 0.5)
patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
if frame not in patch_dict:
raise ValueError('unknown value for `frame`: %s' % frame)
class RadarAxes(PolarAxes):
name = 'radar'
# use 1 line segment to connect specified points
RESOLUTION = 1
# define draw_frame method
draw_patch = patch_dict[frame]
def fill(self, *args, **kwargs):
"""Override fill so that line is closed by default"""
closed = kwargs.pop('closed', True)
return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
def plot(self, *args, **kwargs):
"""Override plot so that line is closed by default"""
lines = super(RadarAxes, self).plot(*args, **kwargs)
for line in lines:
self._close_line(line)
def _close_line(self, line):
x, y = line.get_data()
# FIXME: markers at x[0], y[0] get doubled-up
if x[0] != x[-1]:
x = np.concatenate((x, [x[0]]))
y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
def set_varlabels(self, labels):
self.set_thetagrids(theta * 180/np.pi, labels)
def _gen_axes_patch(self):
return self.draw_patch()
def _gen_axes_spines(self):
if frame == 'circle':
return PolarAxes._gen_axes_spines(self)
# The following is a hack to get the spines (i.e. the axes frame)
# to draw correctly for a polygon frame.
# spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.
spine_type = 'circle'
verts = unit_poly_verts(theta)
# close off polygon by repeating first vertex
verts.append(verts[0])
path = Path(verts)
spine = Spine(self, spine_type, path)
spine.set_transform(self.transAxes)
return {'polar': spine}
register_projection(RadarAxes)
return theta
def unit_poly_verts(theta):
"""Return vertices of polygon for subplot axes.
This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
"""
x0, y0, r = [0.5] * 3
verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
return verts
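# Illustrative sketch (not part of the original example): the minimal pattern
# for using radar_factory is to build the angles, which also registers the
# 'radar' projection, and then plot on a 'radar' subplot. Values are made up.
def _radar_usage_sketch():
    theta = radar_factory(5, frame='polygon')
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='radar')
    values = [0.2, 0.4, 0.6, 0.8, 1.0]
    ax.plot(theta, values, color='b')
    ax.fill(theta, values, facecolor='b', alpha=0.25)
    ax.set_varlabels(['A', 'B', 'C', 'D', 'E'])
    return fig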
def example_data():
data = {
'column names':
['Sulfate', 'Nitrate', 'EC', 'OC1', 'OC2', 'OC3', 'OP', 'CO',
'O3'],
'Basecase':
[[0.88, 0.01, 0.03, 0.03, 0.00, 0.06, 0.01, 0.00, 0.00],
[0.07, 0.95, 0.04, 0.05, 0.00, 0.02, 0.01, 0.00, 0.00],
[0.01, 0.02, 0.85, 0.19, 0.05, 0.10, 0.00, 0.00, 0.00],
[0.02, 0.01, 0.07, 0.01, 0.21, 0.12, 0.98, 0.00, 0.00],
[0.01, 0.01, 0.02, 0.71, 0.74, 0.70, 0.00, 0.00, 0.00]],
'With CO':
[[0.88, 0.02, 0.02, 0.02, 0.00, 0.05, 0.00, 0.05, 0.00],
[0.08, 0.94, 0.04, 0.02, 0.00, 0.01, 0.12, 0.04, 0.00],
[0.01, 0.01, 0.79, 0.10, 0.00, 0.05, 0.00, 0.31, 0.00],
[0.00, 0.02, 0.03, 0.38, 0.31, 0.31, 0.00, 0.59, 0.00],
[0.02, 0.02, 0.11, 0.47, 0.69, 0.58, 0.88, 0.00, 0.00]],
'With O3':
[[0.89, 0.01, 0.07, 0.00, 0.00, 0.05, 0.00, 0.00, 0.03],
[0.07, 0.95, 0.05, 0.04, 0.00, 0.02, 0.12, 0.00, 0.00],
[0.01, 0.02, 0.86, 0.27, 0.16, 0.19, 0.00, 0.00, 0.00],
[0.01, 0.03, 0.00, 0.32, 0.29, 0.27, 0.00, 0.00, 0.95],
[0.02, 0.00, 0.03, 0.37, 0.56, 0.47, 0.87, 0.00, 0.00]],
'CO & O3':
[[0.87, 0.01, 0.08, 0.00, 0.00, 0.04, 0.00, 0.00, 0.01],
[0.09, 0.95, 0.02, 0.03, 0.00, 0.01, 0.13, 0.06, 0.00],
[0.01, 0.02, 0.71, 0.24, 0.13, 0.16, 0.00, 0.50, 0.00],
[0.01, 0.03, 0.00, 0.28, 0.24, 0.23, 0.00, 0.44, 0.88],
[0.02, 0.00, 0.18, 0.45, 0.64, 0.55, 0.86, 0.00, 0.16]]}
return data
if __name__ == '__main__':
N = 9
theta = radar_factory(N, frame='polygon')
data = example_data()
spoke_labels = data.pop('column names')
fig = plt.figure(figsize=(9, 9))
fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)
colors = ['b', 'r', 'g', 'm', 'y']
# Plot the four cases from the example data on separate axes
for n, title in enumerate(data.keys()):
ax = fig.add_subplot(2, 2, n+1, projection='radar')
plt.rgrids([0.2, 0.4, 0.6, 0.8])
ax.set_title(title, weight='bold', size='medium', position=(0.5, 1.1),
horizontalalignment='center', verticalalignment='center')
for d, color in zip(data[title], colors):
ax.plot(theta, d, color=color)
ax.fill(theta, d, facecolor=color, alpha=0.25)
ax.set_varlabels(spoke_labels)
# add legend relative to top-left plot
plt.subplot(2, 2, 1)
labels = ('Factor 1', 'Factor 2', 'Factor 3', 'Factor 4', 'Factor 5')
legend = plt.legend(labels, loc=(0.9, .95), labelspacing=0.1)
plt.setp(legend.get_texts(), fontsize='small')
plt.figtext(0.5, 0.965, '5-Factor Solution Profiles Across Four Scenarios',
ha='center', color='black', weight='bold', size='large')
plt.show()
|
mit
|
gpetretto/pymatgen
|
pymatgen/analysis/diffusion_analyzer.py
|
2
|
37836
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import numpy as np
import warnings
import scipy.constants as const
from monty.json import MSONable
from pymatgen.analysis.structure_matcher import StructureMatcher, \
OrderDisorderElementComparator
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.util.coord import pbc_diff
"""
A module to perform diffusion analyses (e.g. calculating diffusivity from
mean square displacements etc.). If you use this module, please consider
citing the following papers::
Ong, S. P., Mo, Y., Richards, W. D., Miara, L., Lee, H. S., & Ceder, G.
(2013). Phase stability, electrochemical stability and ionic conductivity
of the Li10+-1MP2X12 (M = Ge, Si, Sn, Al or P, and X = O, S or Se) family
of superionic conductors. Energy & Environmental Science, 6(1), 148.
doi:10.1039/c2ee23355j
Mo, Y., Ong, S. P., & Ceder, G. (2012). First Principles Study of the
Li10GeP2S12 Lithium Super Ionic Conductor Material. Chemistry of Materials,
24(1), 15-17. doi:10.1021/cm203303y
"""
__author__ = "Will Richards, Shyue Ping Ong"
__version__ = "0.2"
__maintainer__ = "Will Richards"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "5/2/13"
class DiffusionAnalyzer(MSONable):
"""
Class for performing diffusion analysis.
.. attribute: diffusivity
Diffusivity in cm^2 / s
.. attribute: chg_diffusivity
Charge diffusivity in cm^2 / s
.. attribute: conductivity
Conductivity in mS / cm
.. attribute: chg_conductivity
Conductivity derived from Nernst-Einstein equation using charge
diffusivity, in mS / cm
.. attribute: diffusivity_components
A vector with diffusivity in the a, b and c directions in cm^2 / s
.. attribute: conductivity_components
A vector with conductivity in the a, b and c directions in mS / cm
.. attribute: diffusivity_std_dev
Std dev in diffusivity in cm^2 / s. Note that this makes sense only
for non-smoothed analyses.
.. attribute: chg_diffusivity_std_dev
Std dev in charge diffusivity in cm^2 / s. Note that this makes sense only
for non-smoothed analyses.
.. attribute: conductivity_std_dev
Std dev in conductivity in mS / cm. Note that this makes sense only
for non-smoothed analyses.
.. attribute: diffusivity_components_std_dev
        A vector with std dev. in diffusivity in the a, b and c directions in
        cm^2 / s. Note that this makes sense only for non-smoothed analyses.
.. attribute: conductivity_components_std_dev
A vector with std dev. in conductivity in the a, b and c directions
in mS / cm. Note that this makes sense only for non-smoothed analyses.
.. attribute: max_framework_displacement
The maximum (drift adjusted) distance of any framework atom from its
starting location in A.
.. attribute: max_ion_displacements
nions x 1 array of the maximum displacement of each individual ion.
.. attribute: msd
nsteps x 1 array of the mean square displacement of specie.
.. attribute: mscd
nsteps x 1 array of the mean square charge displacement of specie.
.. attribute: msd_components
nsteps x 3 array of the MSD in each lattice direction of specie.
.. attribute: sq_disp_ions
The square displacement of all ion (both specie and other ions) as a
nions x nsteps array.
.. attribute: dt
Time coordinate array.
.. attribute: haven_ratio
Haven ratio defined as diffusivity / chg_diffusivity.
"""
def __init__(self, structure, displacements, specie, temperature,
time_step, step_skip, smoothed="max", min_obs=30,
avg_nsteps=1000, lattices=None):
"""
This constructor is meant to be used with pre-processed data.
Other convenient constructors are provided as class methods (see
from_vaspruns and from_files).
Given a matrix of displacements (see arguments below for expected
format), the diffusivity is given by::
            D = 1 / (2 * d * t) * <mean square displacement>
        where d is the dimensionality and t is the time. To obtain a reliable
        diffusion estimate, a least-squares regression of the MSD against
        time is performed to obtain the slope, which is then related to the
        diffusivity.
For traditional analysis, use smoothed=False and weighted=False.
Args:
structure (Structure): Initial structure.
displacements (array): Numpy array of with shape [site,
time step, axis]
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
temperature (float): Temperature of the diffusion run in Kelvin.
time_step (int): Time step between measurements.
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
smoothed (str): Whether to smooth the MSD, and what mode to smooth.
Supported modes are:
i. "max", which tries to use the maximum #
of data points for each time origin, subject to a
minimum # of observations given by min_obs, and then
weights the observations based on the variance
accordingly. This is the default.
ii. "constant", in which each timestep is averaged over
                    the number of time steps given by avg_nsteps.
iii. None / False / any other false-like quantity. No
smoothing.
min_obs (int): Used with smoothed="max". Minimum number of
observations to have before including in the MSD vs dt
calculation. E.g. If a structure has 10 diffusing atoms,
and min_obs = 30, the MSD vs dt will be
calculated up to dt = total_run_time / 3, so that each
diffusing atom is measured at least 3 uncorrelated times.
Only applies in smoothed="max".
avg_nsteps (int): Used with smoothed="constant". Determines the
number of time steps to average over to get the msd for each
timestep. Default of 1000 is usually pretty good.
lattices (array): Numpy array of lattice matrix of every step. Used
for NPT-AIMD. For NVT-AIMD, the lattice at each time step is
set to the lattice in the "structure" argument.
"""
self.structure = structure
self.disp = displacements
self.specie = specie
self.temperature = temperature
self.time_step = time_step
self.step_skip = step_skip
self.min_obs = min_obs
self.smoothed = smoothed
self.avg_nsteps = avg_nsteps
self.lattices = lattices
if lattices is None:
self.lattices = np.array([structure.lattice.matrix.tolist()])
indices = []
framework_indices = []
for i, site in enumerate(structure):
if site.specie.symbol == specie:
indices.append(i)
else:
framework_indices.append(i)
if self.disp.shape[1] < 2:
self.diffusivity = 0.
self.conductivity = 0.
self.diffusivity_components = np.array([0., 0., 0.])
self.conductivity_components = np.array([0., 0., 0.])
self.max_framework_displacement = 0
else:
framework_disp = self.disp[framework_indices]
drift = np.average(framework_disp, axis=0)[None, :, :]
# drift corrected position
dc = self.disp - drift
nions, nsteps, dim = dc.shape
if not smoothed:
timesteps = np.arange(0, nsteps)
elif smoothed == "constant":
if nsteps <= avg_nsteps:
raise ValueError('Not enough data to calculate diffusivity')
timesteps = np.arange(0, nsteps - avg_nsteps)
else:
# limit the number of sampled timesteps to 200
min_dt = int(1000 / (self.step_skip * self.time_step))
max_dt = min(len(indices) * nsteps // self.min_obs, nsteps)
if min_dt >= max_dt:
raise ValueError('Not enough data to calculate diffusivity')
timesteps = np.arange(min_dt, max_dt,
max(int((max_dt - min_dt) / 200), 1))
dt = timesteps * self.time_step * self.step_skip
# calculate the smoothed msd values
msd = np.zeros_like(dt, dtype=np.double)
sq_disp_ions = np.zeros((len(dc), len(dt)), dtype=np.double)
msd_components = np.zeros(dt.shape + (3,))
# calculate mean square charge displacement
mscd = np.zeros_like(msd, dtype=np.double)
for i, n in enumerate(timesteps):
if not smoothed:
dx = dc[:, i:i + 1, :]
dcomponents = dc[:, i:i + 1, :]
elif smoothed == "constant":
dx = dc[:, i:i + avg_nsteps, :] - dc[:, 0:avg_nsteps, :]
dcomponents = dc[:, i:i + avg_nsteps, :] \
- dc[:, 0:avg_nsteps, :]
else:
dx = dc[:, n:, :] - dc[:, :-n, :]
dcomponents = dc[:, n:, :] - dc[:, :-n, :]
# Get msd
sq_disp = dx ** 2
sq_disp_ions[:, i] = np.average(np.sum(sq_disp, axis=2), axis=1)
msd[i] = np.average(sq_disp_ions[:, i][indices])
msd_components[i] = np.average(dcomponents[indices] ** 2,
axis=(0, 1))
# Get mscd
sq_chg_disp = np.sum(dx[indices, :, :], axis=0) ** 2
mscd[i] = np.average(np.sum(sq_chg_disp, axis=1), axis=0) / len(indices)
def weighted_lstsq(a, b):
if smoothed == "max":
# For max smoothing, we need to weight by variance.
w_root = (1 / dt) ** 0.5
return np.linalg.lstsq(
a * w_root[:, None], b * w_root, rcond=None)
else:
return np.linalg.lstsq(a, b, rcond=None)
# Get self diffusivity
m_components = np.zeros(3)
m_components_res = np.zeros(3)
a = np.ones((len(dt), 2))
a[:, 0] = dt
for i in range(3):
(m, c), res, rank, s = weighted_lstsq(a, msd_components[:, i])
m_components[i] = max(m, 1e-15)
m_components_res[i] = res[0]
(m, c), res, rank, s = weighted_lstsq(a, msd)
# m shouldn't be negative
m = max(m, 1e-15)
# Get also the charge diffusivity
(m_chg, c_chg), res_chg, _, _ = weighted_lstsq(a, mscd)
# m shouldn't be negative
m_chg = max(m_chg, 1e-15)
# factor of 10 is to convert from A^2/fs to cm^2/s
# factor of 6 is for dimensionality
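            # i.e. D [cm^2/s] = slope [A^2/fs] * 0.1 / (2 * 3) = slope / 60,
            # since 1 A^2/fs = 1e-16 cm^2 / 1e-15 s = 0.1 cm^2/s; the per-axis
            # components below use 2 * 1 * 10 = 20 instead.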
conv_factor = get_conversion_factor(self.structure, self.specie,
self.temperature)
self.diffusivity = m / 60
self.chg_diffusivity = m_chg / 60
# Calculate the error in the diffusivity using the error in the
# slope from the lst sq.
# Variance in slope = n * Sum Squared Residuals / (n * Sxx - Sx
# ** 2) / (n-2).
n = len(dt)
# Pre-compute the denominator since we will use it later.
# We divide dt by 1000 to avoid overflow errors in some systems (
# e.g., win). This is subsequently corrected where denom is used.
denom = (n * np.sum((dt / 1000) ** 2) - np.sum(dt / 1000) ** 2) * (
n - 2)
self.diffusivity_std_dev = np.sqrt(n * res[0] / denom) / 60 / 1000
self.chg_diffusivity_std_dev = np.sqrt(n * res_chg[0] / denom) / 60 / 1000
self.conductivity = self.diffusivity * conv_factor
self.chg_conductivity = self.chg_diffusivity * conv_factor
self.conductivity_std_dev = self.diffusivity_std_dev * conv_factor
self.diffusivity_components = m_components / 20
self.diffusivity_components_std_dev = np.sqrt(
n * m_components_res / denom) / 20 / 1000
self.conductivity_components = self.diffusivity_components * \
conv_factor
self.conductivity_components_std_dev = \
self.diffusivity_components_std_dev * conv_factor
# Drift and displacement information.
self.drift = drift
self.corrected_displacements = dc
self.max_ion_displacements = np.max(np.sum(
dc ** 2, axis=-1) ** 0.5, axis=1)
self.max_framework_displacement = \
np.max(self.max_ion_displacements[framework_indices])
self.msd = msd
self.mscd = mscd
self.haven_ratio = self.diffusivity / self.chg_diffusivity
self.sq_disp_ions = sq_disp_ions
self.msd_components = msd_components
self.dt = dt
self.indices = indices
self.framework_indices = framework_indices
def get_drift_corrected_structures(self, start=None, stop=None, step=None):
"""
        Returns an iterator for the drift-corrected structures. An iterator is
        used to reduce memory usage, as the number of structures in an MD run
        can be huge and you rarely need them all at once.
Args:
start, stop, step (int): applies a start/stop/step to the iterator.
Faster than applying it after generation, as it reduces the
number of structures created.
"""
coords = np.array(self.structure.cart_coords)
species = self.structure.species_and_occu
lattices = self.lattices
nsites, nsteps, dim = self.corrected_displacements.shape
for i in range(start or 0, stop or nsteps, step or 1):
latt = lattices[0] if len(lattices) == 1 else lattices[i]
yield Structure(
latt, species,
coords + self.corrected_displacements[:, i, :],
coords_are_cartesian=True)
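    # Illustrative usage (call site assumed, not from this file): iterate over
    # a thinned trajectory without materialising every frame, e.g.
    #   for s in analyzer.get_drift_corrected_structures(step=100):
    #       ...  # per-frame analysis on the drift-corrected Structure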
def get_summary_dict(self, include_msd_t=False, include_mscd_t=False):
"""
Provides a summary of diffusion information.
Args:
            include_msd_t (bool): Whether to include mean square displacement
                and time data with the data.
            include_mscd_t (bool): Whether to include mean square charge
                displacement and time data with the data.
Returns:
(dict) of diffusion and conductivity data.
"""
d = {
"D": self.diffusivity,
"D_sigma": self.diffusivity_std_dev,
"D_charge": self.chg_diffusivity,
"D_charge_sigma": self.chg_diffusivity_std_dev,
"S": self.conductivity,
"S_sigma": self.conductivity_std_dev,
"S_charge": self.chg_conductivity,
"D_components": self.diffusivity_components.tolist(),
"S_components": self.conductivity_components.tolist(),
"D_components_sigma": self.diffusivity_components_std_dev.tolist(),
"S_components_sigma": self.conductivity_components_std_dev.tolist(),
"specie": str(self.specie),
"step_skip": self.step_skip,
"time_step": self.time_step,
"temperature": self.temperature,
"max_framework_displacement": self.max_framework_displacement,
"Haven_ratio": self.haven_ratio
}
if include_msd_t:
d["msd"] = self.msd.tolist()
d["msd_components"] = self.msd_components.tolist()
d["dt"] = self.dt.tolist()
if include_mscd_t:
d["mscd"] = self.mscd.tolist()
return d
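    # Illustrative sketch (parameter values are assumptions, not taken from
    # this file):
    #   analyzer = DiffusionAnalyzer.from_structures(
    #       structures, specie="Li", temperature=1000,
    #       time_step=2, step_skip=10, smoothed="max")
    #   analyzer.get_summary_dict()["D"]  # diffusivity in cm^2 / s
    # from_structures is the classmethod defined further below.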
def get_framework_rms_plot(self, plt=None, granularity=200,
matching_s=None):
"""
Get the plot of rms framework displacement vs time. Useful for checking
for melting, especially if framework atoms can move via paddle-wheel
or similar mechanism (which would show up in max framework displacement
but doesn't constitute melting).
Args:
plt (matplotlib.pyplot): If plt is supplied, changes will be made
to an existing plot. Otherwise, a new plot will be created.
granularity (int): Number of structures to match
matching_s (Structure): Optionally match to a disordered structure
instead of the first structure in the analyzer. Required when
a secondary mobile ion is present.
Notes:
The method doesn't apply to NPT-AIMD simulation analysis.
"""
from pymatgen.util.plotting import pretty_plot
if self.lattices is not None and len(self.lattices) > 1:
warnings.warn("Note the method doesn't apply to NPT-AIMD "
"simulation analysis!")
plt = pretty_plot(12, 8, plt=plt)
step = (self.corrected_displacements.shape[1] - 1) // (granularity - 1)
f = (matching_s or self.structure).copy()
f.remove_species([self.specie])
sm = StructureMatcher(primitive_cell=False, stol=0.6,
comparator=OrderDisorderElementComparator(),
allow_subset=True)
rms = []
for s in self.get_drift_corrected_structures(step=step):
s.remove_species([self.specie])
d = sm.get_rms_dist(f, s)
if d:
rms.append(d)
else:
rms.append((1, 1))
max_dt = (len(rms) - 1) * step * self.step_skip * self.time_step
if max_dt > 100000:
plot_dt = np.linspace(0, max_dt / 1000, len(rms))
unit = 'ps'
else:
plot_dt = np.linspace(0, max_dt, len(rms))
unit = 'fs'
rms = np.array(rms)
plt.plot(plot_dt, rms[:, 0], label='RMS')
plt.plot(plot_dt, rms[:, 1], label='max')
plt.legend(loc='best')
plt.xlabel("Timestep ({})".format(unit))
plt.ylabel("normalized distance")
plt.tight_layout()
return plt
def get_msd_plot(self, plt=None, mode="specie"):
"""
Get the plot of the smoothed msd vs time graph. Useful for
checking convergence. This can be written to an image file.
Args:
plt: A plot object. Defaults to None, which means one will be
generated.
mode (str): Determines type of msd plot. By "species", "sites",
or direction (default). If mode = "mscd", the smoothed mscd vs.
time will be plotted.
"""
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8, plt=plt)
if np.max(self.dt) > 100000:
plot_dt = self.dt / 1000
unit = 'ps'
else:
plot_dt = self.dt
unit = 'fs'
if mode == "species":
for sp in sorted(self.structure.composition.keys()):
indices = [i for i, site in enumerate(self.structure) if
site.specie == sp]
sd = np.average(self.sq_disp_ions[indices, :], axis=0)
plt.plot(plot_dt, sd, label=sp.__str__())
plt.legend(loc=2, prop={"size": 20})
elif mode == "sites":
for i, site in enumerate(self.structure):
sd = self.sq_disp_ions[i, :]
plt.plot(plot_dt, sd, label="%s - %d" % (
site.specie.__str__(), i))
plt.legend(loc=2, prop={"size": 20})
elif mode == "mscd":
plt.plot(plot_dt, self.mscd, 'r')
plt.legend(["Overall"], loc=2, prop={"size": 20})
else:
# Handle default / invalid mode case
plt.plot(plot_dt, self.msd, 'k')
plt.plot(plot_dt, self.msd_components[:, 0], 'r')
plt.plot(plot_dt, self.msd_components[:, 1], 'g')
plt.plot(plot_dt, self.msd_components[:, 2], 'b')
plt.legend(["Overall", "a", "b", "c"], loc=2, prop={"size": 20})
plt.xlabel("Timestep ({})".format(unit))
if mode == "mscd":
plt.ylabel("MSCD ($\\AA^2$)")
else:
plt.ylabel("MSD ($\\AA^2$)")
plt.tight_layout()
return plt
def plot_msd(self, mode="default"):
"""
Plot the smoothed msd vs time graph. Useful for checking convergence.
Args:
mode (str): Can be "default" (the default, shows only the MSD for
the diffusing specie, and its components), "ions" (individual
square displacements of all ions), "species" (mean square
displacement by specie), or "mscd" (overall mean square charge
displacement for diffusing specie).
"""
self.get_msd_plot(mode=mode).show()
def export_msdt(self, filename):
"""
Writes MSD data to a csv file that can be easily plotted in other
software.
Args:
filename (str): Filename. Supported formats are csv and dat. If
the extension is csv, a csv file is written. Otherwise,
a dat format is assumed.
"""
fmt = "csv" if filename.lower().endswith(".csv") else "dat"
delimiter = ", " if fmt == "csv" else " "
with open(filename, "wt") as f:
if fmt == "dat":
f.write("# ")
f.write(delimiter.join(["t", "MSD", "MSD_a", "MSD_b", "MSD_c",
"MSCD"]))
f.write("\n")
for dt, msd, msdc, mscd in zip(self.dt, self.msd,
self.msd_components, self.mscd):
f.write(delimiter.join(["%s" % v for v in [dt, msd] + list(
msdc) + [mscd]]))
f.write("\n")
@classmethod
def from_structures(cls, structures, specie, temperature,
time_step, step_skip, initial_disp=None,
initial_structure=None, **kwargs):
"""
Convenient constructor that takes in a list of Structure objects to
perform diffusion analysis.
Args:
structures ([Structure]): list of Structure objects (must be
ordered in sequence of run). E.g., you may have performed
sequential VASP runs to obtain sufficient statistics.
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
temperature (float): Temperature of the diffusion run in Kelvin.
time_step (int): Time step between measurements.
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
initial_disp (np.ndarray): Sometimes, you need to iteratively
compute estimates of the diffusivity. This supplies an
initial displacement that will be added on to the initial
displacements. Note that this makes sense only when
smoothed=False.
initial_structure (Structure): Like initial_disp, this is used
for iterative computations of estimates of the diffusivity. You
typically need to supply both variables. This stipulates the
initial structure from which the current set of displacements
are computed.
\\*\\*kwargs: kwargs supported by the :class:`DiffusionAnalyzer`_.
Examples include smoothed, min_obs, avg_nsteps.
"""
p, l = [], []
for i, s in enumerate(structures):
if i == 0:
structure = s
p.append(np.array(s.frac_coords)[:, None])
l.append(s.lattice.matrix)
if initial_structure is not None:
p.insert(0, np.array(initial_structure.frac_coords)[:, None])
l.insert(0, initial_structure.lattice.matrix)
else:
p.insert(0, p[0])
l.insert(0, l[0])
p = np.concatenate(p, axis=1)
dp = p[:, 1:] - p[:, :-1]
dp = dp - np.round(dp)
f_disp = np.cumsum(dp, axis=1)
c_disp = []
for i in f_disp:
c_disp.append( [ np.dot(d, m) for d, m in zip(i, l[1:]) ] )
disp = np.array(c_disp)
# If is NVT-AIMD, clear lattice data.
if np.array_equal(l[0], l[-1]):
l = np.array([l[0]])
else:
l = np.array(l)
if initial_disp is not None:
disp += initial_disp[:, None, :]
return cls(structure, disp, specie, temperature, time_step,
step_skip=step_skip, lattices=l, **kwargs)
@classmethod
def from_vaspruns(cls, vaspruns, specie, initial_disp=None,
initial_structure=None, **kwargs):
"""
Convenient constructor that takes in a list of Vasprun objects to
perform diffusion analysis.
Args:
vaspruns ([Vasprun]): List of Vaspruns (must be ordered in
sequence of MD simulation). E.g., you may have performed
sequential VASP runs to obtain sufficient statistics.
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
initial_disp (np.ndarray): Sometimes, you need to iteratively
compute estimates of the diffusivity. This supplies an
initial displacement that will be added on to the initial
displacements. Note that this makes sense only when
smoothed=False.
initial_structure (Structure): Like initial_disp, this is used
for iterative computations of estimates of the diffusivity. You
typically need to supply both variables. This stipulates the
initial structure from which the current set of displacements
are computed.
\\*\\*kwargs: kwargs supported by the :class:`DiffusionAnalyzer`_.
Examples include smoothed, min_obs, avg_nsteps.
"""
def get_structures(vaspruns):
for i, vr in enumerate(vaspruns):
if i == 0:
step_skip = vr.ionic_step_skip or 1
final_structure = vr.initial_structure
temperature = vr.parameters['TEEND']
time_step = vr.parameters['POTIM']
yield step_skip, temperature, time_step
# check that the runs are continuous
fdist = pbc_diff(vr.initial_structure.frac_coords,
final_structure.frac_coords)
if np.any(fdist > 0.001):
raise ValueError('initial and final structures do not '
'match.')
final_structure = vr.final_structure
assert (vr.ionic_step_skip or 1) == step_skip
for s in vr.ionic_steps:
yield s['structure']
s = get_structures(vaspruns)
step_skip, temperature, time_step = next(s)
return cls.from_structures(
structures=list(s), specie=specie, temperature=temperature,
time_step=time_step, step_skip=step_skip,
initial_disp=initial_disp, initial_structure=initial_structure,
**kwargs)
@classmethod
def from_files(cls, filepaths, specie, step_skip=10, ncores=None,
initial_disp=None, initial_structure=None, **kwargs):
"""
Convenient constructor that takes in a list of vasprun.xml paths to
perform diffusion analysis.
Args:
filepaths ([str]): List of paths to vasprun.xml files of runs. (
must be ordered in sequence of MD simulation). For example,
you may have done sequential VASP runs and they are in run1,
run2, run3, etc. You should then pass in
["run1/vasprun.xml", "run2/vasprun.xml", ...].
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
ncores (int): Numbers of cores to use for multiprocessing. Can
speed up vasprun parsing considerably. Defaults to None,
which means serial. It should be noted that if you want to
use multiprocessing, the number of ionic steps in all vasprun
.xml files should be a multiple of the ionic_step_skip.
Otherwise, inconsistent results may arise. Serial mode has no
such restrictions.
initial_disp (np.ndarray): Sometimes, you need to iteratively
compute estimates of the diffusivity. This supplies an
initial displacement that will be added on to the initial
displacements. Note that this makes sense only when
smoothed=False.
initial_structure (Structure): Like initial_disp, this is used
for iterative computations of estimates of the diffusivity. You
typically need to supply both variables. This stipulates the
initial structure from which the current set of displacements
are computed.
\\*\\*kwargs: kwargs supported by the :class:`DiffusionAnalyzer`_.
Examples include smoothed, min_obs, avg_nsteps.
"""
if ncores is not None and len(filepaths) > 1:
import multiprocessing
p = multiprocessing.Pool(ncores)
vaspruns = p.imap(_get_vasprun,
[(fp, step_skip) for fp in filepaths])
analyzer = cls.from_vaspruns(
vaspruns, specie=specie, initial_disp=initial_disp,
initial_structure=initial_structure, **kwargs)
p.close()
p.join()
return analyzer
else:
def vr(filepaths):
offset = 0
for p in filepaths:
v = Vasprun(p, ionic_step_offset=offset,
ionic_step_skip=step_skip)
yield v
# Recompute offset.
offset = (-(v.nionic_steps - offset)) % step_skip
return cls.from_vaspruns(
vr(filepaths), specie=specie, initial_disp=initial_disp,
initial_structure=initial_structure, **kwargs)
def as_dict(self):
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"displacements": self.disp.tolist(),
"specie": self.specie,
"temperature": self.temperature,
"time_step": self.time_step,
"step_skip": self.step_skip,
"min_obs": self.min_obs,
"smoothed": self.smoothed,
"avg_nsteps": self.avg_nsteps,
"lattices": self.lattices.tolist()
}
@classmethod
def from_dict(cls, d):
structure = Structure.from_dict(d["structure"])
return cls(structure, np.array(d["displacements"]), specie=d["specie"],
temperature=d["temperature"], time_step=d["time_step"],
step_skip=d["step_skip"], min_obs=d["min_obs"],
smoothed=d.get("smoothed", "max"),
avg_nsteps=d.get("avg_nsteps", 1000),
lattices=np.array(d.get("lattices",
[d["structure"]["lattice"][
"matrix"]])))
def get_conversion_factor(structure, species, temperature):
"""
Conversion factor to convert between cm^2/s diffusivity measurements and
mS/cm conductivity measurements based on number of atoms of diffusing
species. Note that the charge is based on the oxidation state of the
species (where available), or else the number of valence electrons
(usually a good guess, esp for main group ions).
Args:
structure (Structure): Input structure.
species (Element/Specie): Diffusing species.
temperature (float): Temperature of the diffusion run in Kelvin.
Returns:
Conversion factor.
Conductivity (in mS/cm) = Conversion Factor * Diffusivity (in cm^2/s)
"""
df_sp = get_el_sp(species)
if hasattr(df_sp, "oxi_state"):
z = df_sp.oxi_state
else:
z = df_sp.full_electronic_structure[-1][2]
n = structure.composition[species]
vol = structure.volume * 1e-24 # units cm^3
return 1000 * n / (vol * const.N_A) * z ** 2 * (const.N_A * const.e) ** 2 \
/ (const.R * temperature)
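# Illustrative sketch (not part of the original module): given any pymatgen
# Structure containing Li, convert a hypothetical Li diffusivity of
# 1e-6 cm^2/s at 600 K into a conductivity in mS/cm using the factor above.
def _example_conversion(structure):
    factor = get_conversion_factor(structure, "Li", 600)
    return factor * 1e-6  # conductivity in mS/cm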
def _get_vasprun(args):
"""
Internal method to support multiprocessing.
"""
return Vasprun(args[0], ionic_step_skip=args[1],
parse_dos=False, parse_eigen=False)
def fit_arrhenius(temps, diffusivities):
"""
Returns Ea, c, standard error of Ea from the Arrhenius fit:
D = c * exp(-Ea/kT)
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
"""
t_1 = 1 / np.array(temps)
logd = np.log(diffusivities)
# Do a least squares regression of log(D) vs 1/T
a = np.array([t_1, np.ones(len(temps))]).T
w, res, _, _ = np.linalg.lstsq(a, logd, rcond=None)
w = np.array(w)
n = len(temps)
if n > 2:
std_Ea = (res[0] / (n - 2) / (
n * np.var(t_1))) ** 0.5 * const.k / const.e
else:
std_Ea = None
return -w[0] * const.k / const.e, np.exp(w[1]), std_Ea
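# Illustrative sketch (not part of the original module): fit_arrhenius should
# recover Ea and c from synthetic diffusivities generated with
# D = c * exp(-Ea / kT), here with the assumed values Ea = 0.3 eV and
# c = 1e-3 cm^2/s.
def _example_fit_arrhenius():
    temps = [600, 800, 1000, 1200]
    Ea_true, c_true = 0.3, 1e-3
    diffusivities = [c_true * np.exp(-Ea_true / (const.k / const.e * t))
                     for t in temps]
    Ea, c, std_Ea = fit_arrhenius(temps, diffusivities)
    return Ea, c, std_Ea  # Ea ~ 0.3 eV, c ~ 1e-3 cm^2/s, std_Ea ~ 0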
def get_extrapolated_diffusivity(temps, diffusivities, new_temp):
"""
Returns (Arrhenius) extrapolated diffusivity at new_temp
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
new_temp (float): desired temperature. units: K
Returns:
(float) Diffusivity at extrapolated temp in mS/cm.
"""
Ea, c, _ = fit_arrhenius(temps, diffusivities)
return c * np.exp(-Ea / (const.k / const.e * new_temp))
def get_extrapolated_conductivity(temps, diffusivities, new_temp, structure,
species):
"""
Returns extrapolated mS/cm conductivity.
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
new_temp (float): desired temperature. units: K
structure (structure): Structure used for the diffusivity calculation
species (string/Specie): conducting species
Returns:
(float) Conductivity at extrapolated temp in mS/cm.
"""
return get_extrapolated_diffusivity(temps, diffusivities, new_temp) \
* get_conversion_factor(structure, species, new_temp)
def get_arrhenius_plot(temps, diffusivities, diffusivity_errors=None,
**kwargs):
"""
Returns an Arrhenius plot.
Args:
temps ([float]): A sequence of temperatures.
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity).
diffusivity_errors ([float]): A sequence of errors for the
diffusivities. If None, no error bar is plotted.
\\*\\*kwargs:
Any keyword args supported by matplotlib.pyplot.plot.
Returns:
A matplotlib.pyplot object. Do plt.show() to show the plot.
"""
Ea, c, _ = fit_arrhenius(temps, diffusivities)
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
# Arrhenius fit evaluated at the given temperatures (plotted on a log y-scale)
arr = c * np.exp(-Ea / (const.k / const.e * np.array(temps)))
t_1 = 1000 / np.array(temps)
plt.plot(t_1, diffusivities, 'ko', t_1, arr, 'k--', markersize=10,
**kwargs)
if diffusivity_errors is not None:
n = len(diffusivity_errors)
plt.errorbar(t_1[0:n], diffusivities[0:n], yerr=diffusivity_errors,
fmt='ko', ecolor='k', capthick=2, linewidth=2)
ax = plt.axes()
ax.set_yscale('log')
plt.text(0.6, 0.85, "E$_a$ = {:.0f} meV".format(Ea * 1000),
fontsize=30, transform=plt.axes().transAxes)
plt.ylabel("D (cm$^2$/s)")
plt.xlabel("1000/T (K$^{-1}$)")
plt.tight_layout()
return plt
|
mit
|
uglyboxer/linear_neuron
|
net-p3/lib/python3.5/site-packages/matplotlib/tests/test_contour.py
|
10
|
6418
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import datetime
import numpy as np
from matplotlib import mlab
from matplotlib.testing.decorators import cleanup, image_comparison
from matplotlib import pyplot as plt
import re
@cleanup
def test_contour_shape_1d_valid():
x = np.arange(10)
y = np.arange(9)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contour(x, y, z)
@cleanup
def test_contour_shape_2d_valid():
x = np.arange(10)
y = np.arange(9)
xg, yg = np.meshgrid(x, y)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contour(xg, yg, z)
@cleanup
def test_contour_shape_mismatch_1():
x = np.arange(9)
y = np.arange(9)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Length of x must be number of columns in z.'
@cleanup
def test_contour_shape_mismatch_2():
x = np.arange(10)
y = np.arange(10)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Length of y must be number of rows in z.'
@cleanup
def test_contour_shape_mismatch_3():
x = np.arange(10)
y = np.arange(10)
xg, yg = np.meshgrid(x, y)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(xg, y, z)
except TypeError as exc:
assert exc.args[0] == 'Number of dimensions of x and y should match.'
try:
ax.contour(x, yg, z)
except TypeError as exc:
assert exc.args[0] == 'Number of dimensions of x and y should match.'
@cleanup
def test_contour_shape_mismatch_4():
g = np.random.random((9, 10))
b = np.random.random((9, 9))
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(b, g, z)
except TypeError as exc:
print(exc.args[0])
assert re.match(
r'Shape of x does not match that of z: ' +
r'found \(9L?, 9L?\) instead of \(9L?, 10L?\)\.',
exc.args[0]) is not None
try:
ax.contour(g, b, z)
except TypeError as exc:
assert re.match(
r'Shape of y does not match that of z: ' +
r'found \(9L?, 9L?\) instead of \(9L?, 10L?\)\.',
exc.args[0]) is not None
@cleanup
def test_contour_shape_invalid_1():
x = np.random.random((3, 3, 3))
y = np.random.random((3, 3, 3))
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Inputs x and y must be 1D or 2D.'
@cleanup
def test_contour_shape_invalid_2():
x = np.random.random((3, 3, 3))
y = np.random.random((3, 3, 3))
z = np.random.random((3, 3, 3))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Input z must be a 2D array.'
@image_comparison(baseline_images=['contour_manual_labels'])
def test_contour_manual_labels():
x, y = np.meshgrid(np.arange(0, 10), np.arange(0, 10))
z = np.max(np.dstack([abs(x), abs(y)]), 2)
plt.figure(figsize=(6, 2))
cs = plt.contour(x, y, z)
pts = np.array([(1.5, 3.0), (1.5, 4.4), (1.5, 6.0)])
plt.clabel(cs, manual=pts)
@image_comparison(baseline_images=['contour_manual_colors_and_levels'],
extensions=['png'], remove_text=True)
def test_given_colors_levels_and_extends():
_, axes = plt.subplots(2, 4)
data = np.arange(12).reshape(3, 4)
colors = ['red', 'yellow', 'pink', 'blue', 'black']
levels = [2, 4, 8, 10]
for i, ax in enumerate(axes.flatten()):
plt.sca(ax)
filled = i % 2 == 0.
extend = ['neither', 'min', 'max', 'both'][i // 2]
if filled:
last_color = -1 if extend in ['min', 'max'] else None
plt.contourf(data, colors=colors[:last_color], levels=levels,
extend=extend)
else:
last_level = -1 if extend == 'both' else None
plt.contour(data, colors=colors, levels=levels[:last_level],
extend=extend)
plt.colorbar()
@image_comparison(baseline_images=['contour_datetime_axis'],
extensions=['png'], remove_text=False)
def test_contour_datetime_axis():
fig = plt.figure()
fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)
base = datetime.datetime(2013, 1, 1)
x = np.array([base + datetime.timedelta(days=d) for d in range(20)])
y = np.arange(20)
z1, z2 = np.meshgrid(np.arange(20), np.arange(20))
z = z1 * z2
plt.subplot(221)
plt.contour(x, y, z)
plt.subplot(222)
plt.contourf(x, y, z)
x = np.repeat(x[np.newaxis], 20, axis=0)
y = np.repeat(y[:, np.newaxis], 20, axis=1)
plt.subplot(223)
plt.contour(x, y, z)
plt.subplot(224)
plt.contourf(x, y, z)
for ax in fig.get_axes():
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(30)
@image_comparison(baseline_images=['contour_test_label_transforms'],
extensions=['png'], remove_text=True)
def test_labels():
# Adapted from pylab_examples example code: contour_demo.py
# see issues #2475, #2843, and #2818 for explanation
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
fig, ax = plt.subplots(1, 1)
CS = ax.contour(X, Y, Z)
disp_units = [(216, 177), (359, 290), (521, 406)]
data_units = [(-2, .5), (0, -1.5), (2.8, 1)]
CS.clabel()
for x, y in data_units:
CS.add_label_near(x, y, inline=True, transform=None)
for x, y in disp_units:
CS.add_label_near(x, y, inline=True, transform=False)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
mit
|
aidendoherty/biobankAccelerometerAnalysis
|
accelerometer/accUtils.py
|
1
|
17668
|
"""Module to provide generic utilities for other accelerometer modules."""
from collections import OrderedDict
import datetime
import glob
import json
import math
import numpy as np
import os
import pandas as pd
import re
DAYS = ['mon', 'tue', 'wed', 'thur', 'fri', 'sat', 'sun']
TIME_SERIES_COL = 'time'
def formatNum(num, decimalPlaces):
"""return str of number formatted to number of decimalPlaces
When writing out 10,000's of files, it is useful to format the output to n
decimal places as a space saving measure.
:param float num: Float number to be formatted.
:param int decimalPlaces: Number of decimal places for output format
:return: Number rounded to decimalPlaces decimal places
:rtype: float
:Example:
>>> import accUtils
>>> accUtils.formatNum(2.567, 2)
2.57
"""
fmt = '%.' + str(decimalPlaces) + 'f'
return float(fmt % num)
def meanSDstr(mean, std, numDecimalPlaces):
"""return str of mean and stdev numbers formatted to number of decimalPlaces
:param float mean: Mean number to be formatted.
:param float std: Standard deviation number to be formatted.
:param int numDecimalPlaces: Number of decimal places for output format
:return: String formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.meanSDstr(2.567, 0.089, 2)
2.57 (0.09)
"""
outStr = str(formatNum(mean, numDecimalPlaces))
outStr += ' ('
outStr += str(formatNum(std, numDecimalPlaces))
outStr += ')'
return outStr
def meanCIstr(mean, std, n, numDecimalPlaces):
"""return str of mean and 95% confidence interval numbers formatted
:param float mean: Mean number to be formatted.
:param float std: Standard deviation number to be formatted.
:param int n: Number of observations
:param int numDecimalPlaces: Number of decimal places for output format
:return: String formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.meanCIstr(2.567, 0.089, 100, 2)
2.57 (2.55 - 2.58)
"""
stdErr = std / math.sqrt(n)
lowerCI = mean - 1.96*stdErr
upperCI = mean + 1.96*stdErr
outStr = str(formatNum(mean, numDecimalPlaces))
outStr += ' ('
outStr += str(formatNum(lowerCI, numDecimalPlaces))
outStr += ' - '
outStr += str(formatNum(upperCI, numDecimalPlaces))
outStr += ')'
return outStr
def toScreen(msg):
"""Print msg str prepended with current time
:param str msg: Message to be printed to screen
:return: Print msg str prepended with current time
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.toScreen("hello")
2018-11-28 10:53:18 hello
"""
timeFormat = '%Y-%m-%d %H:%M:%S'
print(f"\n{datetime.datetime.now().strftime(timeFormat)}\t{msg}")
def writeStudyAccProcessCmds(accDir, outDir, cmdsFile='processCmds.txt',
accExt="cwa", cmdOptions=None, filesCSV="files.csv"):
"""Read files to process and write out list of processing commands
This creates the following output directory structure containing all
processing results:
<outDir>/
summary/ #to store outputSummary.json
epoch/ #to store feature output for 30sec windows
timeSeries/ #simple csv time series output (VMag, activity binary predictions)
nonWear/ #bouts of nonwear episodes
stationary/ #temp store for features of stationary data for calibration
clusterLogs/ #to store terminal output for each processed file
If a filesCSV exists in accDir/, process the files listed there. If not,
all files in accDir/ are processed and a filesCSV is created.
An acc processing command is then generated for each file and written to cmdsFile.
:param str accDir: Directory with accelerometer files to process
:param str outDir: Output directory to be created containing the processing results
:param str cmdsFile: Output .txt file listing all processing commands
:param str accExt: Acc file type e.g. cwa, CWA, bin, BIN, gt3x...
:param str cmdOptions: String of processing options e.g. "--epochPeriod 10"
Type 'python3 accProccess.py -h' for full list of options
:param str filesCSV: Name of .csv file listing acc files to process
:return: New file written to <cmdsFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.writeStudyAccProcessCmds("myAccDir/", "myResults/", "myProcessCmds.txt")
<cmd options written to "myProcessCmds.txt">
"""
# Create output directory structure
summaryDir = os.path.join(outDir, 'summary')
epochDir = os.path.join(outDir, 'epoch')
timeSeriesDir = os.path.join(outDir, 'timeSeries')
nonWearDir = os.path.join(outDir, 'nonWear')
stationaryDir = os.path.join(outDir, 'stationary')
logsDir = os.path.join(outDir, 'clusterLogs')
rawDir = os.path.join(outDir, 'raw')
npyDir = os.path.join(outDir, 'npy')
createDirIfNotExists(summaryDir)
createDirIfNotExists(epochDir)
createDirIfNotExists(timeSeriesDir)
createDirIfNotExists(nonWearDir)
createDirIfNotExists(stationaryDir)
createDirIfNotExists(logsDir)
createDirIfNotExists(rawDir)
createDirIfNotExists(npyDir)
createDirIfNotExists(outDir)
# Use filesCSV if provided, else process everything in accDir (and create filesCSV)
if filesCSV in os.listdir(accDir):
fileList = pd.read_csv(os.path.join(accDir, filesCSV))
else:
fileList = pd.DataFrame(
{'fileName': [f for f in os.listdir(accDir) if f.endswith(accExt)]}
)
fileList.to_csv(os.path.join(accDir, filesCSV), index=False)
with open(cmdsFile, 'w') as f:
for i, row in fileList.iterrows():
cmd = [
'python3 accProcess.py "{:s}"'.format(os.path.join(accDir, row['fileName'])),
'--summaryFolder "{:s}"'.format(summaryDir),
'--epochFolder "{:s}"'.format(epochDir),
'--timeSeriesFolder "{:s}"'.format(timeSeriesDir),
'--nonWearFolder "{:s}"'.format(nonWearDir),
'--stationaryFolder "{:s}"'.format(stationaryDir),
'--rawFolder "{:s}"'.format(rawDir),
'--npyFolder "{:s}"'.format(npyDir),
'--outputFolder "{:s}"'.format(outDir)
]
# Grab additional arguments provided in filesCSV (e.g. calibration params)
cmdOptionsCSV = ' '.join(['--{} {}'.format(col, row[col]) for col in fileList.columns[1:]])
if cmdOptions:
cmd.append(cmdOptions)
if cmdOptionsCSV:
cmd.append(cmdOptionsCSV)
cmd = ' '.join(cmd)
f.write(cmd)
f.write('\n')
print('Processing list written to ', cmdsFile)
print('Suggested dir for log files: ', logsDir)
def collateJSONfilesToSingleCSV(inputJsonDir, outputCsvFile):
"""read all summary *.json files and convert into one large CSV file
Each json file represents summary data for one participant. Therefore output
CSV file contains summary for all participants.
:param str inputJsonDir: Directory containing JSON files
:param str outputCsvFile: Output CSV filename
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.collateJSONfilesToSingleCSV("data/", "data/summary-all-files.csv")
<summary CSV of all participants/files written to "data/sumamry-all-files.csv">
"""
### First combine into <tmpJsonFile> the processed outputs from <inputJsonDir>
tmpJsonFile = outputCsvFile.replace('.csv','-tmp.json')
count = 0
with open(tmpJsonFile,'w') as fSummary:
for fStr in glob.glob(inputJsonDir + "*.json"):
if fStr == tmpJsonFile: continue
with open(fStr) as f:
if count == 0:
fSummary.write('[')
else:
fSummary.write(',')
fSummary.write(f.read().rstrip())
count += 1
fSummary.write(']')
### Convert temporary json file into csv file
dict = json.load(open(tmpJsonFile,"r"), object_pairs_hook=OrderedDict) #read json
df = pd.DataFrame.from_dict(dict) #create pandas object from json dict
refColumnItem = next((item for item in dict if item['quality-goodWearTime'] == 1), None)
dAcc = df[list(refColumnItem.keys())] #maintain intended column ordering
# infer participant ID
dAcc['eid'] = dAcc['file-name'].str.split('/').str[-1].str.replace('.CWA','.cwa').str.replace('.cwa','')
dAcc.to_csv(outputCsvFile, index=False)
#remove tmpJsonFile
os.remove(tmpJsonFile)
print('Summary of', str(len(dAcc)), 'participants written to:', outputCsvFile)
def identifyUnprocessedFiles(filesCsv, summaryCsv, outputFilesCsv):
"""identify files that have not been processed
Look through all processed accelerometer files, and find participants who do
not have records in the summary csv file. This indicates there was a problem
in processing their data. Therefore, output will be a new .csv file to
support reprocessing of these files
:param str filesCsv: CSV listing acc files in study directory
:param str summaryCsv: Summary CSV of processed dataset
:param str outputFilesCsv: Output csv listing files to be reprocessed
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.identifyUnprocessedFiles("study/files.csv", study/summary-all-files.csv",
"study/files-reprocess.csv")
<Output csv listing files to be reprocessed written to "study/files-reprocess.csv">
"""
fileList = pd.read_csv(filesCsv)
summary = pd.read_csv(summaryCsv)
output = fileList[~fileList['fileName'].isin(list(summary['file-name']))]
output = output.rename(columns={'Unnamed: 1': ''})
output.to_csv(outputFilesCsv, index=False)
print('Reprocessing for ', len(output), 'participants written to:',
outputFilesCsv)
def updateCalibrationCoefs(inputCsvFile, outputCsvFile):
"""read summary .csv file and update coefs for those with poor calibration
Look through all processed accelerometer files, and find participants that
did not have good calibration data. Then assigns the calibration coefs from
previous good use of a given device. Output will be a new .csv file to
support reprocessing of uncalibrated files with new pre-specified calibration coefs.
:param str inputCsvFile: Summary CSV of processed dataset
:param str outputCsvFile: Output CSV of files to be reprocessed with new
calibration info
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.updateCalibrationCoefs("data/summary-all-files.csv", "study/files-recalibration.csv")
<CSV of files to be reprocessed written to "study/files-recalibration.csv">
"""
d = pd.read_csv(inputCsvFile)
#select participants with good spread of stationary values for calibration
goodCal = d.loc[(d['quality-calibratedOnOwnData']==1) & (d['quality-goodCalibration']==1)]
#now only select participants whose data was NOT calibrated on a good spread of stationary values
badCal = d.loc[(d['quality-calibratedOnOwnData']==1) & (d['quality-goodCalibration']==0)]
#sort files by start time, which makes selection of most recent value easier
goodCal = goodCal.sort_values(['file-startTime'])
badCal = badCal.sort_values(['file-startTime'])
calCols = ['calibration-xOffset(g)','calibration-yOffset(g)','calibration-zOffset(g)',
'calibration-xSlope(g)','calibration-ySlope(g)','calibration-zSlope(g)',
'calibration-xTemp(C)','calibration-yTemp(C)','calibration-zTemp(C)',
'calibration-meanDeviceTemp(C)']
#print output CSV file with suggested calibration parameters
noOtherUses = 0
nextUses = 0
previousUses = 0
f = open(outputCsvFile,'w')
f.write('fileName,calOffset,calSlope,calTemp,meanTemp\n')
for ix, row in badCal.iterrows():
#first get current 'bad' file
participant, device, startTime = row[['file-name','file-deviceID','file-startTime']]
device = int(device)
#get calibration values from most recent previous use of this device
# (when it had a 'good' calibration)
prevUse = goodCal[calCols][(goodCal['file-deviceID']==device) & (goodCal['file-startTime']<startTime)].tail(1)
try:
ofX, ofY, ofZ, slpX, slpY, slpZ, tmpX, tmpY, tmpZ, calTempAvg = prevUse.iloc[0]
previousUses += 1
except:
nextUse = goodCal[calCols][(goodCal['file-deviceID']==device) & (goodCal['file-startTime']>startTime)].head(1)
if len(nextUse)<1:
print('no other uses for this device at all: ', str(device),
str(participant))
noOtherUses += 1
continue
nextUses += 1
ofX, ofY, ofZ, slpX, slpY, slpZ, tmpX, tmpY, tmpZ, calTempAvg = nextUse.iloc[0]
#now construct output
out = participant + ','
out += str(ofX) + ' ' + str(ofY) + ' ' + str(ofZ) + ','
out += str(slpX) + ' ' + str(slpY) + ' ' + str(slpZ) + ','
out += str(tmpX) + ' ' + str(tmpY) + ' ' + str(tmpZ) + ','
out += str(calTempAvg)
f.write(out + '\n')
f.close()
print('previousUses', previousUses)
print('nextUses', nextUses)
print('noOtherUses', noOtherUses)
print('Reprocessing for ', str(previousUses + nextUses),
'participants written to:', outputCsvFile)
def writeFilesWithCalibrationCoefs(inputCsvFile, outputCsvFile):
"""read summary .csv file and write files.csv with calibration coefs
Look through all processed accelerometer files, and write a new .csv file to
support reprocessing of files with pre-specified calibration coefs.
:param str inputCsvFile: Summary CSV of processed dataset
:param str outputCsvFile: Output CSV of files to process with calibration info
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.writeFilesWithCalibrationCoefs("data/summary-all-files.csv",
>>> "study/files-calibrated.csv")
<CSV of files to be reprocessed written to "study/files-calibrated.csv">
"""
d = pd.read_csv(inputCsvFile)
calCols = ['calibration-xOffset(g)','calibration-yOffset(g)','calibration-zOffset(g)',
'calibration-xSlope(g)','calibration-ySlope(g)','calibration-zSlope(g)',
'calibration-xTemp(C)','calibration-yTemp(C)','calibration-zTemp(C)',
'calibration-meanDeviceTemp(C)']
#print output CSV file with suggested calibration parameters
f = open(outputCsvFile,'w')
f.write('fileName,calOffset,calSlope,calTemp,meanTemp\n')
for ix, row in d.iterrows():
#first get current file information
participant = str(row['file-name'])
ofX, ofY, ofZ, slpX, slpY, slpZ, tmpX, tmpY, tmpZ, calTempAvg = row[calCols]
#now construct output
out = participant + ','
out += str(ofX) + ' ' + str(ofY) + ' ' + str(ofZ) + ','
out += str(slpX) + ' ' + str(slpY) + ' ' + str(slpZ) + ','
out += str(tmpX) + ' ' + str(tmpY) + ' ' + str(tmpZ) + ','
out += str(calTempAvg)
f.write(out + '\n')
f.close()
print('Files with calibration coefficients for ', str(len(d)),
'participants written to:', outputCsvFile)
def createDirIfNotExists(folder):
""" Create directory if it doesn't currently exist
:param str folder: Directory to be checked/created
:return: Dir now exists (created if didn't exist before, otherwise untouched)
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.createDirIfNotExists("/myStudy/summary/dec18/")
<folder "/myStudy/summary/dec18/" now exists>
"""
if not os.path.exists(folder):
os.makedirs(folder)
def date_parser(t):
'''
Parse date a date string of the form e.g.
2020-06-14 19:01:15.123+0100 [Europe/London]
'''
tz = re.search(r'(?<=\[).+?(?=\])', t)
if tz is not None:
tz = tz.group()
t = re.sub(r'\[(.*?)\]', '', t)
return pd.to_datetime(t, utc=True).tz_convert(tz)
def date_strftime(t):
'''
Convert to time format of the form e.g.
2020-06-14 19:01:15.123+0100 [Europe/London]
'''
tz = t.tz
return t.strftime(f'%Y-%m-%d %H:%M:%S.%f%z [{tz}]')
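# Illustrative sketch (not part of the original module): round-tripping the
# bracketed-timezone format handled by date_parser/date_strftime above. Note
# that %f pads microseconds, so '.123' comes back as '.123000'.
def _exampleDateRoundTrip():
    t = date_parser('2020-06-14 19:01:15.123+0100 [Europe/London]')
    return date_strftime(t)  # '2020-06-14 19:01:15.123000+0100 [Europe/London]'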
def writeTimeSeries(e, labels, tsFile):
""" Write activity timeseries file
:param pandas.DataFrame e: Pandas dataframe of epoch data. Must contain
activity classification columns with missing rows imputed.
:param list(str) labels: Activity state labels
:param str tsFile: Output CSV filename
:return: None
:rtype: void
"""
cols = ['accImputed']
cols_new = ['acc']
labelsImputed = [l + 'Imputed' for l in labels]
cols.extend(labelsImputed)
cols_new.extend(labels)
if 'MET' in e.columns:
cols.append('METImputed')
cols_new.append('MET')
e_new = pd.DataFrame(index=e.index)
e_new.index.name = 'time'
e_new['imputed'] = e.isna().any(1).astype('int')
e_new[cols_new] = e[cols]
# make output time format contain timezone
# e.g. 2020-06-14 19:01:15.123000+0100 [Europe/London]
e_new.index = e_new.index.to_series(keep_tz=True).apply(date_strftime)
e_new.to_csv(tsFile, compression='gzip')
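# Illustrative sketch only (not part of the original module), assuming the
# pandas version this module targets: build a tiny two-epoch frame with the
# imputed-column naming that writeTimeSeries expects ('accImputed' plus one
# '<label>Imputed' column per activity label) and write it out gzipped. The
# label 'walking' and the output filename are hypothetical.
def _exampleWriteTimeSeries():
    idx = pd.date_range('2020-06-14 19:00', periods=2, freq='30s',
                        tz='Europe/London', name='time')
    e = pd.DataFrame({'accImputed': [12.3, 15.1],
                      'walkingImputed': [0, 1]}, index=idx)
    writeTimeSeries(e, ['walking'], 'timeSeries-example.csv.gz')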
|
bsd-2-clause
|
DStauffman/dstauffman
|
dstauffman/utils.py
|
1
|
55659
|
r"""
Generic utilities that can be independently defined and used by other modules.
Notes
-----
#. By design, this module does not reference any other piece of the dstauffman code base except
constants to avoid circular references.
#. Written by David C. Stauffer in March 2015.
"""
#%% Imports
from __future__ import annotations
from collections.abc import Mapping
from contextlib import contextmanager
import datetime
import doctest
from functools import reduce
import inspect
from io import StringIO
import os
from pathlib import Path
import shlex
import subprocess
import sys
from typing import Any, Callable, Dict, Iterable, List, Literal, Optional, overload, Tuple, \
TypeVar, TYPE_CHECKING, Union
import unittest
import warnings
from dstauffman.constants import HAVE_NUMPY, HAVE_SCIPY, IS_WINDOWS
from dstauffman.enums import ReturnCodes
from dstauffman.units import MONTHS_PER_YEAR
if HAVE_NUMPY:
import numpy as np
from numpy import inf, nan, logical_not
if TYPE_CHECKING:
from numpy.typing import ArrayLike
else:
from math import inf, nan, isnan
logical_not = lambda x: not x # type: ignore[assignment]
if HAVE_SCIPY:
from scipy.interpolate import interp1d
#%% Globals
_ALLOWED_ENVS: Optional[Dict[str, str]] = None # allows any environment variables to be invoked
if TYPE_CHECKING:
_StrOrListStr = TypeVar('_StrOrListStr', str, List[str])
_SingleNum = Union[int, float, np.ndarray, np.datetime64]
_Lists = Union[np.ndarray, List[np.ndarray], Tuple[np.ndarray, ...]]
_Number = Union[float, np.ndarray]
#%% Functions - _nan_equal
def _nan_equal(a: Any, b: Any, /, tolerance: float = None) -> bool:
r"""
Test ndarrays for equality, but ignore NaNs.
Parameters
----------
a : ndarray
Array one
b : ndarray
Array two
tolerance : float, optional
Numerical tolerance used to compare two numbers that are close together to consider them equal
Returns
-------
bool
Flag for whether the inputs are the same or not
Examples
--------
>>> from dstauffman.utils import _nan_equal
>>> import numpy as np
>>> a = np.array([1, 2, np.nan])
>>> b = np.array([1, 2, np.nan])
>>> print(_nan_equal(a, b))
True
>>> a = np.array([1, 2, np.nan])
>>> b = np.array([3, 2, np.nan])
>>> print(_nan_equal(a, b))
False
"""
def _is_nan(x) -> bool:
try:
out = isnan(x)
except:
return False
return out
try:
if HAVE_NUMPY:
# use numpy testing module to assert that they are equal (ignores NaNs)
do_simple = tolerance is None or tolerance == 0 or a is None or b is None
if not do_simple:
# see if these can be cast to numeric values that can be compared
try:
_ = np.isfinite(a) & np.isfinite(b)
except TypeError:
do_simple = True
except:
pass
if do_simple:
np.testing.assert_array_equal(a, b)
else:
np.testing.assert_allclose(a, b, atol=tolerance, equal_nan=True)
else:
if tolerance is not None and tolerance != 0:
raise ValueError('You must have numpy installed to use a non-zero tolerance.')
if a != b:
return False
if hasattr(a, '__len__'):
if hasattr(b, '__len__'):
if len(a) != len(b):
return False
return all(x == y or _is_nan(x) or _is_nan(y) for (x, y) in zip(a, b))
else:
return False
else:
if hasattr(b, '__len__'):
return False
return a == b or _is_nan(a) or _is_nan(b)
except AssertionError:
# if assertion fails, then they are not equal
return False
return True
#%% Functions - find_in_range
def find_in_range(value: ArrayLike, min_: _SingleNum = -inf, max_: _SingleNum = inf, *, \
inclusive: bool = False, mask: Union[bool, np.ndarray] = None, precision: _SingleNum = 0, \
left: bool = False, right: bool = False) -> np.ndarray:
r"""
Finds values in the given range.
Parameters
----------
value : (N,) ndarray of float
Value to compare against, which could be NaN
min_ : int or float, optional
Minimum value to include in range
max_ : int or float, optional
Maximum value to include in range
inclusive : bool, optional, default is False
Whether to inclusively count both endpoints (overrules left and right)
mask : (N,) ndarray of bool, optional
A mask to preapply to the results
precision : int or float, optional, default is zero
A precision to apply to the comparisons
left : bool, optional, default is False
Whether to include the left endpoint in the range
right : bool, optional, default is False
Whether to include the right endpoint in the range
Returns
-------
valid : (N,) ndarray of bool
True or False flag for whether the value is in the given range
Notes
-----
#. Written by David C. Stauffer in August 2020.
Examples
--------
>>> from dstauffman import find_in_range
>>> import numpy as np
>>> valid = find_in_range(np.array([-1, 15, 30.3, 40, 0, 0, 10, np.nan, 8000]), min_=12, max_=35)
>>> print(valid)
[False True True False False False False False False]
"""
# ensure this is an ndarray
value = np.asanyarray(value)
# find the people with valid values to compare against
not_nan = ~np.isnan(value)
if mask is not None:
not_nan &= mask
# find those greater than the minimum bound
if np.isfinite(min_):
func = np.greater_equal if inclusive or left else np.greater
valid = func(value, min_-precision, out=np.zeros(value.shape, dtype=bool), where=not_nan) # type: ignore[operator]
else:
assert ~np.isnan(min_) and np.sign(min_) < 0, 'The minimum should be -np.inf if not finite.'
valid = not_nan.copy()
# combine with those less than the maximum bound
if np.isfinite(max_):
func = np.less_equal if inclusive or right else np.less
valid &= func(value, max_+precision, out=np.zeros(value.shape, dtype=bool), where=not_nan) # type: ignore[operator]
else:
assert ~np.isnan(max_) and np.sign(max_) > 0, 'The maximum should be np.inf if not finite.'
return valid # type: ignore[no-any-return]
#%% Functions - rms
@overload
def rms(data: ArrayLike, axis: Literal[None] = ..., keepdims: bool = ..., ignore_nans: bool = ...) -> float: ...
@overload
def rms(data: ArrayLike, axis: int, keepdims: Literal[False] = ..., ignore_nans: bool = ...) -> _Number: ...
@overload
def rms(data: ArrayLike, axis: int, keepdims: Literal[True], ignore_nans: bool = ...) -> np.ndarray: ...
def rms(data: ArrayLike, axis: int = None, keepdims: bool = False, ignore_nans: bool = False) -> _Number:
r"""
Calculate the root mean square of a number series.
Parameters
----------
data : array_like
input data
axis : int, optional
Axis along which RMS is computed. The default is to compute the RMS of the flattened array.
keepdims : bool, optional
If true, the axes which are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the original `data`.
ignore_nans : bool, optional
If true, NaNs in the data are ignored and the mean is taken over the finite values only.
Returns
-------
out : ndarray
RMS results
See Also
--------
numpy.mean, numpy.nanmean, numpy.conj, numpy.sqrt
Notes
-----
#. Written by David C. Stauffer in Mar 2015.
Examples
--------
>>> from dstauffman import rms
>>> rms([0, 1, 0., -1])
0.7071067811865476
"""
# check for empty data
if not np.isscalar(data) and len(data) == 0: # type: ignore[arg-type]
return np.nan
# do the root-mean-square, but use x * conj(x) instead of square(x) to handle complex numbers correctly
if not ignore_nans:
out = np.sqrt(np.mean(data * np.conj(data), axis=axis, keepdims=keepdims))
else:
# check for all NaNs case
if np.all(np.isnan(data)):
if axis is None:
out = np.nan
else:
assert isinstance(data, np.ndarray)
if keepdims:
shape = (*data.shape[:axis], 1, *data.shape[axis+1:])
else:
shape = (*data.shape[:axis], *data.shape[axis+1:])
out = np.full(shape, np.nan)
else:
out = np.sqrt(np.nanmean(data * np.conj(data), axis=axis, keepdims=keepdims))
# return the result
return out # type: ignore[no-any-return]
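#%% Functions - _example_rms_ignore_nans (illustrative sketch, not part of the original module)
def _example_rms_ignore_nans():
    r"""Hypothetical helper showing that ignore_nans=True drops NaNs instead of propagating them."""
    data = np.array([0.0, 1.0, np.nan, -1.0])
    # rms(data) would be nan; ignoring the NaN gives sqrt(mean([0, 1, 1])) ~= 0.8165
    return rms(data, ignore_nans=True)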
#%% Functions - rss
@overload
def rss(data: ArrayLike, axis: Literal[None] = ..., keepdims: bool = ..., ignore_nans: bool = ...) -> float: ...
@overload
def rss(data: ArrayLike, axis: int, keepdims: Literal[False] = ..., ignore_nans: bool = ...) -> _Number: ...
@overload
def rss(data: ArrayLike, axis: int, keepdims: Literal[True], ignore_nans: bool = ...) -> np.ndarray: ...
def rss(data: ArrayLike, axis: int = None, keepdims: bool = False, ignore_nans: bool = False) -> _Number:
r"""
Calculate the root sum square of a number series.
Parameters
----------
data : array_like
input data
axis : int, optional
Axis along which RMS is computed. The default is to compute the RMS of the flattened array.
keepdims : bool, optional
If true, the axes which are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the original `data`.
ignore_nans : bool, optional
If true, NaNs in the data are ignored when accumulating the sum.
Returns
-------
out : ndarray
RSS results
See Also
--------
numpy.sum, numpy.nansum, numpy.conj
Notes
-----
#. Written by David C. Stauffer in April 2016.
Examples
--------
>>> from dstauffman import rss
>>> rss([0, 1, 0., -1])
2.0
"""
# check for empty data
if not np.isscalar(data) and len(data) == 0: # type: ignore[arg-type]
return np.nan
# do the root-mean-square, but use x * conj(x) instead of square(x) to handle complex numbers correctly
if not ignore_nans:
out = np.sum(data * np.conj(data), axis=axis, keepdims=keepdims) # type: ignore[arg-type]
else:
# check for all NaNs case
if np.all(np.isnan(data)):
if axis is None:
out = np.nan
else:
assert isinstance(data, np.ndarray)
if keepdims:
shape = (*data.shape[:axis], 1, *data.shape[axis+1:])
else:
shape = (*data.shape[:axis], *data.shape[axis+1:])
out = np.full(shape, np.nan)
else:
out = np.nansum(data * np.conj(data), axis=axis, keepdims=keepdims)
# return the result
return out # type: ignore[no-any-return]
#%% Functions - compare_two_classes
def compare_two_classes(c1: Any, c2: Any, /, suppress_output: bool = False, names: Union[Tuple[str, str], List[str]] = None, \
ignore_callables: bool = True, compare_recursively: bool = True, is_subset: bool = False, \
tolerance: float = None) -> bool:
r"""
Compare two classes by going through all their public attributes and showing that they are equal.
Parameters
----------
c1 : class object
Any class object
c2 : class object
Any other class object
suppress_output : bool, optional
If True, suppress the information printed to the screen, defaults to False.
names : list of str, optional
List of the names to be printed to the screen for the two input classes.
ignore_callables : bool, optional
If True, ignore differences in callable attributes (i.e. methods), defaults to True.
is_subset : bool, optional
If True, only checks that c1 is a subset of c2 (c2 can have extra fields), defaults to False
tolerance : float, optional
Numerical tolerance used to compare two numbers that are close together to consider them equal
Returns
-------
is_same : bool
True/False flag for whether the two class are the same.
Examples
--------
>>> from dstauffman import compare_two_classes
>>> c1 = type('Class1', (object, ), {'a': 0, 'b' : '[1, 2, 3]', 'c': 'text'})
>>> c2 = type('Class2', (object, ), {'a': 0, 'b' : '[1, 2, 4]', 'd': 'text'})
>>> is_same = compare_two_classes(c1, c2)
b is different from c1 to c2.
c is only in c1.
d is only in c2.
"c1" and "c2" are not the same.
"""
def _not_true_print():
r"""Set is_same to False and optionally prints information to the screen."""
is_same = False
if not suppress_output:
print(f'{this_attr} is different from {name1} to {name2}.')
return is_same
def _is_function(obj):
r"""Determine whether the object is a function or not."""
# need second part for Python compatibility for v2.7, which distinguishes unbound methods from functions.
return inspect.isfunction(obj) or inspect.ismethod(obj) or inspect.isbuiltin(obj)
def _is_class_instance(obj):
r"""Determine whether the object is an instance of a class or not."""
return hasattr(obj, '__dict__') and not _is_function(obj) # and hasattr(obj, '__call__')
def _is_public(name):
r"""Returns True if the name is public, ie doesn't start with an underscore."""
return not name.startswith('_')
# preallocate answer to True until proven otherwise
is_same = True
# get names if specified
if names is not None:
name1 = names[0]
name2 = names[1]
else:
name1 = 'c1'
name2 = 'c2'
# simple test
if c1 is not c2:
# get the list of public attributes
attrs1 = frozenset(filter(_is_public, dir(c1)))
attrs2 = frozenset(filter(_is_public, dir(c2)))
# compare the attributes that are in both
same = attrs1 & attrs2
for this_attr in sorted(same):
# alias the attributes
attr1 = inspect.getattr_static(c1, this_attr)
attr2 = inspect.getattr_static(c2, this_attr)
# determine if this is a subclass
if _is_class_instance(attr1):
if _is_class_instance(attr2):
if compare_recursively:
names = [name1 + '.' + this_attr, name2 + '.' + this_attr]
# Note: don't want the 'and' to short-circuit, so do the 'and is_same' last
if isinstance(attr1, dict) and isinstance(attr2, dict):
is_same = compare_two_dicts(attr1, attr2, suppress_output=suppress_output, \
names=names, is_subset=is_subset, tolerance=tolerance) and is_same
else:
is_same = compare_two_classes(attr1, attr2, suppress_output=suppress_output, \
names=names, ignore_callables=ignore_callables, \
compare_recursively=compare_recursively, is_subset=is_subset, \
tolerance=tolerance) and is_same
continue
else:
continue # pragma: no cover (actually covered, optimization issue)
else:
is_same = _not_true_print()
continue
else:
if _is_class_instance(attr2):
is_same = _not_true_print()
if _is_function(attr1) or _is_function(attr2):
if ignore_callables:
continue # pragma: no cover (actually covered, optimization issue)
else:
is_same = _not_true_print()
continue
# if any differences, then this test fails
if isinstance(attr1, Mapping) and isinstance(attr2, Mapping):
is_same = compare_two_dicts(attr1, attr2, suppress_output=True, is_subset=is_subset, \
tolerance=tolerance) and is_same
elif logical_not(_nan_equal(attr1, attr2, tolerance=tolerance)):
is_same = _not_true_print()
# find the attributes in one but not the other, if any, then this test fails
diff = attrs1 ^ attrs2
for this_attr in sorted(diff):
if is_subset and this_attr in attrs2:
# if only checking that c1 is a subset of c2, then skip this condition
continue
is_same = False
if not suppress_output:
if this_attr in attrs1:
print(f'{this_attr} is only in {name1}.')
else:
print(f'{this_attr} is only in {name2}.')
# display results
if not suppress_output:
if is_same:
subset_text = ' (subset)' if is_subset else ''
print(f'"{name1}" and "{name2}" are the same{subset_text}.')
else:
print(f'"{name1}" and "{name2}" are not the same.')
return is_same
#%% Functions - compare_two_dicts
def compare_two_dicts(d1: Mapping[Any, Any], d2: Mapping[Any, Any], /, suppress_output: bool = False, \
names: Union[Tuple[str, str], List[str]] = None, is_subset: bool = False, \
tolerance: float = None) -> bool:
r"""
Compare two dictionaries for the same keys, and the same value of those keys.
Parameters
----------
d1 : class object
Any class object
d2 : class object
Any other class object
suppress_output : bool, optional
If True, suppress the information printed to the screen, defaults to False.
names : list of str, optional
List of the names to be printed to the screen for the two input classes.
is_subset : bool, optional
If True, only checks that d1 is a subset of d2 (d2 can have extra fields), defaults to False
tolerance : float, optional
Numerical tolerance used to compare two numbers that are close together to consider them equal
Returns
-------
is_same : bool
True/False flag for whether the two class are the same.
Examples
--------
>>> from dstauffman import compare_two_dicts
>>> d1 = {'a': 1, 'b': 2, 'c': 3}
>>> d2 = {'a': 1, 'b': 5, 'd': 6}
>>> is_same = compare_two_dicts(d1, d2)
b is different.
c is only in d1.
d is only in d2.
"d1" and "d2" are not the same.
"""
# preallocate answer to True until proven otherwise
is_same = True
# get names if specified
if names is not None:
name1 = names[0]
name2 = names[1]
else:
name1 = 'd1'
name2 = 'd2'
# simple test
if d1 is not d2:
# compare the keys that are in both
same = set(d1) & set(d2)
for key in sorted(same):
s1 = d1[key]
s2 = d2[key]
if isinstance(s1, dict) and isinstance(s2, dict):
# Note: don't want the 'and' to short-circuit, so do the 'and is_same' last
is_same = compare_two_dicts(s1, s2, suppress_output=suppress_output, \
names=[f"{name1}['{key}']", f"{name2}['{key}']"], is_subset=is_subset, tolerance=tolerance) and is_same
# if any differences, then this test fails
elif logical_not(_nan_equal(s1, s2, tolerance=tolerance)):
is_same = False
if not suppress_output:
print(f'{key} is different.')
# find keys in one but not the other, if any, then this test fails
diff = set(d1) ^ set(d2)
for key in sorted(diff):
if is_subset and key in d2:
# if only checking that d1 is a subset of d2, then skip this condition
continue
is_same = False
if not suppress_output:
if key in d1:
print(f'{key} is only in {name1}.')
else:
print(f'{key} is only in {name2}.')
# display results
if not suppress_output:
if is_same:
subset_text = ' (subset)' if is_subset else ''
print(f'"{name1}" and "{name2}" are the same{subset_text}.')
else:
print(f'"{name1}" and "{name2}" are not the same.')
return is_same
#%% Functions - read_text_file
def read_text_file(filename: Union[str, Path]) -> str:
r"""
Open and read a complete text file.
Parameters
----------
filename : str or class pathlib.Path
fullpath name of the file to read
Returns
-------
text : str
text of the desired file
Raises
------
RuntimeError
If unable to open, or unable to read file.
See Also
--------
write_text_file, open
Examples
--------
>>> from dstauffman import read_text_file, write_text_file, get_tests_dir
>>> import os
>>> text = 'Hello, World\n'
>>> filename = get_tests_dir() / 'temp_file.txt'
>>> write_text_file(filename, text)
>>> text2 = read_text_file(get_tests_dir() / 'temp_file.txt')
>>> print(text2)
Hello, World
<BLANKLINE>
>>> filename.unlink()
"""
try:
# open file for reading
with open(filename, 'rt') as file:
# read file
text = file.read() # pragma: no branch
# return results
return text
except:
# on any exceptions, print a message and re-raise the error
print(f'Unable to open file "{filename}" for reading.')
raise
#%% Functions - write_text_file
def write_text_file(filename: Union[str, Path], text: str) -> None:
r"""
Open and write the specified text to a file.
Parameters
----------
filename : str
fullpath name of the file to read
text : str
text to be written to the file
Raises
------
RuntimeError
If unable to open, or unable to write file.
See Also
--------
open_text_file, open
Examples
--------
>>> from dstauffman import write_text_file, get_tests_dir
>>> import os
>>> text = 'Hello, World\n'
>>> filename = get_tests_dir() / 'temp_file.txt'
>>> write_text_file(filename, text)
>>> filename.unlink()
"""
try:
# open file for writing
with open(filename, 'wt') as file:
# write file
file.write(text) # pragma: no branch
except:
# on any exceptions, print a message and re-raise the error
print(f'Unable to open file "{filename}" for writing.')
raise
#%% Functions - capture_output
@contextmanager
def capture_output(mode: str = 'out'):
r"""
Capture the stdout and stderr streams instead of displaying to the screen.
Parameters
----------
mode : str
Mode to use when capturing output
'out' captures just sys.stdout
'err' captures just sys.stderr
'all' captures both sys.stdout and sys.stderr
Returns
-------
out : class StringIO
stdout stream output
err : class StringIO
stderr stream output
Notes
-----
#. Written by David C. Stauffer in March 2015.
Examples
--------
>>> from dstauffman import capture_output
>>> with capture_output() as out:
... print('Hello, World!')
>>> output = out.getvalue().strip()
>>> out.close()
>>> print(output)
Hello, World!
"""
# alias modes
capture_out = True if mode == 'out' or mode == 'all' else False
capture_err = True if mode == 'err' or mode == 'all' else False
# create new string buffers
new_out, new_err = StringIO(), StringIO()
# alias the old string buffers for restoration afterwards
old_out, old_err = sys.stdout, sys.stderr
try:
# override the system buffers with the new ones
if capture_out:
sys.stdout = new_out
if capture_err:
sys.stderr = new_err
# yield results as desired
if mode == 'out':
yield sys.stdout
elif mode == 'err':
yield sys.stderr
elif mode == 'all':
yield sys.stdout, sys.stderr
finally:
# restore the original buffers once all results are read
sys.stdout, sys.stderr = old_out, old_err
#%% Functions - unit
def unit(data: _Lists, axis: int = 0) -> np.ndarray:
r"""
Normalize a matrix into unit vectors along a specified dimension.
Parameters
----------
data : ndarray
Data
axis : int, optional
Axis upon which to normalize, defaults to first axis (i.e. column normalization for 2D matrices)
Returns
-------
norm_data : ndarray
Normalized data
See Also
--------
sklearn.preprocessing.normalize
Notes
-----
#. Written by David C. Stauffer in May 2015.
Examples
--------
>>> from dstauffman import unit
>>> import numpy as np
>>> data = np.array([[1, 0, -1], [0, 0, 0], [0, 0, 1]])
>>> norm_data = unit(data, axis=0)
>>> with np.printoptions(precision=8):
... print(norm_data) # doctest: +NORMALIZE_WHITESPACE
[[ 1. 0. -0.70710678]
[ 0. 0. 0. ]
[ 0. 0. 0.70710678]]
"""
if isinstance(data, (list, tuple)):
data = np.vstack(data).T
assert isinstance(data, np.ndarray)
if axis >= data.ndim:
raise ValueError('axis {} is out of bounds for array of dimension {}'.format(axis, data.ndim))
# calculate the magnitude of each vector
mag = np.atleast_1d(np.sqrt(np.sum(data * np.conj(data), axis=axis)))
# check for zero vectors, and replace magnitude with 1 to make them unchanged
mag[mag == 0] = 1
# calculate the new normalized data
norm_data: np.ndarray = data / mag
return norm_data
#%% modd
def modd(x1, x2, /, out=None):
r"""
Return element-wise remainder of division, except that instead of zero it gives the divisor instead.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it must be of the right
shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient x1/x2, element-wise. Returns a scalar if both x1 and x2 are
scalars. Replaces what would be zeros in the normal modulo command with the divisor instead.
Notes
-----
#. Written by David C. Stauffer in October 2015.
Examples
--------
>>> from dstauffman import modd
>>> import numpy as np
>>> x1 = np.array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> x2 = 4
>>> y = modd(x1, x2)
>>> print(y)
[4 1 2 3 4 1 2 3 4]
"""
x1 = np.asanyarray(x1)
if out is None:
y = np.mod(x1 - 1, x2) + 1
return y
else:
np.mod(x1 - 1, x2, out)
np.add(out, 1, out) # needed to force add to be inplace operation
#%% is_np_int
def is_np_int(x, /):
r"""
Returns True if the input is an int or any form of an np.integer type.
Parameters
----------
x : int, float or ndarray
Input value
Returns
-------
bool
Whether input is an integer type
Examples
--------
>>> from dstauffman import is_np_int
>>> import numpy as np
>>> print(is_np_int(1))
True
>>> print(is_np_int(1.))
False
>>> print(is_np_int(np.array([1, 2])))
True
>>> print(is_np_int(np.array([1., 2.])))
False
>>> print(is_np_int(np.array(2**62)))
True
"""
if isinstance(x, int) or (hasattr(x, 'dtype') and np.issubdtype(x.dtype, np.integer)):
return True
return False
#%% np_digitize
def np_digitize(x, /, bins, right=False):
r"""
Act as a wrapper to the numpy.digitize function with customizations.
The customizations include additional error checks, and bins starting from 0 instead of 1.
Parameters
----------
x : array_like
Input array to be binned.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is closed in this
case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
monotonically increasing bins.
Returns
-------
out : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
numpy.digitize
Notes
-----
    #. This function is equivalent to the MATLAB `discretize` function.
Examples
--------
>>> from dstauffman import np_digitize
>>> import numpy as np
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> out = np_digitize(x, bins)
>>> print(out)
[0 3 2 1]
"""
# allow an empty x to pass through just fine
if x.size == 0:
return np.array([], dtype=int)
# check for NaNs
if np.any(np.isnan(x)):
raise ValueError('Some values were NaN.')
# check the bounds
tolerance = None # TODO: do I need a tolerance here?
bmin = bins[0] if tolerance is None else bins[0] - tolerance
bmax = bins[-1] if tolerance is None else bins[-1] + tolerance
if right:
if np.any(x <= bmin) or np.any(x > bmax):
raise ValueError('Some values of x are outside the given bins.')
else:
if np.any(x < bmin) or np.any(x >= bmax):
raise ValueError('Some values of x are outside the given bins.')
# do the calculations by calling the numpy command and shift results by one
out = np.digitize(x, bins, right) - 1
return out
#%% histcounts
def histcounts(x, /, bins, right=False):
r"""
Count the number of points in each of the given bins.
Parameters
----------
x : array_like
Input array to be binned.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is open in this
case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
monotonically increasing bins.
Returns
-------
hist : ndarray of ints
Output array the number of points in each bin
See Also
--------
numpy.digitize, np_digitize
Notes
-----
    #. This function is equivalent to the MATLAB `histcounts` function.
Examples
--------
>>> from dstauffman import histcounts
>>> import numpy as np
>>> x = np.array([0.2, 6.4, 3.0, 1.6, 0.5])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> hist = histcounts(x, bins)
>>> print(hist)
[2 1 1 1]
"""
# get the bin number that each point is in
ix_bin = np_digitize(x, bins, right=right)
# count the number in each bin
hist = np.bincount(ix_bin, minlength=len(bins)-1)
return hist
#%% full_print
@contextmanager
def full_print(**kwargs):
r"""
Context manager for printing full numpy arrays.
Parameters
----------
kwargs : dict, optional
Arguments that will be passed through to the np.set_printoptions function
Notes
-----
    #. Adapted by David C. Stauffer in January 2017 from a Stack Overflow answer by Paul Price,
       given here: http://stackoverflow.com/questions/1987694/print-the-full-numpy-array
#. Updated by David C. Stauffer in August 2020 to allow arbitrary arguments to pass through.
Examples
--------
>>> from dstauffman import full_print
>>> import numpy as np
>>> temp_options = np.get_printoptions()
>>> np.set_printoptions(threshold=10)
>>> a = np.zeros((10, 5))
>>> a[3, :] = 1.23
>>> print(a) # doctest: +NORMALIZE_WHITESPACE
[[0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0.]
...
[0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0.]]
>>> with full_print():
... print(a)
[[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[1.23 1.23 1.23 1.23 1.23]
[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]
[0. 0. 0. 0. 0. ]]
>>> np.set_printoptions(**temp_options)
"""
# get the desired threshold, default is all elements
threshold = kwargs.pop('threshold', sys.maxsize)
# get current options
opt = np.get_printoptions()
# update to print all elements and any other criteria specified
np.set_printoptions(threshold=threshold, **kwargs)
    # yield options for the context manager to do its thing
yield
# reset the options back to what they were originally
np.set_printoptions(**opt)
#%% line_wrap
@overload
def line_wrap(text: str, wrap: int = 80, min_wrap: int = 0, indent: int = 4, line_cont: str = '\\') -> str: ...
@overload
def line_wrap(text: List[str], wrap: int = 80, min_wrap: int = 0, indent: int = 4, line_cont: str = '\\') -> List[str]: ...
def line_wrap(text: _StrOrListStr, wrap: int = 80, min_wrap: int = 0, indent: int = 4, line_cont: str = '\\') -> _StrOrListStr:
r"""
Wrap lines of text to the specified length, breaking at any whitespace characters.
Parameters
----------
text : str or list of str
Text to be wrapped
wrap : int, optional
Number of characters to wrap text at, default is 80
min_wrap : int, optional
Minimum number of characters to wrap at, default is 0
indent : int, optional
Number of characters to indent the next line with, default is 4
line_cont : str, optional
Line continuation character, default is '\'
Returns
-------
out : str or list of str
wrapped form of text
Examples
--------
>>> from dstauffman import line_wrap
>>> text = ('lots of repeated words ' * 4).strip()
>>> wrap = 40
>>> out = line_wrap(text, wrap)
>>> print(out)
lots of repeated words lots of \
repeated words lots of repeated \
words lots of repeated words
"""
# check if single str
if isinstance(text, str):
text_list = [text]
else:
text_list = text
# create the pad for any newline
pad = ' ' * indent
# initialize output
out: List[str] = []
# loop through text lines
for this_line in text_list:
# determine if too long
while len(this_line) > wrap:
# find the last whitespace to break on, possibly with a minimum start
space_break = this_line.rfind(' ', min_wrap, wrap-1)
if space_break == -1 or space_break <= indent:
raise ValueError('The specified min_wrap:wrap of "{}:{}" was too small.'.format(min_wrap, wrap))
# add the shorter line
out.append(this_line[:space_break] + ' ' + line_cont)
# reduce and repeat
this_line = pad + this_line[space_break+1:]
# add the final shorter line
out.append(this_line)
if isinstance(text, str):
return '\n'.join(out)
return out
#%% combine_per_year
@overload
def combine_per_year(data: None, func: Any) -> None: ...
@overload
def combine_per_year(data: np.ndarray, func: Any = ...) -> np.ndarray: ...
def combine_per_year(data: Optional[np.ndarray], func: Callable[..., Any] = None) -> Optional[np.ndarray]:
r"""
Combine the time varying values over one year increments using a supplied function.
Parameters
----------
data : ndarray, 1D or 2D
Data array
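    func : callable
        Function used to combine each year of data, such as np.nanmean or np.nansum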
Returns
-------
data2 : ndarray, 1D or 2D
        Data array combined by `func` over 12 month periods
Notes
-----
#. Written by David C. Stauffer in October 2015.
#. Made more generic by David C. Stauffer in August 2017.
#. This function was designed with np.nanmean and np.nansum in mind.
Examples
--------
>>> from dstauffman import combine_per_year
>>> import numpy as np
>>> time = np.arange(120)
>>> data = np.sin(time)
>>> data2 = combine_per_year(data, func=np.mean)
"""
# check that a function was provided
assert func is not None and callable(func), 'A callable function must be provided.'
# check for null case and exit
if data is None:
return None
# check dimensionality
is_1d = True if data.ndim == 1 else False
# get original sizes
if is_1d:
data = data[:, np.newaxis]
assert isinstance(data, np.ndarray)
(num_time, num_chan) = data.shape
num_year = num_time // MONTHS_PER_YEAR
# check for case with all NaNs
if np.all(np.isnan(data)):
data2 = np.full((num_year, num_chan), np.nan, dtype=float)
else:
# disables warnings for time points that are all NaNs for nansum or nanmean
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='Mean of empty slice')
warnings.filterwarnings('ignore', message='Sum of empty slice')
# calculate sum or mean (or whatever)
data2 = func(np.reshape(data[:num_year*MONTHS_PER_YEAR, :], (num_year, MONTHS_PER_YEAR, num_chan)), axis=1)
# optionally squeeze the vector case back to 1D
if is_1d:
data2 = data2.squeeze(axis=1)
return data2
#%% Functions - execute
def execute(command: Union[str, List[str]], folder: Path, *, ignored_codes: Iterable[int] = None, \
env: Dict[str, str] = None):
r"""
Wrapper to subprocess that allows the screen to be updated for long running commands.
Parameters
----------
command : str or list of str
Command to execute
folder : class pathlib.Path
Path to execute the command in
ignored_codes : iterable of int, optional
If given, a list of non-zero error codes to ignore
env : dict
Dictionary of environment variables to update for the call
Returns
-------
rc : ReturnCodes enum
return code from running the command
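    Yields
    ------
    stdout_line : str
        Each line of output from the command as it is produced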
Notes
-----
#. Written by David C. Stauffer in October 2019.
#. Updated by David C. Stauffer in April 2021 to use pathlib.
Examples
--------
>>> from dstauffman import execute
>>> import pathlib
>>> command = 'ls'
>>> folder = pathlib.Path.cwd()
>>> # Note that this command may not work right within the IPython console, it's intended for command windows.
>>> execute(command, folder) # doctest: +SKIP
"""
# overlay environment variables
if env is not None:
        env = {**os.environ, **env}
# create a process to spawn the thread
popen = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=None, \
cwd=folder, shell=False, universal_newlines=True, env=env)
    # intermittently read output lines
for stdout_line in iter(popen.stdout.readline, ''): # type: ignore[union-attr]
yield stdout_line
# once done, close and get return codes
popen.stdout.close() # type: ignore[union-attr]
return_code = popen.wait()
# method 2
# while True:
# output = process.stdout.readline()
# if output == '' and process.poll() is not None:
# break
# if output:
# yield output
# process.stdout.close()
# return_code = process.poll()
# determine if command exited cleanly or not and return appropriate code
if return_code:
if ignored_codes is None or return_code not in ignored_codes:
#raise subprocess.CalledProcessError(return_code, command)
return ReturnCodes.bad_command
return ReturnCodes.clean
#%% Functions - execute_wrapper
def execute_wrapper(command: Union[str, List[str]], folder: Path, *, dry_run: bool = False, \
ignored_codes: Iterable[int] = None, filename: Path = None, env: Dict[str, str] = None, \
print_status: bool = True) -> Union[ReturnCodes, List[str]]:
r"""
    Wrapper around the `execute` subprocess wrapper with options to print the command and do dry runs.
Parameters
----------
command : str or list of str
Command to execute
folder : class pathlib.Path
Path to execute the command in
dry_run : bool, optional, default is False
Whether the command should be displayed or actually run
ignored_codes : int or iterable of int, optional, default is None
If given, a list of non-zero error codes to ignore
filename : class pathlib.Path, optional, default is to not write
        Name of the file to write the output to; ignored if None
env : dict, optional
Dictionary of environment variables to update for the call
print_status : bool, optional, default is True
Whether to print the status of the command to standard output
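    Returns
    -------
    rc or lines : ReturnCodes enum or list of str
        Return code when doing a dry run or when the folder is missing, otherwise the list of captured output lines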
Notes
-----
#. Written by David C. Stauffer in November 2019.
#. Updated by David C. Stauffer in April 2021 to use pathlib.
Examples
--------
>>> from dstauffman import execute_wrapper
>>> import pathlib
>>> command = 'ls'
>>> folder = pathlib.Path.cwd()
>>> dry_run = True
>>> rc = execute_wrapper(command, folder, dry_run=dry_run) # doctest: +ELLIPSIS
Would execute "ls" in "..."
"""
# simple dry run case, just display what would happen
if dry_run:
if isinstance(command, list):
command = shlex.join(command)
print('Would execute "{}" in "{}"'.format(command, folder))
return ReturnCodes.clean
# clean up command
if isinstance(command, str):
command_list = shlex.split(command)
elif isinstance(command, list):
command_list = command
else:
raise TypeError('Unexpected type for the command list.')
# check that the folder exists
if not folder.is_dir():
print('Warning: folder "{}" doesn\'t exist, so command "{}" was not executed.'.format(folder, command))
return ReturnCodes.bad_folder
# execute command and print status
assert print_status or filename is not None, 'You must either print the status or save results to a filename.'
if print_status:
lines = []
for line in execute(command_list, folder, ignored_codes=ignored_codes, env=env):
# print each line as it comes so you can review long running commands as they execute
print(line, end='')
lines.append(line)
else:
lines = list(execute(command_list, folder, ignored_codes=ignored_codes, env=env))
# optionally write to text file if a filename is given
if filename is not None:
write_text_file(filename, ''.join(lines))
return lines
#%% Functions - get_env_var
def get_env_var(env_key: str, default: str = None) -> str:
r"""
    Return an environment variable, assuming it has been set.
Parameters
----------
env_key : str
Environment variable to try and retrieve.
default : str, optional
        Default value to use if the variable doesn't exist; if None, an error is raised instead
Returns
-------
value : str
Value of the given environment variable
Notes
-----
#. Written by Alex Kershetsky in November 2019.
#. Incorporated into dstauffman tools by David C. Stauffer in January 2020.
Examples
--------
>>> from dstauffman import get_env_var
>>> value = get_env_var('HOME')
"""
if _ALLOWED_ENVS is not None:
if env_key not in _ALLOWED_ENVS:
raise KeyError('The environment variable of "{}" is not on the allowed list.'.format(env_key))
try:
value = os.environ[env_key]
except KeyError:
if default is None:
raise KeyError('The appropriate environment variable "{}" has not been set.'.format(env_key)) from None
value = default
return value
#%% Functions - get_username
def get_username() -> str:
r"""
Gets the current username based on environment variables.
Returns
-------
username : str
Name of the username
Notes
-----
#. Written by David C. Stauffer in August 2020.
Examples
--------
>>> from dstauffman import get_username
>>> username = get_username()
"""
if IS_WINDOWS:
return os.environ['USERNAME']
return os.environ['USER']
#%% Functions - is_datetime
def is_datetime(time: ArrayLike) -> bool:
r"""
Determines if the given time is either a datetime.datetime or np.datetime64 or just a regular number.
Parameters
----------
time : float
Time
Returns
-------
out : bool
Whether this is a datetime
Notes
-----
#. Written by David C. Stauffer in May 2020.
Examples
--------
>>> from dstauffman import is_datetime
>>> import datetime
>>> import numpy as np
>>> time1 = 0.5
>>> time2 = np.datetime64('now')
>>> time3 = datetime.datetime.now()
>>> print(is_datetime(time1))
False
>>> print(is_datetime(time2))
True
>>> print(is_datetime(time3))
True
"""
out = False
if isinstance(time, datetime.datetime) or (hasattr(time, 'dtype') and \
np.issubdtype(time.dtype, np.datetime64)): # type: ignore[union-attr]
out = True
return out
#%% Functions - intersect
def intersect(a, b, /, *, tolerance=0, assume_unique=False, return_indices=False):
r"""
Finds the intersect of two arrays given a numerical tolerance.
Return the sorted, unique values that are in both of the input arrays.
Parameters
----------
a, b : array_like
Input arrays. Will be flattened if not already 1D.
tolerance : float or int
Tolerance for which something is considered unique
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
return_indices : bool
If True, the indices which correspond to the intersection of the two
arrays are returned. The first instance of a value is used if there are
multiple. Default is False.
Returns
-------
c : ndarray
Sorted 1D array of common and unique elements.
ia : ndarray
        The indices of the first occurrences of the common values in `a`.
        Only provided if `return_indices` is True.
    ib : ndarray
        The indices of the first occurrences of the common values in `b`.
Only provided if `return_indices` is True.
See Also
--------
    numpy.intersect1d : Function used to do comparison with sets of quantized inputs.
Notes
-----
#. Written by David C. Stauffer in March 2019.
#. Updated by David C. Stauffer in June 2020 to allow for a numeric tolerance.
Examples
--------
>>> from dstauffman import intersect
>>> import numpy as np
>>> a = np.array([1, 2, 4, 4, 6], dtype=int)
>>> b = np.array([0, 8, 2, 2, 5, 8, 6, 8, 8], dtype=int)
>>> (c, ia, ib) = intersect(a, b, return_indices=True)
>>> print(c)
[2 6]
>>> print(ia)
[1 4]
>>> print(ib)
[2 6]
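    >>> # illustrative sketch: with a nonzero tolerance, values within that tolerance of each other match
    >>> a = np.array([11, 30, 52], dtype=int)
    >>> b = np.array([12, 40, 53], dtype=int)
    >>> print(intersect(a, b, tolerance=2))
    [11 52]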
"""
# allow a zero tolerance to be passed in and behave like the normal intersect command
if hasattr(tolerance, 'dtype') and np.issubdtype(tolerance.dtype, np.timedelta64):
tol_is_zero = tolerance.astype(np.int64) == 0 # Note that this avoids a numpy bug, see issue 6784
else:
tol_is_zero = tolerance == 0
if tol_is_zero:
return np.intersect1d(a, b, assume_unique=assume_unique, return_indices=return_indices)
# allow list and other array_like inputs (or just scalar floats)
a = np.atleast_1d(np.asanyarray(a))
b = np.atleast_1d(np.asanyarray(b))
tolerance = np.asanyarray(tolerance)
# check for datetimes and convert to integers
is_dates = np.array([is_datetime(a), is_datetime(b)], dtype=bool)
assert np.count_nonzero(is_dates) != 1, 'Both arrays must be datetimes if either is.'
if np.any(is_dates):
orig_datetime = a.dtype
a = a.astype(np.int64)
b = b.astype(np.int64)
tolerance = tolerance.astype(np.int64)
# check if largest component of a and b is too close to the tolerance floor (for floats)
all_int = is_np_int(a) and is_np_int(b) and is_np_int(tolerance)
max_a_or_b = np.max((np.max(np.abs(a), initial=0), np.max(np.abs(b), initial=0)))
    if not all_int and ((max_a_or_b / tolerance) > (0.01 / np.finfo(float).eps)):
warnings.warn('This function may have problems if tolerance gets too small.')
# due to the splitting of the quanta, two very close numbers could still fail the quantized intersect
# fix this by repeating the comparison when shifted by half a quanta in either direction
half_tolerance = tolerance / 2
if all_int:
# allow for integer versions of half a quanta in either direction
lo_tol = np.floor(half_tolerance).astype(tolerance.dtype)
hi_tol = np.ceil(half_tolerance).astype(tolerance.dtype)
else:
lo_tol = half_tolerance
hi_tol = half_tolerance
# create quantized version of a & b, plus each one shifted by half a quanta
a1 = np.floor_divide(a, tolerance)
b1 = np.floor_divide(b, tolerance)
a2 = np.floor_divide(a - lo_tol, tolerance)
b2 = np.floor_divide(b - lo_tol, tolerance)
a3 = np.floor_divide(a + hi_tol, tolerance)
b3 = np.floor_divide(b + hi_tol, tolerance)
# do a normal intersect on the quantized data for different comparisons
(_, ia1, ib1) = np.intersect1d(a1, b1, assume_unique=assume_unique, return_indices=True)
(_, ia2, ib2) = np.intersect1d(a1, b2, assume_unique=assume_unique, return_indices=True)
(_, ia3, ib3) = np.intersect1d(a1, b3, assume_unique=assume_unique, return_indices=True)
(_, ia4, ib4) = np.intersect1d(a2, b1, assume_unique=assume_unique, return_indices=True)
(_, ia5, ib5) = np.intersect1d(a3, b1, assume_unique=assume_unique, return_indices=True)
# combine the results
ia = reduce(np.union1d, [ia1, ia2, ia3, ia4, ia5])
ib = reduce(np.union1d, [ib1, ib2, ib3, ib4, ib5])
# calculate output
# Note that a[ia] and b[ib] should be the same with a tolerance of 0, but not necessarily otherwise
# This function returns the values from the first vector a
c = np.sort(a[ia])
if np.any(is_dates):
c = c.astype(orig_datetime)
if return_indices:
return (c, ia, ib)
return c
#%% issorted
def issorted(x, /, descend=False):
r"""
Tells whether the given array is sorted or not.
Parameters
----------
x : array_like
Input array
descend : bool, optional, default is False
Whether to check that the array is sorted in descending order
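    Returns
    -------
    bool
        Whether the given array is sorted in the specified order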
Notes
-----
#. Written by David C. Stauffer in July 2020.
Examples
--------
    >>> from dstauffman import issorted
    >>> import numpy as np
    >>> x = np.array([1, 3, 3, 5, 7])
>>> print(issorted(x))
True
>>> y = np.array([3, 5, 1, 7])
>>> print(issorted(y))
False
"""
x = np.asanyarray(x)
if descend:
return np.all(x[1:] <= x[:-1])
return np.all(x[:-1] <= x[1:])
#%% zero_order_hold
def zero_order_hold(x, xp, yp, *, left=nan, assume_sorted=False, return_indices=False):
r"""
Interpolates a function by holding at the most recent value.
Parameters
----------
x : array_like
The x-coordinates at which to evaluate the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points; if they are not already sorted, a slower
        scipy-based implementation is used
    yp : 1-D sequence of float or complex
        The y-coordinates of the data points, same length as xp.
    left : int or float, optional, default is np.nan
        Value to use for any point less than all points in xp
assume_sorted : bool, optional, default is False
Whether you can assume the data is sorted and do simpler (i.e. faster) calculations
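    return_indices : bool, optional, default is False
        Whether to also return the indices into yp used to build the output (only valid for sorted xp)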
Returns
-------
    y : float or complex (corresponding to yp) or ndarray
The interpolated values, same shape as x.
Notes
-----
#. Written by David C. Stauffer in July 2020.
Examples
--------
>>> from dstauffman import zero_order_hold
>>> import numpy as np
>>> xp = np.array([0., 111., 2000., 5000.])
>>> yp = np.array([0, 1, -2, 3])
>>> x = np.arange(0, 6001, dtype=float)
>>> y = zero_order_hold(x, xp, yp)
"""
# force arrays
x = np.asanyarray(x)
xp = np.asanyarray(xp)
yp = np.asanyarray(yp)
# find the minimum value, as anything left of this is considered extrapolated
xmin = xp[0] if assume_sorted else np.min(xp)
# check that xp data is sorted, if not, use slower scipy version
if assume_sorted or issorted(xp):
ix = np.searchsorted(xp, x, side='right') - 1
is_left = np.asanyarray(x) < xmin
out = np.where(is_left, left, yp[ix])
if return_indices:
return (out, np.where(is_left, None, ix))
return out
if not HAVE_SCIPY:
raise RuntimeError('You must have scipy available to run this.') # pragma: no cover
if return_indices:
raise RuntimeError('Data must be sorted in order to ask for indices.')
func = interp1d(xp, yp, kind='zero', fill_value='extrapolate', assume_sorted=False)
return np.where(np.asanyarray(x) < xmin, left, func(x).astype(yp.dtype))
#%% drop_following_time
def drop_following_time(times, drop_starts, dt_drop):
r"""
    Flag the times that fall within dt_drop after any of the drop_starts times.
Parameters
----------
times : (N, ) array_like
Times at which you want to know the drop status
drop_starts : (M, ) array_like
Times at which the drops start
dt_drop : float or int
Delta time for each drop window
Returns
-------
    drop_mask : (N, ) ndarray of bool
Mask where the data points should be dropped
Notes
-----
#. Written by David C. Stauffer in August 2020.
Examples
--------
>>> from dstauffman import drop_following_time
>>> import numpy as np
>>> times = np.arange(50)
>>> drop_starts = np.array([5, 15, 17, 25])
>>> dt_drop = 3
>>> drop_mask = drop_following_time(times, drop_starts, dt_drop)
"""
# Version with for loop # TODO: would like to do this without the loop
drop_mask = np.zeros(times.size, dtype=bool)
for drop_time in drop_starts:
# drop the times within the specified window
drop_mask |= (times >= drop_time) & (times < drop_time + dt_drop)
return drop_mask
#%% Unit test
if __name__ == '__main__':
unittest.main(module='dstauffman.tests.test_utils', exit=False)
doctest.testmod(verbose=False)
|
lgpl-3.0
|
r39132/airflow
|
tests/contrib/hooks/test_bigquery_hook.py
|
3
|
37040
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from google.auth.exceptions import GoogleAuthError
import mock
from googleapiclient.errors import HttpError
from airflow.contrib.hooks import bigquery_hook as hook
from airflow.contrib.hooks.bigquery_hook import _cleanse_time_partitioning, \
_validate_value, _api_resource_configs_duplication_check
bq_available = True
try:
hook.BigQueryHook().get_service()
except GoogleAuthError:
bq_available = False
class TestPandasGbqPrivateKey(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
if not bq_available:
self.instance.extras['extra__google_cloud_platform__project'] = 'mock_project'
def test_key_path_provided(self):
private_key_path = '/Fake/Path'
self.instance.extras['extra__google_cloud_platform__key_path'] = private_key_path
with mock.patch('airflow.contrib.hooks.bigquery_hook.read_gbq',
new=lambda *args, **kwargs: kwargs['private_key']):
self.assertEqual(self.instance.get_pandas_df('select 1'), private_key_path)
def test_key_json_provided(self):
private_key_json = 'Fake Private Key'
self.instance.extras['extra__google_cloud_platform__keyfile_dict'] = private_key_json
with mock.patch('airflow.contrib.hooks.bigquery_hook.read_gbq', new=lambda *args,
**kwargs: kwargs['private_key']):
self.assertEqual(self.instance.get_pandas_df('select 1'), private_key_json)
def test_no_key_provided(self):
with mock.patch('airflow.contrib.hooks.bigquery_hook.read_gbq', new=lambda *args,
**kwargs: kwargs['private_key']):
self.assertEqual(self.instance.get_pandas_df('select 1'), None)
class TestBigQueryDataframeResults(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_output_is_dataframe_with_valid_query(self):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_invalid_query(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('Reason: ', str(context.exception), "")
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_succeeds_with_explicit_legacy_query(self):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_succeeds_with_explicit_std_query(self):
df = self.instance.get_pandas_df(
'select * except(b) from (select 1 a, 2 b)', dialect='standard')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_incompatible_syntax(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df(
'select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('Reason: ', str(context.exception), "")
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('dataset.table', None)
self.assertIn('INTERNAL: No default project is specified',
str(context.exception), "")
def test_split_dataset_table(self):
project, dataset, table = hook._split_tablename('dataset.table',
'project')
self.assertEqual("project", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative:dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_sql_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative.dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_colon_in_project(self):
project, dataset, table = hook._split_tablename('alt1:alt.dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_valid_double_column(self):
project, dataset, table = hook._split_tablename('alt1:alt:dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_invalid_syntax_triple_colon(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt3:dataset.table',
'project')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_triple_dot(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_column_double_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt.dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_colon_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt:dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_dot_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project', 'var_x')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
class TestBigQueryHookSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test", "test_schema.json", ["test_data.json"], source_format="json"
)
# since we passed 'json' in, and it's not valid, make sure it's present in the
# error string.
self.assertIn("JSON", str(context.exception))
class TestBigQueryExternalTableSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").create_external_table(
external_project_dataset_table='test.test',
schema_fields='test_schema.json',
source_uris=['test_data.json'],
source_format='json'
)
        # since we passed 'json' in, and it's not valid, make sure it's present in the
# error string.
self.assertIn("JSON", str(context.exception))
# Helpers for test_cancel_queries: mock_poll_job_complete returns False
# unless mock_job_cancel was called with the same job_id
mock_canceled_jobs = []
def mock_poll_job_complete(job_id):
return job_id in mock_canceled_jobs
def mock_job_cancel(projectId, jobId):
mock_canceled_jobs.append(jobId)
return mock.Mock()
class TestBigQueryBaseCursor(unittest.TestCase):
def test_invalid_schema_update_options(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"]
)
self.assertIn("THIS IS NOT VALID", str(context.exception))
def test_invalid_schema_update_and_write_disposition(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY'
)
self.assertIn("schema_update_options is only", str(context.exception))
def test_cancel_queries(self):
project_id = 12345
running_job_id = 3
mock_jobs = mock.Mock()
mock_jobs.cancel = mock.Mock(side_effect=mock_job_cancel)
mock_service = mock.Mock()
mock_service.jobs = mock.Mock(return_value=mock_jobs)
bq_hook = hook.BigQueryBaseCursor(mock_service, project_id)
bq_hook.running_job_id = running_job_id
bq_hook.poll_job_complete = mock.Mock(side_effect=mock_poll_job_complete)
bq_hook.cancel_query()
mock_jobs.cancel.assert_called_with(projectId=project_id, jobId=running_job_id)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_sql_dialect_default(self, run_with_config):
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
cursor.run_query('query')
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], True)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_sql_dialect_override(self, run_with_config):
for bool_val in [True, False]:
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
cursor.run_query('query', use_legacy_sql=bool_val)
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], bool_val)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_sql_dialect_legacy_with_query_params(self, run_with_config):
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
params = [{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"}
}]
cursor.run_query('query', use_legacy_sql=False, query_params=params)
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], False)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_sql_dialect_legacy_with_query_params_fails(self, run_with_config):
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
params = [{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"}
}]
with self.assertRaises(ValueError):
cursor.run_query('query', use_legacy_sql=True, query_params=params)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_api_resource_configs(self, run_with_config):
for bool_val in [True, False]:
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
cursor.run_query('query',
api_resource_configs={
'query': {'useQueryCache': bool_val}})
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useQueryCache'], bool_val)
self.assertIs(args[0]['query']['useLegacySql'], True)
def test_api_resource_configs_duplication_warning(self):
with self.assertRaises(ValueError):
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
cursor.run_query('query',
use_legacy_sql=True,
api_resource_configs={
'query': {'useLegacySql': False}})
def test_validate_value(self):
with self.assertRaises(TypeError):
_validate_value("case_1", "a", dict)
self.assertIsNone(_validate_value("case_2", 0, int))
def test_duplication_check(self):
with self.assertRaises(ValueError):
key_one = True
_api_resource_configs_duplication_check(
"key_one", key_one, {"key_one": False})
self.assertIsNone(_api_resource_configs_duplication_check(
"key_one", key_one, {"key_one": True}))
class TestTableDataOperations(unittest.TestCase):
def test_insert_all_succeed(self):
project_id = 'bq-project'
dataset_id = 'bq_dataset'
table_id = 'bq_table'
rows = [
{"json": {"a_key": "a_value_0"}}
]
body = {
"rows": rows,
"ignoreUnknownValues": False,
"kind": "bigquery#tableDataInsertAllRequest",
"skipInvalidRows": False,
}
mock_service = mock.Mock()
method = mock_service.tabledata.return_value.insertAll
method.return_value.execute.return_value = {
"kind": "bigquery#tableDataInsertAllResponse"
}
cursor = hook.BigQueryBaseCursor(mock_service, 'project_id')
cursor.insert_all(project_id, dataset_id, table_id, rows)
method.assert_called_with(projectId=project_id, datasetId=dataset_id,
tableId=table_id, body=body)
def test_insert_all_fail(self):
project_id = 'bq-project'
dataset_id = 'bq_dataset'
table_id = 'bq_table'
rows = [
{"json": {"a_key": "a_value_0"}}
]
mock_service = mock.Mock()
method = mock_service.tabledata.return_value.insertAll
method.return_value.execute.return_value = {
"kind": "bigquery#tableDataInsertAllResponse",
"insertErrors": [
{
"index": 1,
"errors": []
}
]
}
cursor = hook.BigQueryBaseCursor(mock_service, 'project_id')
with self.assertRaises(Exception):
cursor.insert_all(project_id, dataset_id, table_id,
rows, fail_on_error=True)
class TestTableOperations(unittest.TestCase):
def test_create_view_fails_on_exception(self):
project_id = 'bq-project'
dataset_id = 'bq_dataset'
table_id = 'bq_table_view'
view = {
'incorrect_key': 'SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*`',
"useLegacySql": False
}
mock_service = mock.Mock()
method = mock_service.tables.return_value.insert
method.return_value.execute.side_effect = HttpError(
resp={'status': '400'}, content=b'Query is required for views')
cursor = hook.BigQueryBaseCursor(mock_service, project_id)
with self.assertRaises(Exception):
cursor.create_empty_table(project_id, dataset_id, table_id,
view=view)
def test_create_view(self):
project_id = 'bq-project'
dataset_id = 'bq_dataset'
table_id = 'bq_table_view'
view = {
'query': 'SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*`',
"useLegacySql": False
}
mock_service = mock.Mock()
method = mock_service.tables.return_value.insert
cursor = hook.BigQueryBaseCursor(mock_service, project_id)
cursor.create_empty_table(project_id, dataset_id, table_id,
view=view)
body = {
'tableReference': {
'tableId': table_id
},
'view': view
}
method.assert_called_once_with(projectId=project_id, datasetId=dataset_id, body=body)
def test_patch_table(self):
project_id = 'bq-project'
dataset_id = 'bq_dataset'
table_id = 'bq_table'
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'}
]
time_partitioning_patched = {
'expirationMs': 10000000
}
require_partition_filter_patched = True
mock_service = mock.Mock()
method = (mock_service.tables.return_value.patch)
cursor = hook.BigQueryBaseCursor(mock_service, project_id)
cursor.patch_table(
dataset_id, table_id, project_id,
description=description_patched,
expiration_time=expiration_time_patched,
friendly_name=friendly_name_patched,
labels=labels_patched, schema=schema_patched,
time_partitioning=time_partitioning_patched,
require_partition_filter=require_partition_filter_patched
)
body = {
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {
"fields": schema_patched
},
"timePartitioning": time_partitioning_patched,
"requirePartitionFilter": require_partition_filter_patched
}
method.assert_called_once_with(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=body
)
def test_patch_view(self):
project_id = 'bq-project'
dataset_id = 'bq_dataset'
view_id = 'bq_view'
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False
}
mock_service = mock.Mock()
method = (mock_service.tables.return_value.patch)
cursor = hook.BigQueryBaseCursor(mock_service, project_id)
cursor.patch_table(dataset_id, view_id, project_id, view=view_patched)
body = {
'view': view_patched
}
method.assert_called_once_with(
projectId=project_id,
datasetId=dataset_id,
tableId=view_id,
body=body
)
def test_create_empty_table_succeed(self):
project_id = 'bq-project'
dataset_id = 'bq_dataset'
table_id = 'bq_table'
mock_service = mock.Mock()
method = mock_service.tables.return_value.insert
cursor = hook.BigQueryBaseCursor(mock_service, project_id)
cursor.create_empty_table(
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id)
body = {
'tableReference': {
'tableId': table_id
}
}
method.assert_called_once_with(
projectId=project_id,
datasetId=dataset_id,
body=body
)
def test_create_empty_table_with_extras_succeed(self):
project_id = 'bq-project'
dataset_id = 'bq_dataset'
table_id = 'bq_table'
schema_fields = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'created', 'type': 'DATE', 'mode': 'REQUIRED'},
]
time_partitioning = {"field": "created", "type": "DAY"}
cluster_fields = ['name']
mock_service = mock.Mock()
method = mock_service.tables.return_value.insert
cursor = hook.BigQueryBaseCursor(mock_service, project_id)
cursor.create_empty_table(
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
schema_fields=schema_fields,
time_partitioning=time_partitioning,
cluster_fields=cluster_fields
)
body = {
'tableReference': {
'tableId': table_id
},
'schema': {
'fields': schema_fields
},
'timePartitioning': time_partitioning,
'clustering': {
'fields': cluster_fields
}
}
method.assert_called_once_with(
projectId=project_id,
datasetId=dataset_id,
body=body
)
def test_create_empty_table_on_exception(self):
project_id = 'bq-project'
dataset_id = 'bq_dataset'
table_id = 'bq_table'
mock_service = mock.Mock()
method = mock_service.tables.return_value.insert
method.return_value.execute.side_effect = HttpError(
resp={'status': '400'}, content=b'Bad request')
cursor = hook.BigQueryBaseCursor(mock_service, project_id)
with self.assertRaises(Exception):
cursor.create_empty_table(project_id, dataset_id, table_id)
class TestBigQueryCursor(unittest.TestCase):
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_execute_with_parameters(self, mocked_rwc):
hook.BigQueryCursor("test", "test").execute(
"SELECT %(foo)s", {"foo": "bar"})
mocked_rwc.assert_called_once()
class TestLabelsInRunJob(unittest.TestCase):
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_with_arg(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['labels'], {'label1': 'test1', 'label2': 'test2'}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
labels={'label1': 'test1', 'label2': 'test2'}
)
mocked_rwc.assert_called_once()
class TestDatasetsOperations(unittest.TestCase):
def test_create_empty_dataset_no_dataset_id_err(self):
with self.assertRaises(ValueError):
hook.BigQueryBaseCursor(
mock.Mock(), "test_create_empty_dataset").create_empty_dataset(
dataset_id="", project_id="")
def test_create_empty_dataset_duplicates_call_err(self):
with self.assertRaises(ValueError):
hook.BigQueryBaseCursor(
mock.Mock(), "test_create_empty_dataset").create_empty_dataset(
dataset_id="", project_id="project_test",
dataset_reference={
"datasetReference":
{"datasetId": "test_dataset",
"projectId": "project_test2"}})
def test_get_dataset_without_dataset_id(self):
with mock.patch.object(hook.BigQueryHook, 'get_service'):
with self.assertRaises(ValueError):
hook.BigQueryBaseCursor(
mock.Mock(), "test_create_empty_dataset").get_dataset(
dataset_id="", project_id="project_test")
def test_get_dataset(self):
expected_result = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {
"projectId": "your-project",
"datasetId": "dataset_2_test"
}
}
dataset_id = "test_dataset"
project_id = "project_test"
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
with mock.patch.object(bq_hook.service, 'datasets') as MockService:
MockService.return_value.get(datasetId=dataset_id,
projectId=project_id).execute.\
return_value = expected_result
result = bq_hook.get_dataset(dataset_id=dataset_id,
project_id=project_id)
self.assertEqual(result, expected_result)
def test_get_datasets_list(self):
expected_result = {'datasets': [
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {
"projectId": "your-project",
"datasetId": "dataset_2_test"
}
},
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_1_test",
"datasetReference": {
"projectId": "your-project",
"datasetId": "dataset_1_test"
}
}
]}
        project_id = "project_test"
mocked = mock.Mock()
with mock.patch.object(hook.BigQueryBaseCursor(mocked, project_id).service,
'datasets') as MockService:
MockService.return_value.list(
projectId=project_id).execute.return_value = expected_result
result = hook.BigQueryBaseCursor(
mocked, "test_create_empty_dataset").get_datasets_list(
project_id=project_id)
self.assertEqual(result, expected_result['datasets'])
class TestTimePartitioningInRunJob(unittest.TestCase):
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_load_default(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertIsNone(config['load'].get('timePartitioning'))
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
mocked_rwc.assert_called_once()
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_with_auto_detect(self, run_with_config):
destination_project_dataset_table = "autodetect.table"
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
cursor.run_load(destination_project_dataset_table, [], [], autodetect=True)
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['load']['autodetect'], True)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_load_with_arg(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['load']['timePartitioning'],
{
'field': 'test_field',
'type': 'DAY',
'expirationMs': 1000
}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
mocked_rwc.assert_called_once()
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_default(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertIsNone(config['query'].get('timePartitioning'))
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(sql='select 1')
mocked_rwc.assert_called_once()
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_with_arg(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['query']['timePartitioning'],
{
'field': 'test_field',
'type': 'DAY',
'expirationMs': 1000
}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
time_partitioning={'type': 'DAY',
'field': 'test_field', 'expirationMs': 1000}
)
mocked_rwc.assert_called_once()
def test_dollar_makes_partition(self):
tp_out = _cleanse_time_partitioning('test.teast$20170101', {})
expect = {
'type': 'DAY'
}
self.assertEqual(tp_out, expect)
def test_extra_time_partitioning_options(self):
tp_out = _cleanse_time_partitioning(
'test.teast',
{'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
expect = {
'type': 'DAY',
'field': 'test_field',
'expirationMs': 1000
}
self.assertEqual(tp_out, expect)
class TestClusteringInRunJob(unittest.TestCase):
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_load_default(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertIsNone(config['load'].get('clustering'))
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
mocked_rwc.assert_called_once()
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_load_with_arg(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['load']['clustering'],
{
'fields': ['field1', 'field2']
}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'}
)
mocked_rwc.assert_called_once()
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_default(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertIsNone(config['query'].get('clustering'))
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(sql='select 1')
mocked_rwc.assert_called_once()
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_with_arg(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['query']['clustering'],
{
'fields': ['field1', 'field2']
}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'}
)
mocked_rwc.assert_called_once()
class TestBigQueryHookLegacySql(unittest.TestCase):
"""Ensure `use_legacy_sql` param in `BigQueryHook` propagates properly."""
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_hook_uses_legacy_sql_by_default(self, run_with_config):
with mock.patch.object(hook.BigQueryHook, 'get_service'):
bq_hook = hook.BigQueryHook()
bq_hook.get_first('query')
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], True)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_legacy_sql_override_propagates_properly(self, run_with_config):
with mock.patch.object(hook.BigQueryHook, 'get_service'):
bq_hook = hook.BigQueryHook(use_legacy_sql=False)
bq_hook.get_first('query')
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], False)
class TestBigQueryHookLocation(unittest.TestCase):
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_location_propagates_properly(self, run_with_config):
with mock.patch.object(hook.BigQueryHook, 'get_service'):
bq_hook = hook.BigQueryHook(location=None)
self.assertIsNone(bq_hook.location)
bq_cursor = hook.BigQueryBaseCursor(mock.Mock(),
'test-project',
location=None)
self.assertIsNone(bq_cursor.location)
bq_cursor.run_query(sql='select 1', location='US')
run_with_config.assert_called_once()
self.assertEqual(bq_cursor.location, 'US')
class TestBigQueryHookRunWithConfiguration(unittest.TestCase):
def test_run_with_configuration_location(self):
project_id = 'bq-project'
running_job_id = 'job_vjdi28vskdui2onru23'
location = 'asia-east1'
mock_service = mock.Mock()
method = (mock_service.jobs.return_value.get)
mock_service.jobs.return_value.insert.return_value.execute.return_value = {
'jobReference': {
'jobId': running_job_id,
'location': location
}
}
mock_service.jobs.return_value.get.return_value.execute.return_value = {
'status': {
'state': 'DONE'
}
}
cursor = hook.BigQueryBaseCursor(mock_service, project_id)
cursor.running_job_id = running_job_id
cursor.run_with_configuration({})
method.assert_called_once_with(
projectId=project_id,
jobId=running_job_id,
location=location
)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
thientu/scikit-learn
|
examples/svm/plot_custom_kernel.py
|
171
|
1546
|
"""
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
LuciaWyn/csvFlaskPython
|
data.py
|
1
|
7348
|
from flask import Flask, render_template, request
from flask import jsonify
import pandas as pd
from IPython.display import HTML
app = Flask(__name__)
@app.route('/')
def index():
#return "hello"
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
#df = df.groupby(by=['Name'])['Address1'].sum().to_frame()
# df=df = df.drop(['Unnamed: 11'],axis=1)
#df = df['Name']
#df= "<<p>>dfd<</p>>"
name = df['Name']
ad1 = df['Address1']
dfh = df.to_json(orient='records')
l = len(df.index)
ll = l-1
#dfh = df.to_html()
#df = df.values
#dfh = df.to_json()
#return render_template('index.html', lock=dfh)
#return render_template('index.html', tables=df)
return render_template('index1.html', jd=dfh, ct=ll, p="Home")
#return dfn
@app.route('/rent')
def rent():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
    dfl = df['Rent'].sum()
dfl = "£"+str(dfl)
#return df.to_html()
return render_template('index2.html', t=dfl, p="Total Rent")
@app.route('/lease')
def lease():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
#df = df.drop(['Unnamed: 11'],axis=1)
    df = df.sort_values('Rent', axis=0, ascending=True)
#return df.to_html()
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('index1.html', jd=dfh, ct=ll, p="Leases in Ascending Order")
@app.route('/top5')
def top5():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
#df = df.drop(['Unnamed: 11'],axis=1)
df = df.head()
#df= df.sort_values('Rent', axis=0, ascending=False)
#return df.to_html()
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('index3.html', jd=dfh, ct = ll, p="Top 5 results")
@app.route('/top5/asc')
def top5asc():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
#df = df.drop(['Unnamed: 11'],axis=1)
df = df.head()
    df = df.sort_values('Rent', axis=0, ascending=True)
#return df.to_html()
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('index3.html', jd=dfh, ct = ll, p="Top 5 results in Ascending order by Rent")
@app.route('/top5/dec')
def top5dec():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
df = df.head()
    df = df.sort_values('Rent', axis=0, ascending=False)
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('index3.html', jd=dfh, ct = ll, p="Top 5 results in Ascending order by Rent")
@app.route('/tenants')
def tenants():
#l = "<p>Tenants List</p>"
#l = l+"<a href=""/tenants/ASL"">Arqiva Services ltd</a>"
return render_template('tennants1.html')
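# Each /tenants/<operator> route below filters the CSV on the Tenant column
# (exact match or substring) and renders the matching rows.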
@app.route('/tenants/ASL')
def asl():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
df = df.loc[df['Tenant'] == 'Arqiva Services ltd']
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('tennants.html', jd=dfh, ct=ll, cct=l, p="Arqiva Services ltd")
@app.route('/tenants/Vodafone')
def vl():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
df = df.loc[df['Tenant'] == 'Vodafone Ltd']
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('tennants.html', jd=dfh, ct=ll, cct=l, p="Vodafone Ltd")
@app.route('/tenants/O2')
def ol():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
df = df.loc[df['Tenant'] == 'O2 (UK) Ltd']
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('tennants.html', jd=dfh, ct=ll, cct=l, p="O2 (UK) Ltd")
@app.route('/tenants/EEL')
def eel():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
df = df[df['Tenant'].str.contains('Everything Everywhere Ltd')]
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('tennants.html', jd=dfh, ct=ll, cct=l, p="Everything Everywhere Ltd")
@app.route('/tenants/Hutchinson')
def hutch():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
df = df[df['Tenant'].str.contains('Hutchinson')]
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('tennants.html', jd=dfh, ct=ll, cct=l, p="Hutchinson")
@app.route('/tenants/CTI')
def cti():
df = pd.read_csv('MobilePhoneMasts.csv')
df = df.dropna(how='all')
df = df.loc[df['Tenant'] == 'Cornerstone Telecommunications Infrastructure']
l = len(df.index)
ll = l-1
dfh = df.to_json(orient='records')
return render_template('tennants.html', jd=dfh, ct=ll, cct=l, p="Cornerstone Telecommunications Infrastructure")
@app.route('/times')
def times():
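    # Leases whose start date falls between 31/05/1999 and 31/08/2007
    # (inclusive), with both dates reformatted as dd/mm/YYYY for display.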
df1 = pd.read_csv('MobilePhoneMasts.csv')
df1 = df1.dropna(how='all')
#df1['Start'] = df1['LeaseStart'].dt.strftime('%d/%m/%Y')
    df1['Start'] = pd.to_datetime(df1.LeaseStart)
    df1['End'] = pd.to_datetime(df1.LeaseEnd)
    # The comparison bounds are written day-first (UK style), so say so explicitly.
    ts = pd.to_datetime('31/05/1999', dayfirst=True)
    td = pd.to_datetime('31/08/2007', dayfirst=True)
    # Copy the filtered frame so the string formatting below does not raise
    # pandas' SettingWithCopyWarning.
    df2 = df1.loc[(df1.Start >= ts) & (df1.Start <= td)].copy()
    df2['Start'] = df2['Start'].dt.strftime('%d/%m/%Y')
    df2['End'] = df2['End'].dt.strftime('%d/%m/%Y')
df2 = df2.drop(['LeaseStart'],axis=1)
df2 = df2.drop(['LeaseEnd'],axis=1)
l = len(df2.index)
ll = l-1
dfh = df2.to_json(orient='records')
return render_template('index1.html', jd=dfh, ct=ll, p="From 1/06/99 to 31/08/07")
@app.route('/add')
def add():
return render_template('form.html')
@app.route('/added', methods=['POST'])
def added():
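    # Collect the submitted form fields and append them as a new row to the
    # CSV; the lease dates are assembled as zero-padded day, month and
    # two-digit year strings, presumably to match the existing CSV format.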
name = request.form['name']
    # The address lines are optional; .get() returns '' for a missing field
    # instead of aborting the request with a 400 error.
    ad1 = request.form.get('adr1', '')
    ad2 = request.form.get('adr2', '')
    ad3 = request.form.get('adr3', '')
    ad4 = request.form.get('adr4', '')
sdd = request.form['sd']
u = request.form['unit']
t = request.form['tenant']
y = request.form['year']
r = request.form['rent']
r = str(r)
ssdd = str(sdd)
if (len(sdd)==1):
ssdd="0"+ssdd
sdy = request.form['sy']
ssdy = str(sdy)
if(len(ssdy)!=2):
ssdy = (ssdy[-2:])
sdate = ssdd+"-"+request.form['sm']+"-"+ssdy
edd = request.form['ed']
eedd = str(edd)
if (len(eedd)==1):
eedd="0"+eedd
edy = request.form['ey']
eedy = str(edy)
if(len(eedy)!=2):
eedy = (eedy[-2:])
edate = eedd+"-"+request.form['em']+"-"+eedy
import csv
with open('MobilePhoneMasts.csv', 'a') as newFile:
newFileWriter = csv.writer(newFile)
newFileWriter.writerow([name, ad1, ad2, ad3,ad4, u, t, sdate, edate, y, r])
return render_template('index2.html', t="<p>Data sucessfully added</p>", p="Added")
if __name__ == '__main__':
app.run()
|
gpl-3.0
|