repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
davidgbe/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
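# (Illustrative sketch, not part of the original test) The XXX note above refers
# to scipy.sparse.rand; with a newer SciPy a similar random sparse matrix could
# be built roughly as (density chosen arbitrarily here):
#     X = sp.rand(n_samples, n_features, density=0.2, format='csr', random_state=42)
# The hand-rolled construction above is kept so the test values stay reproducible.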
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "array" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, algorithm=algo, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
endolith/scikit-image | doc/ext/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory to which plot:: file names are relative.
(If None or empty, file names are relative to the directory where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO  # Python 2 counterpart of io.StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| bsd-3-clause |
ky822/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # list of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
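# (Illustrative only) the cutoff parameter limits the BFS depth; for the path
# graph in the docstring above, cutoff=1 keeps only the source and its direct
# neighbour:
#     single_source_shortest_path_length(graph, 0, cutoff=1)  # -> {0: 0, 1: 1}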
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
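# A small usage sketch (assumed values, for illustration only): for the 4-node
# path graph used in the BFS example above, the unnormalized laplacian is the
# degree matrix minus the adjacency matrix, L = D - A:
#     A = np.array([[0, 1, 0, 0],
#                   [1, 0, 1, 0],
#                   [0, 1, 0, 1],
#                   [0, 0, 1, 0]])
#     lap = graph_laplacian(A)  # diagonal == [1, 2, 2, 1], off-diagonal == -A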
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
chloeyangu/BigDataAnalytics | Terrorisks/flaskr/flaskr/BT4221.py | 1 | 3372 |
# coding: utf-8
# In[6]:
# # Prelim Analysis
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import pandas as pd
from random import randint
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
#import the data and rename the columns
d = pd.read_csv('file.csv', encoding='ISO-8859-1',low_memory=False)#, usecols=[0, 1, 2, 3, 8, 11, 13, 14, 26, 29, 35, 37, 84, 100, 103])
d = d.rename(columns={'eventid':'id', 'iyear':'year', 'imonth':'month', 'iday':'day', 'country_txt':'country', 'provstate':'state', 'success':'success','targtype1_txt':'target', 'targsubtype1_txt' : 'targetsub','weaptype1_txt':'weapon', 'attacktype1_txt':'attack','nkill':'fatalities', 'nwound':'injuries'})
d = d.drop(['id'],axis=1)
df_num = d.select_dtypes(include=[np.number])
df_inf = df_num.replace([np.inf, -np.inf], np.nan)
df_filled = df_inf.fillna(0)
df_filled = df_filled.drop(['success'],axis=1)
# In[70]:
df_filled.corr().abs()
from numpy import float32
#df_filled.head()
df_transformed = df_filled.astype(float32)
#df_transformed.info()
# In[17]:
lm = LinearRegression()
y = d['success']
X = df_filled[['month', 'day','region','property','propextent','attacktype1','weaptype1','nperps','specificity' ]]
X_train, X_test,y_train, y_test = train_test_split(X,y,random_state=2)
#print(X_train.head())
lm.fit(X_train, y_train)
r = lm.score(X_train, y_train)
#print (r)
pred_train = lm.predict(X_train)
pred_test = lm.predict(X_test)
# In[18]:
#Random Forest
y_random = df_filled['multiple']
X_random = df_filled[['month', 'day','region','property','propextent','attacktype1','weaptype1','nperps','specificity' ]]
features_train, features_test,target_train, target_test = train_test_split(X_random,y_random, test_size = 0.2,random_state=0)
#Random Forest
forest=RandomForestClassifier(n_estimators=10)
forest = forest.fit( features_train, target_train)
output = forest.predict(features_test).astype(int)
forest.score(features_train, target_train )
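# (Illustrative sketch) the held-out accuracy could additionally be checked with
# the accuracy_score helper already imported above, e.g.:
#     test_acc = accuracy_score(target_test, output)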
# In[15]:
from patsy import dmatrices
from sklearn.linear_model import LogisticRegression
#Logistic Regression
y_log, X_log = dmatrices('multiple ~ month + day + region + property + propextent + attacktype1 + weaptype1+ nperps + specificity', df_filled, return_type="dataframe")
y_log = np.ravel(y_log)
# instantiate a logistic regression model, and fit with X and y
model = LogisticRegression()
model = model.fit(X_log, y_log)
X_train_log, X_test_log, y_train_log, y_test_log = train_test_split(X_log, y_log, test_size=0.3, random_state=0)
model2 = LogisticRegression()
model2.fit(X_train_log, y_train_log)
def plots(lm,rf,lr):
counter=randint(1,1000)
import matplotlib.pyplot as plt
results = []
results.append(lm[0])
results.append(lr[0][1])
results.append(rf[0][0])
N = len(results)
x = [1,2,3]
#y=['Linear Regression', 'Logistic Regression', 'Random Forest']
width = 1/1.5
plt.bar(x,results,width,color = "green",align='center')
plt.savefig("static/images/fig"+str(counter)+".png")
plt.clf()
return "static/images/fig"+str(counter)+".png"
| mit |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/io/tests/test_excel.py | 1 | 61255 | # pylint: disable=E1101
from pandas.compat import u, range, map, openpyxl_compat
from datetime import datetime, date, time
import sys
import os
from distutils.version import LooseVersion
import operator
import platform
import functools
import nose
from numpy import nan
import numpy as np
from numpy.testing.decorators import slow
from pandas import DataFrame, Index, MultiIndex
from pandas.io.parsers import read_csv
from pandas.io.excel import (
ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _Openpyxl1Writer,
_Openpyxl2Writer, register_writer, _XlsxWriter
)
from pandas.io.common import URLError
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option, get_option
import pandas.util.testing as tm
import pandas as pd
def _skip_if_no_xlrd():
try:
import xlrd
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
raise nose.SkipTest('xlrd < 0.9, skipping')
except ImportError:
raise nose.SkipTest('xlrd not installed, skipping')
def _skip_if_no_xlwt():
try:
import xlwt # NOQA
except ImportError:
raise nose.SkipTest('xlwt not installed, skipping')
def _skip_if_no_openpyxl():
try:
import openpyxl # NOQA
except ImportError:
raise nose.SkipTest('openpyxl not installed, skipping')
def _skip_if_no_xlsxwriter():
try:
import xlsxwriter # NOQA
except ImportError:
raise nose.SkipTest('xlsxwriter not installed, skipping')
def _skip_if_no_excelsuite():
_skip_if_no_xlrd()
_skip_if_no_xlwt()
_skip_if_no_openpyxl()
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class SharedItems(object):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
self.xlsx1 = os.path.join(self.dirpath, 'test.xlsx')
self.multisheet = os.path.join(self.dirpath, 'test_multisheet.xlsx')
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
class ExcelReaderTests(SharedItems, tm.TestCase):
def test_parse_cols_int(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['xls', 'xlsx', 'xlsm']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=3)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols=3)
# TODO add index to xls file)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_list(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['xls', 'xlsx', 'xlsm']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=[0, 2, 3])
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols=[0, 2, 3])
# TODO add index to xls file)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_str(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['xls', 'xlsx', 'xlsm']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols='A:D')
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C,D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C,D')
# TODO add index to xls file
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C:D')
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_excel_stop_iterator(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test3.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([[np.nan]], columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def test_excel_passes_na(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xlsx'))
parsed = excel_data.parse('Sheet1', keep_default_na=False,
na_values=['apple'])
expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
parsed = excel_data.parse('Sheet1', keep_default_na=True,
na_values=['apple'])
expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def check_excel_table_sheet_by_index(self, filename, csvfile):
import xlrd
pth = os.path.join(self.dirpath, filename)
xls = ExcelFile(pth)
df = xls.parse(0, index_col=0, parse_dates=True)
df2 = self.read_csv(csvfile, index_col=0, parse_dates=True)
df3 = xls.parse(1, skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xls.parse(0, index_col=0, parse_dates=True, skipfooter=1)
df5 = xls.parse(0, index_col=0, parse_dates=True, skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
self.assertRaises(xlrd.XLRDError, xls.parse, 'asdf')
def test_excel_table_sheet_by_index(self):
_skip_if_no_xlrd()
for filename, csvfile in [(self.xls1, self.csv1),
(self.xlsx1, self.csv1)]:
self.check_excel_table_sheet_by_index(filename, csvfile)
def test_excel_table(self):
_skip_if_no_xlrd()
pth = os.path.join(self.dirpath, 'test.xls')
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
def test_excel_read_buffer(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xls')
f = open(pth, 'rb')
xls = ExcelFile(f)
# it works
xls.parse('Sheet1', index_col=0, parse_dates=True)
pth = os.path.join(self.dirpath, 'test.xlsx')
f = open(pth, 'rb')
xl = ExcelFile(f)
xl.parse('Sheet1', index_col=0, parse_dates=True)
def test_read_xlrd_Book(self):
_skip_if_no_xlrd()
_skip_if_no_xlwt()
import xlrd
df = self.frame
with ensure_clean('.xls') as pth:
df.to_excel(pth, "SheetA")
book = xlrd.open_workbook(pth)
with ExcelFile(book, engine="xlrd") as xl:
result = xl.parse("SheetA")
tm.assert_frame_equal(df, result)
result = read_excel(book, sheetname="SheetA", engine="xlrd")
tm.assert_frame_equal(df, result)
@tm.network
def test_read_from_http_url(self):
_skip_if_no_xlrd()
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/test.xlsx')
url_table = read_excel(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'test.xlsx')
local_table = read_excel(localtable)
tm.assert_frame_equal(url_table, local_table)
@slow
def test_read_from_file_url(self):
_skip_if_no_xlrd()
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'test.xlsx')
local_table = read_excel(localtable)
try:
url_table = read_excel('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_xlsx_table(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xlsx')
xlsx = ExcelFile(pth)
df = xlsx.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xlsx.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
# TODO add index to xlsx file
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
def test_reader_closes_file(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xlsx')
f = open(pth, 'rb')
with ExcelFile(f) as xlsx:
# parses okay
xlsx.parse('Sheet1', index_col=0)
self.assertTrue(f.closed)
def test_reader_special_dtypes(self):
_skip_if_no_xlrd()
expected = DataFrame.from_items([
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
datetime(1905, 1, 1), datetime(2013, 12, 14),
datetime(2015, 3, 14)])
])
xlsx_path = os.path.join(self.dirpath, 'test_types.xlsx')
xls_path = os.path.join(self.dirpath, 'test_types.xls')
# should read in correctly and infer types
for path in (xls_path, xlsx_path):
actual = read_excel(path, 'Sheet1')
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[1, "Str2Col"] = 3.0
for path in (xls_path, xlsx_path):
actual = read_excel(path, 'Sheet1', convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = read_excel(xlsx_path, 'Sheet1', index_col=icol)
actual2 = read_excel(xlsx_path, 'Sheet1', index_col=name)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
tm.assert_frame_equal(actual2, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str})
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str},
convert_float=False)
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self):
_skip_if_no_xlrd()
expected = DataFrame.from_items([
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
("StrCol", ['1', np.nan, '3', '4', '5']),
])
converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
'FloatCol': lambda x: 10 * x if x else np.nan,
2: lambda x: 'Found' if x != '' else 'Not found',
3: lambda x: str(x) if x else '',
}
xlsx_path = os.path.join(self.dirpath, 'test_converters.xlsx')
xls_path = os.path.join(self.dirpath, 'test_converters.xls')
# should read in correctly and set types of single cells (not array dtypes)
for path in (xls_path, xlsx_path):
actual = read_excel(path, 'Sheet1', converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
_skip_if_no_xlrd()
dfs = read_excel(self.multisheet,sheetname=None)
expected_keys = ['Alpha','Beta','Charlie']
tm.assert_contains_all(expected_keys,dfs.keys())
def test_reading_multiple_specific_sheets(self):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
_skip_if_no_xlrd()
#Explicitly request duplicates. Only the set should be returned.
expected_keys = [2,'Charlie','Charlie']
dfs = read_excel(self.multisheet,sheetname=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys,dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_creating_and_reading_multiple_sheets(self):
# Test reading multiple sheets, from a runtime created excel file
# with multiple sheets.
# See PR #9450
_skip_if_no_xlrd()
_skip_if_no_xlwt()
def tdf(sheetname):
d, i = [11,22,33], [1,2,3]
return DataFrame(d,i,columns=[sheetname])
sheets = ['AAA','BBB','CCC']
dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets,dfs))
with ensure_clean('.xlsx') as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in dfs.items():
df.to_excel(ew,sheetname)
dfs_returned = pd.read_excel(pth,sheetname=sheets)
for s in sheets:
tm.assert_frame_equal(dfs[s],dfs_returned[s])
def test_reader_seconds(self):
# Test reading times with and without milliseconds. GH5945.
_skip_if_no_xlrd()
import xlrd
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
# Xlrd >= 0.9.3 can handle Excel milliseconds.
expected = DataFrame.from_items([("Time",
[time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54)])])
else:
# Xlrd < 0.9.3 rounds Excel milliseconds.
expected = DataFrame.from_items([("Time",
[time(1, 2, 3),
time(2, 45, 56),
time(4, 29, 49),
time(6, 13, 42),
time(7, 57, 35),
time(9, 41, 29),
time(11, 25, 22),
time(13, 9, 15),
time(14, 53, 8),
time(16, 37, 1),
time(18, 20, 54)])])
epoch_1900 = os.path.join(self.dirpath, 'times_1900.xls')
epoch_1904 = os.path.join(self.dirpath, 'times_1904.xls')
actual = read_excel(epoch_1900, 'Sheet1')
tm.assert_frame_equal(actual, expected)
actual = read_excel(epoch_1904, 'Sheet1')
tm.assert_frame_equal(actual, expected)
class ExcelWriterBase(SharedItems):
# Base class for test cases to run with different Excel writers.
# To add a writer test, define the following:
# 1. A check_skip function that skips your tests if your writer isn't
# installed.
# 2. Add a property ext, which is the file extension that your writer
# writes to. (needs to start with '.' so it's a valid path)
# 3. Add a property engine_name, which is the name of the writer class.
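# A minimal sketch of a concrete subclass following the three points above
# (class and engine names here are illustrative assumptions, not necessarily
# the suite's actual test classes):
#
#     class OpenpyxlWriterTests(ExcelWriterBase, tm.TestCase):
#         ext = '.xlsx'
#         engine_name = 'openpyxl'
#         check_skip = staticmethod(_skip_if_no_openpyxl)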
# Test with MultiIndex and Hierarchical Rows as merged cells.
merge_cells = True
def setUp(self):
self.check_skip()
super(ExcelWriterBase, self).setUp()
self.option_name = 'io.excel.%s.writer' % self.ext.strip('.')
self.prev_engine = get_option(self.option_name)
set_option(self.option_name, self.engine_name)
def tearDown(self):
set_option(self.option_name, self.prev_engine)
def test_excel_sheet_by_name_raise(self):
_skip_if_no_xlrd()
import xlrd
with ensure_clean(self.ext) as pth:
gt = DataFrame(np.random.randn(10, 2))
gt.to_excel(pth)
xl = ExcelFile(pth)
df = xl.parse(0)
tm.assert_frame_equal(gt, df)
self.assertRaises(xlrd.XLRDError, xl.parse, '0')
def test_excelwriter_contextmanager(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as pth:
with ExcelWriter(pth) as writer:
self.frame.to_excel(writer, 'Data1')
self.frame2.to_excel(writer, 'Data2')
with ExcelFile(pth) as reader:
found_df = reader.parse('Data1')
found_df2 = reader.parse('Data2')
tm.assert_frame_equal(found_df, self.frame)
tm.assert_frame_equal(found_df2, self.frame2)
def test_roundtrip(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# test roundtrip
self.frame.to_excel(path, 'test1')
recons = read_excel(path, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', index=False)
recons = read_excel(path, 'test1', index_col=None)
recons.index = self.frame.index
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', na_rep='NA')
recons = read_excel(path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
# GH 3611
self.frame.to_excel(path, 'test1', na_rep='88')
recons = read_excel(path, 'test1', index_col=0, na_values=['88'])
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, 'test1', na_rep='88')
recons = read_excel(path, 'test1', index_col=0,
na_values=[88, 88.0])
tm.assert_frame_equal(self.frame, recons)
# GH 6573
self.frame.to_excel(path, 'Sheet1')
recons = read_excel(path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(path, '0')
recons = read_excel(path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
def test_mixed(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.mixed_frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0)
tm.assert_frame_equal(self.mixed_frame, recons)
def test_tsframe(self):
_skip_if_no_xlrd()
df = tm.makeTimeDataFrame()[:5]
with ensure_clean(self.ext) as path:
df.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1')
tm.assert_frame_equal(df, recons)
def test_basics_with_nan(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
def test_int_types(self):
_skip_if_no_xlrd()
for np_type in (np.int8, np.int16, np.int32, np.int64):
with ensure_clean(self.ext) as path:
# Test np.int values read come back as int (rather than float
# which is Excel's format).
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
dtype=np_type)
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1')
int_frame = frame.astype(np.int64)
tm.assert_frame_equal(int_frame, recons)
recons2 = read_excel(path, 'test1')
tm.assert_frame_equal(int_frame, recons2)
# test with convert_float=False comes back as float
float_frame = frame.astype(float)
recons = read_excel(path, 'test1', convert_float=False)
tm.assert_frame_equal(recons, float_frame)
def test_float_types(self):
_skip_if_no_xlrd()
for np_type in (np.float16, np.float32, np.float64):
with ensure_clean(self.ext) as path:
# Test np.float values read come back as float.
frame = DataFrame(np.random.random_sample(10), dtype=np_type)
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1').astype(np_type)
tm.assert_frame_equal(frame, recons, check_dtype=False)
def test_bool_types(self):
_skip_if_no_xlrd()
for np_type in (np.bool8, np.bool_):
with ensure_clean(self.ext) as path:
# Test np.bool values read come back as bool.
frame = (DataFrame([1, 0, True, False], dtype=np_type))
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1').astype(np_type)
tm.assert_frame_equal(frame, recons)
def test_inf_roundtrip(self):
_skip_if_no_xlrd()
frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
with ensure_clean(self.ext) as path:
frame.to_excel(path, 'test1')
reader = ExcelFile(path)
recons = reader.parse('test1')
tm.assert_frame_equal(frame, recons)
def test_sheets(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# Test writing to separate sheets
writer = ExcelWriter(path)
self.frame.to_excel(writer, 'test1')
self.tsframe.to_excel(writer, 'test2')
writer.save()
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
recons = reader.parse('test2', index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
np.testing.assert_equal(2, len(reader.sheet_names))
np.testing.assert_equal('test1', reader.sheet_names[0])
np.testing.assert_equal('test2', reader.sheet_names[1])
def test_colaliases(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_excel(path, 'test1', header=col_aliases)
reader = ExcelFile(path)
rs = reader.parse('test1', index_col=0)
xp = self.frame2.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
def test_roundtrip_indexlabels(self):
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
self.frame['A'][:5] = nan
self.frame.to_excel(path, 'test1')
self.frame.to_excel(path, 'test1', columns=['A', 'B'])
self.frame.to_excel(path, 'test1', header=False)
self.frame.to_excel(path, 'test1', index=False)
# test index_label
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(path, 'test1',
index_label=['test'],
merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=0,
has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(path,
'test1',
index_label=['test', 'dummy', 'dummy2'],
merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=0,
has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(path,
'test1',
index_label='test',
merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=0,
has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
tm.assert_frame_equal(frame, recons.astype(bool))
with ensure_clean(self.ext) as path:
self.frame.to_excel(path,
'test1',
columns=['A', 'B', 'C', 'D'],
index=False, merge_cells=self.merge_cells)
# take 'A' and 'B' as indexes (same row as cols 'C', 'D')
df = self.frame.copy()
df = df.set_index(['A', 'B'])
reader = ExcelFile(path)
recons = reader.parse('test1', index_col=[0, 1])
tm.assert_frame_equal(df, recons, check_less_precise=True)
def test_excel_roundtrip_indexname(self):
_skip_if_no_xlrd()
df = DataFrame(np.random.randn(10, 4))
df.index.name = 'foo'
with ensure_clean(self.ext) as path:
df.to_excel(path, merge_cells=self.merge_cells)
xf = ExcelFile(path)
result = xf.parse(xf.sheet_names[0],
index_col=0,
has_index_names=self.merge_cells)
tm.assert_frame_equal(result, df)
self.assertEqual(result.index.name, 'foo')
def test_excel_roundtrip_datetime(self):
_skip_if_no_xlrd()
# datetime.date, not sure what to test here exactly
tsf = self.tsframe.copy()
with ensure_clean(self.ext) as path:
tsf.index = [x.date() for x in self.tsframe.index]
tsf.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1')
tm.assert_frame_equal(self.tsframe, recons)
# GH4133 - excel output format strings
def test_excel_date_datetime_format(self):
_skip_if_no_xlrd()
df = DataFrame([[date(2014, 1, 31),
date(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=['DATE', 'DATETIME'], columns=['X', 'Y'])
df_expected = DataFrame([[datetime(2014, 1, 31),
datetime(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=['DATE', 'DATETIME'], columns=['X', 'Y'])
with ensure_clean(self.ext) as filename1:
with ensure_clean(self.ext) as filename2:
writer1 = ExcelWriter(filename1)
writer2 = ExcelWriter(filename2,
date_format='DD.MM.YYYY',
datetime_format='DD.MM.YYYY HH-MM-SS')
df.to_excel(writer1, 'test1')
df.to_excel(writer2, 'test1')
writer1.close()
writer2.close()
reader1 = ExcelFile(filename1)
reader2 = ExcelFile(filename2)
rs1 = reader1.parse('test1', index_col=None)
rs2 = reader2.parse('test1', index_col=None)
tm.assert_frame_equal(rs1, rs2)
# since the reader returns a datetime object for dates, we need
# to use df_expected to check the result
tm.assert_frame_equal(rs2, df_expected)
def test_to_excel_periodindex(self):
_skip_if_no_xlrd()
frame = self.tsframe
xp = frame.resample('M', kind='period')
with ensure_clean(self.ext) as path:
xp.to_excel(path, 'sht1')
reader = ExcelFile(path)
rs = reader.parse('sht1', index_col=0, parse_dates=True)
tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self):
_skip_if_no_xlrd()
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
with ensure_clean(self.ext) as path:
frame.to_excel(path, 'test1', header=False)
frame.to_excel(path, 'test1', columns=['A', 'B'])
# round trip
frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
df = reader.parse('test1', index_col=[0, 1],
parse_dates=False,
has_index_names=self.merge_cells)
tm.assert_frame_equal(frame, df)
self.assertEqual(frame.index.names, df.index.names)
def test_to_excel_multiindex_dates(self):
_skip_if_no_xlrd()
# try multiindex with dates
tsframe = self.tsframe.copy()
new_index = [tsframe.index, np.arange(len(tsframe.index))]
tsframe.index = MultiIndex.from_arrays(new_index)
with ensure_clean(self.ext) as path:
tsframe.index.names = ['time', 'foo']
tsframe.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=[0, 1],
has_index_names=self.merge_cells)
tm.assert_frame_equal(tsframe, recons)
self.assertEqual(recons.index.names, ('time', 'foo'))
def test_to_excel_multiindex_no_write_index(self):
_skip_if_no_xlrd()
        # Test writing and re-reading a MI without the index. GH 5616.
# Initial non-MI frame.
frame1 = pd.DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
# Add a MI.
frame2 = frame1.copy()
multi_index = pd.MultiIndex.from_tuples([(70, 80), (90, 100)])
frame2.index = multi_index
with ensure_clean(self.ext) as path:
# Write out to Excel without the index.
frame2.to_excel(path, 'test1', index=False)
# Read it back in.
reader = ExcelFile(path)
frame3 = reader.parse('test1')
# Test that it is the same as the initial frame.
tm.assert_frame_equal(frame1, frame3)
def test_to_excel_float_format(self):
_skip_if_no_xlrd()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean(self.ext) as filename:
df.to_excel(filename, 'test1', float_format='%.2f')
reader = ExcelFile(filename)
rs = reader.parse('test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
def test_to_excel_output_encoding(self):
_skip_if_no_xlrd()
ext = self.ext
filename = '__tmp_to_excel_float_format__.' + ext
df = DataFrame([[u('\u0192'), u('\u0193'), u('\u0194')],
[u('\u0195'), u('\u0196'), u('\u0197')]],
index=[u('A\u0192'), 'B'], columns=[u('X\u0193'), 'Y', 'Z'])
with ensure_clean(filename) as filename:
df.to_excel(filename, sheet_name='TestSheet', encoding='utf8')
result = read_excel(filename, 'TestSheet', encoding='utf8')
tm.assert_frame_equal(result, df)
def test_to_excel_unicode_filename(self):
_skip_if_no_xlrd()
with ensure_clean(u('\u0192u.') + self.ext) as filename:
try:
f = open(filename, 'wb')
except UnicodeEncodeError:
raise nose.SkipTest('no unicode file names on this system')
else:
f.close()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
df.to_excel(filename, 'test1', float_format='%.2f')
reader = ExcelFile(filename)
rs = reader.parse('test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
# def test_to_excel_header_styling_xls(self):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import xlwt
# import xlrd
# except ImportError:
# raise nose.SkipTest
# filename = '__tmp_to_excel_header_styling_xls__.xls'
# pdf.to_excel(filename, 'test1')
# wbk = xlrd.open_workbook(filename,
# formatting_info=True)
# self.assertEqual(["test1"], wbk.sheet_names())
# ws = wbk.sheet_by_name('test1')
# self.assertEqual([(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)],
# ws.merged_cells)
# for i in range(0, 2):
# for j in range(0, 7):
# xfx = ws.cell_xf_index(0, 0)
# cell_xf = wbk.xf_list[xfx]
# font = wbk.font_list
# self.assertEqual(1, font[cell_xf.font_index].bold)
# self.assertEqual(1, cell_xf.border.top_line_style)
# self.assertEqual(1, cell_xf.border.right_line_style)
# self.assertEqual(1, cell_xf.border.bottom_line_style)
# self.assertEqual(1, cell_xf.border.left_line_style)
# self.assertEqual(2, cell_xf.alignment.hor_align)
# os.remove(filename)
# def test_to_excel_header_styling_xlsx(self):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import openpyxl
# from openpyxl.cell import get_column_letter
# except ImportError:
# raise nose.SkipTest
# if openpyxl.__version__ < '1.6.1':
# raise nose.SkipTest
# # test xlsx_styling
# filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
# pdf.to_excel(filename, 'test1')
# wbk = openpyxl.load_workbook(filename)
# self.assertEqual(["test1"], wbk.get_sheet_names())
# ws = wbk.get_sheet_by_name('test1')
# xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
# xlsaddrs += ["A%s" % i for i in range(1, 6)]
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
# self.assertTrue(cell.style.font.bold)
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.top.border_style)
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.right.border_style)
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.bottom.border_style)
# self.assertEqual(openpyxl.style.Border.BORDER_THIN,
# cell.style.borders.left.border_style)
# self.assertEqual(openpyxl.style.Alignment.HORIZONTAL_CENTER,
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
# self.assertTrue(ws.cell(maddr).merged)
# os.remove(filename)
def test_excel_010_hemstring(self):
_skip_if_no_xlrd()
if self.merge_cells:
raise nose.SkipTest('Skip tests for merged MI format.')
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
# override of #2370 until sorted out in 0.11
def roundtrip(df, header=True, parser_hdr=0):
with ensure_clean(self.ext) as path:
df.to_excel(path, header=header, merge_cells=self.merge_cells)
xf = pd.ExcelFile(path)
res = xf.parse(xf.sheet_names[0], header=parser_hdr)
return res
nrows = 5
ncols = 3
for use_headers in (True, False):
            for i in range(1, 4):  # row MultiIndex, up to nlevels=3
                for j in range(1, 4):  # column MultiIndex, up to nlevels=3
                    df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
                    # This branch can be removed once writing MultiIndex
                    # columns to Excel is implemented; for now it checks the
                    # NotImplementedError raised for GH 9794.
                    if j > 1:
with tm.assertRaises(NotImplementedError):
res = roundtrip(df, use_headers)
else:
res = roundtrip(df, use_headers)
if use_headers:
self.assertEqual(res.shape, (nrows, ncols + i))
else:
# first row taken as columns
self.assertEqual(res.shape, (nrows - 1, ncols + i))
# no nans
for r in range(len(res.index)):
for c in range(len(res.columns)):
self.assertTrue(res.ix[r, c] is not np.nan)
res = roundtrip(DataFrame([0]))
self.assertEqual(res.shape, (1, 1))
self.assertTrue(res.ix[0, 0] is not np.nan)
res = roundtrip(DataFrame([0]), False, None)
self.assertEqual(res.shape, (1, 2))
self.assertTrue(res.ix[0, 0] is not np.nan)
def test_duplicated_columns(self):
# Test for issue #5235.
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
colnames = ['A', 'B', 'B']
write_frame.columns = colnames
write_frame.to_excel(path, 'test1')
read_frame = read_excel(path, 'test1')
read_frame.columns = colnames
tm.assert_frame_equal(write_frame, read_frame)
def test_swapped_columns(self):
# Test for issue #5427.
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
write_frame.to_excel(path, 'test1', columns=['B', 'A'])
read_frame = read_excel(path, 'test1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
tm.assert_series_equal(write_frame['B'], read_frame['B'])
def test_datetimes(self):
# Test writing and reading datetimes. For issue #9139. (xref #9185)
_skip_if_no_xlrd()
datetimes = [datetime(2013, 1, 13, 1, 2, 3),
datetime(2013, 1, 13, 2, 45, 56),
datetime(2013, 1, 13, 4, 29, 49),
datetime(2013, 1, 13, 6, 13, 42),
datetime(2013, 1, 13, 7, 57, 35),
datetime(2013, 1, 13, 9, 41, 28),
datetime(2013, 1, 13, 11, 25, 21),
datetime(2013, 1, 13, 13, 9, 14),
datetime(2013, 1, 13, 14, 53, 7),
datetime(2013, 1, 13, 16, 37, 0),
datetime(2013, 1, 13, 18, 20, 52)]
with ensure_clean(self.ext) as path:
write_frame = DataFrame.from_items([('A', datetimes)])
write_frame.to_excel(path, 'Sheet1')
read_frame = read_excel(path, 'Sheet1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
def raise_wrapper(major_ver):
def versioned_raise_wrapper(orig_method):
@functools.wraps(orig_method)
def wrapped(self, *args, **kwargs):
_skip_if_no_openpyxl()
if openpyxl_compat.is_compat(major_ver=major_ver):
orig_method(self, *args, **kwargs)
else:
msg = 'Installed openpyxl is not supported at this time\. Use.+'
with tm.assertRaisesRegexp(ValueError, msg):
orig_method(self, *args, **kwargs)
return wrapped
return versioned_raise_wrapper
def raise_on_incompat_version(major_ver):
def versioned_raise_on_incompat_version(cls):
methods = filter(operator.methodcaller('startswith', 'test_'), dir(cls))
for method in methods:
setattr(cls, method, raise_wrapper(major_ver)(getattr(cls, method)))
return cls
return versioned_raise_on_incompat_version
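# Note: the two factories above wrap every ``test_*`` method of the decorated
# class so that, when the installed openpyxl major version does not match
# ``major_ver``, the test asserts the ValueError above instead of running its
# original body. A minimal sketch of the intended effect (class and method
# names here are hypothetical, for illustration only):
#
#     @raise_on_incompat_version(1)
#     class SomeOpenpyxl1Tests(tm.TestCase):
#         def test_something(self):
#             ...  # runs normally on openpyxl 1.x, else expects ValueError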
@raise_on_incompat_version(1)
class OpenpyxlTests(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'openpyxl1'
check_skip = staticmethod(lambda *args, **kwargs: None)
def test_to_excel_styleconverter(self):
_skip_if_no_openpyxl()
if not openpyxl_compat.is_compat(major_ver=1):
            raise nose.SkipTest('incompatible openpyxl version')
import openpyxl
hstyle = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
xlsx_style = _Openpyxl1Writer._convert_to_style(hstyle)
self.assertTrue(xlsx_style.font.bold)
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.top.border_style)
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.right.border_style)
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.bottom.border_style)
self.assertEqual(openpyxl.style.Border.BORDER_THIN,
xlsx_style.borders.left.border_style)
self.assertEqual(openpyxl.style.Alignment.HORIZONTAL_CENTER,
xlsx_style.alignment.horizontal)
self.assertEqual(openpyxl.style.Alignment.VERTICAL_TOP,
xlsx_style.alignment.vertical)
@raise_on_incompat_version(2)
class Openpyxl2Tests(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'openpyxl2'
check_skip = staticmethod(lambda *args, **kwargs: None)
def test_to_excel_styleconverter(self):
_skip_if_no_openpyxl()
if not openpyxl_compat.is_compat(major_ver=2):
            raise nose.SkipTest('incompatible openpyxl version')
import openpyxl
from openpyxl import styles
hstyle = {
"font": {
"color": '00FF0000',
"bold": True,
},
"borders": {
"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin",
},
"alignment": {
"horizontal": "center",
"vertical": "top",
},
"fill": {
"patternType": 'solid',
'fgColor': {
'rgb': '006666FF',
'tint': 0.3,
},
},
"number_format": {
"format_code": "0.00"
},
"protection": {
"locked": True,
"hidden": False,
},
}
font_color = styles.Color('00FF0000')
font = styles.Font(bold=True, color=font_color)
side = styles.Side(style=styles.borders.BORDER_THIN)
border = styles.Border(top=side, right=side, bottom=side, left=side)
alignment = styles.Alignment(horizontal='center', vertical='top')
fill_color = styles.Color(rgb='006666FF', tint=0.3)
fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
# ahh openpyxl API changes
ver = openpyxl.__version__
if ver >= LooseVersion('2.0.0') and ver < LooseVersion('2.1.0'):
number_format = styles.NumberFormat(format_code='0.00')
else:
number_format = '0.00' # XXX: Only works with openpyxl-2.1.0
protection = styles.Protection(locked=True, hidden=False)
kw = _Openpyxl2Writer._convert_to_style_kwargs(hstyle)
self.assertEqual(kw['font'], font)
self.assertEqual(kw['border'], border)
self.assertEqual(kw['alignment'], alignment)
self.assertEqual(kw['fill'], fill)
self.assertEqual(kw['number_format'], number_format)
self.assertEqual(kw['protection'], protection)
def test_write_cells_merge_styled(self):
_skip_if_no_openpyxl()
if not openpyxl_compat.is_compat(major_ver=2):
            raise nose.SkipTest('incompatible openpyxl version')
from pandas.core.format import ExcelCell
from openpyxl import styles
        sheet_name = 'merge_styled'
sty_b1 = {'font': {'color': '00FF0000'}}
sty_a2 = {'font': {'color': '0000FF00'}}
initial_cells = [
ExcelCell(col=1, row=0, val=42, style=sty_b1),
ExcelCell(col=0, row=1, val=99, style=sty_a2),
]
sty_merged = {'font': { 'color': '000000FF', 'bold': True }}
sty_kwargs = _Openpyxl2Writer._convert_to_style_kwargs(sty_merged)
openpyxl_sty_merged = styles.Style(**sty_kwargs)
merge_cells = [
ExcelCell(col=0, row=0, val='pandas',
mergestart=1, mergeend=1, style=sty_merged),
]
with ensure_clean('.xlsx') as path:
writer = _Openpyxl2Writer(path)
writer.write_cells(initial_cells, sheet_name=sheet_name)
writer.write_cells(merge_cells, sheet_name=sheet_name)
wks = writer.sheets[sheet_name]
xcell_b1 = wks.cell('B1')
xcell_a2 = wks.cell('A2')
self.assertEqual(xcell_b1.style, openpyxl_sty_merged)
self.assertEqual(xcell_a2.style, openpyxl_sty_merged)
class XlwtTests(ExcelWriterBase, tm.TestCase):
ext = '.xls'
engine_name = 'xlwt'
check_skip = staticmethod(_skip_if_no_xlwt)
def test_excel_raise_not_implemented_error_on_multiindex_columns(self):
_skip_if_no_xlwt()
        # MultiIndex as columns is not yet implemented (GH 9794).
cols = pd.MultiIndex.from_tuples([('site',''),
('2014','height'),
('2014','weight')])
df = pd.DataFrame(np.random.randn(10,3), columns=cols)
with tm.assertRaises(NotImplementedError):
with ensure_clean(self.ext) as path:
df.to_excel(path, index=False)
def test_excel_multiindex_index(self):
_skip_if_no_xlwt()
        # MultiIndex as index works, so assert that no error is raised (GH 9794).
cols = pd.MultiIndex.from_tuples([('site',''),
('2014','height'),
('2014','weight')])
df = pd.DataFrame(np.random.randn(3,10), index=cols)
with ensure_clean(self.ext) as path:
df.to_excel(path, index=False)
def test_to_excel_styleconverter(self):
_skip_if_no_xlwt()
import xlwt
hstyle = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
xls_style = _XlwtWriter._convert_to_style(hstyle)
self.assertTrue(xls_style.font.bold)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.top)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.right)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.bottom)
self.assertEqual(xlwt.Borders.THIN, xls_style.borders.left)
self.assertEqual(xlwt.Alignment.HORZ_CENTER, xls_style.alignment.horz)
self.assertEqual(xlwt.Alignment.VERT_TOP, xls_style.alignment.vert)
class XlsxWriterTests(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'xlsxwriter'
check_skip = staticmethod(_skip_if_no_xlsxwriter)
def test_column_format(self):
# Test that column formats are applied to cells. Test for issue #9167.
# Applicable to xlsxwriter only.
_skip_if_no_xlsxwriter()
import warnings
with warnings.catch_warnings():
# Ignore the openpyxl lxml warning.
warnings.simplefilter("ignore")
_skip_if_no_openpyxl()
import openpyxl
with ensure_clean(self.ext) as path:
frame = DataFrame({'A': [123456, 123456],
'B': [123456, 123456]})
writer = ExcelWriter(path)
frame.to_excel(writer)
# Add a number format to col B and ensure it is applied to cells.
num_format = '#,##0'
write_workbook = writer.book
write_worksheet = write_workbook.worksheets()[0]
col_format = write_workbook.add_format({'num_format': num_format})
write_worksheet.set_column('B:B', None, col_format)
writer.save()
read_workbook = openpyxl.load_workbook(path)
read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')
# Get the number format from the cell. This method is backward
# compatible with older versions of openpyxl.
cell = read_worksheet.cell('B2')
try:
read_num_format = cell.style.number_format._format_code
            except Exception:
read_num_format = cell.style.number_format
self.assertEqual(read_num_format, num_format)
class OpenpyxlTests_NoMerge(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'openpyxl'
check_skip = staticmethod(_skip_if_no_openpyxl)
# Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows.
merge_cells = False
class XlwtTests_NoMerge(ExcelWriterBase, tm.TestCase):
ext = '.xls'
engine_name = 'xlwt'
check_skip = staticmethod(_skip_if_no_xlwt)
# Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows.
merge_cells = False
class XlsxWriterTests_NoMerge(ExcelWriterBase, tm.TestCase):
ext = '.xlsx'
engine_name = 'xlsxwriter'
check_skip = staticmethod(_skip_if_no_xlsxwriter)
# Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows.
merge_cells = False
class ExcelWriterEngineTests(tm.TestCase):
def test_ExcelWriter_dispatch(self):
with tm.assertRaisesRegexp(ValueError, 'No engine'):
ExcelWriter('nothing')
try:
import xlsxwriter
writer_klass = _XlsxWriter
except ImportError:
_skip_if_no_openpyxl()
if not openpyxl_compat.is_compat(major_ver=1):
raise nose.SkipTest('incompatible openpyxl version')
writer_klass = _Openpyxl1Writer
with ensure_clean('.xlsx') as path:
writer = ExcelWriter(path)
tm.assert_isinstance(writer, writer_klass)
_skip_if_no_xlwt()
with ensure_clean('.xls') as path:
writer = ExcelWriter(path)
tm.assert_isinstance(writer, _XlwtWriter)
def test_register_writer(self):
        # Some awkward mocking to check that writer dispatch and registration actually work.
called_save = []
called_write_cells = []
class DummyClass(ExcelWriter):
called_save = False
called_write_cells = False
supported_extensions = ['test', 'xlsx', 'xls']
engine = 'dummy'
def save(self):
called_save.append(True)
def write_cells(self, *args, **kwargs):
called_write_cells.append(True)
def check_called(func):
func()
self.assertTrue(len(called_save) >= 1)
self.assertTrue(len(called_write_cells) >= 1)
del called_save[:]
del called_write_cells[:]
register_writer(DummyClass)
writer = ExcelWriter('something.test')
tm.assert_isinstance(writer, DummyClass)
df = tm.makeCustomDataframe(1, 1)
panel = tm.makePanel()
func = lambda: df.to_excel('something.test')
check_called(func)
check_called(lambda: panel.to_excel('something.test'))
val = get_option('io.excel.xlsx.writer')
set_option('io.excel.xlsx.writer', 'dummy')
check_called(lambda: df.to_excel('something.xlsx'))
check_called(lambda: df.to_excel('something.xls', engine='dummy'))
set_option('io.excel.xlsx.writer', val)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
jim-easterbrook/pyctools | src/pyctools/components/io/plotdata.py | 1 | 2672 | # Pyctools - a picture processing algorithm development kit.
# http://github.com/jim-easterbrook/pyctools
# Copyright (C) 2016-19 Pyctools contributors
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
__all__ = ['PlotData']
__docformat__ = 'restructuredtext en'
# matplotlib and pyctools.core.frame both import GObject, but pyctools
# will load pgi if available. pgi has to be set up before importing
# GObject, so import pyctools.core.frame before matplotlib
import pyctools.core.frame
# matplotlib sometimes chooses the wrong backend, so make it use Qt5
# https://gist.github.com/CMCDragonkai/4e9464d9f32f5893d837f3de2c43daa4
import matplotlib as mpl
mpl.use('Qt5Agg')
import matplotlib.pyplot as plt
from pyctools.core.base import Component
from pyctools.core.qt import QtEventLoop
class PlotData(Component):
"""Plot data on a graph.
The input frame's data should be a 2-D
:py:class:`numpy:numpy.ndarray`. The first row contains horizontal
(X) coordinates and the remaining rows contain corresponding Y
values.
"""
with_outframe_pool = False
outputs = []
event_loop = QtEventLoop
def initialise(self):
self.first_plot = True
def process_frame(self):
in_frame = self.input_buffer['input'].get()
labels = eval(in_frame.metadata.get('labels', '[]'))
data = in_frame.as_numpy()
x = data[0]
if self.first_plot:
plt.ion()
self.lines = []
for i, y in enumerate(data[1:]):
self.lines.append(plt.plot(x, y, '-')[0])
else:
for i, y in enumerate(data[1:]):
self.lines[i].set_xdata(x)
self.lines[i].set_ydata(y)
if labels:
plt.xlabel(labels[0])
if len(labels) > 1:
for i in range(min(len(labels) - 1, len(self.lines))):
self.lines[i].set(label=labels[i + 1])
plt.legend()
if self.first_plot:
plt.grid(True)
plt.show()
else:
plt.draw()
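# A minimal sketch of the array layout described in the class docstring
# (the values below are assumed, purely for illustration): the first row
# holds the X coordinates and each remaining row holds one curve's Y values.
#
#     import numpy as np
#     data = np.array([[0.0, 1.0, 2.0, 3.0],    # X coordinates
#                      [0.0, 1.0, 4.0, 9.0],    # first curve
#                      [0.0, 2.0, 8.0, 18.0]])  # second curve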
| gpl-3.0 |
davidgbe/scikit-learn | sklearn/metrics/pairwise.py | 21 | 44042 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
    """ Set X and Y appropriately and check inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
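# A small illustration of the shape check above (arrays are assumed, not part
# of the library code): mismatched feature dimensions raise the
# "Incompatible dimension" ValueError, while matching ones pass through as
# float arrays.
#
#     X = np.ones((5, 3))
#     check_pairwise_arrays(X, np.ones((4, 2)))        # raises ValueError
#     X, Y = check_pairwise_arrays(X, np.ones((4, 3)))  # OK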
def check_paired_arrays(X, Y):
    """ Set X and Y appropriately and check inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
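# Sketch of the pre-computed-norm shortcut mentioned in the docstring above
# (assumed data; any (n_samples, n_features) array works):
#
#     X = np.random.RandomState(0).rand(100, 10)
#     XX = (X ** 2).sum(axis=1)[:, np.newaxis]
#     # Reusing XX across repeated calls avoids recomputing dot(x, x):
#     D = euclidean_distances(X, X_norm_squared=XX)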
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
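# Minimal usage sketch (assumed arrays): for each row of X, obtain the index
# of the closest row of Y and the corresponding distance, without forming the
# full distance matrix.
#
#     X = np.array([[0.0, 0.0], [1.0, 1.0]])
#     Y = np.array([[1.0, 0.0], [0.0, 1.0]])
#     idx, dist = pairwise_distances_argmin_min(X, Y)
#     # idx[i] indexes the row of Y closest to X[i]; dist[i] is that distance.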
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
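# Sketch of the relationship implemented above (assumed data): the result is
# exactly 1 - cosine_similarity, elementwise.
#
#     X = np.random.RandomState(0).rand(4, 3)
#     np.allclose(cosine_distances(X), 1.0 - cosine_similarity(X))  # expected True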
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
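# Sketch of the gamma default above (assumed data): when gamma is None it
# falls back to 1 / n_features, so the two calls below give the same kernel.
#
#     X = np.random.RandomState(0).rand(20, 5)
#     K1 = rbf_kernel(X)                 # gamma defaults to 1.0 / 5
#     K2 = rbf_kernel(X, gamma=1.0 / 5)  # same result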
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Computes the exponential chi-squared kernel between X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
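# As the implementation above shows, chi2_kernel is the exponentiated
# additive kernel; a sketch of the equivalence (assumed non-negative data):
#
#     X = np.random.RandomState(0).rand(10, 4)   # values in [0, 1)
#     np.allclose(chi2_kernel(X, gamma=0.5),
#                 np.exp(0.5 * additive_chi2_kernel(X)))   # expected True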
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
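# Usage sketch (assumed data): a built-in metric name and an equivalent
# callable both yield an (n_samples_a, n_samples_b) distance matrix.
#
#     X = np.random.RandomState(0).rand(6, 3)
#     D1 = pairwise_distances(X, metric="manhattan")
#     D2 = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
#     # D1 and D2 agree up to floating point error.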
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
    Valid values for metric are::
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
         'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
        the kernel between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
        Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
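# A minimal usage sketch for pairwise_kernels (illustrative only; the values
# are easy to check by hand: 'linear' is just the Gram matrix X.dot(Y.T) and
# 'rbf' is exp(-gamma * ||x - y||^2)):
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 0.], [0., 2.]])
#     >>> pairwise_kernels(X, metric='linear')
#     array([[ 1.,  0.],
#            [ 0.,  4.]])
#     >>> pairwise_kernels(X, metric='rbf', gamma=1.0)
#     array([[ 1.        ,  0.00673795],
#            [ 0.00673795,  1.        ]])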
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/cluster/bicluster.py | 26 | 19870 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
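# Worked sketch of the scaling above (illustrative only): the returned ``an``
# is D_r^{-1/2} * X * D_c^{-1/2}, where D_r and D_c hold the row and column
# sums of the nonnegative matrix.
#
#     >>> import numpy as np
#     >>> an, row_diag, col_diag = _scale_normalize(np.array([[1., 3.],
#     ...                                                     [2., 2.]]))
#     >>> row_diag          # 1 / sqrt(row sums) = 1 / sqrt([4, 4])
#     array([ 0.5,  0.5])
#     >>> col_diag          # 1 / sqrt(column sums) = 1 / sqrt([3, 5])
#     array([ 0.57735027,  0.4472136 ])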
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
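# Sanity sketch (illustrative only): once the loop above converges, the row
# sums of the returned matrix are approximately all equal, and likewise the
# column sums, which is the property the docstring describes.
#
#     >>> import numpy as np
#     >>> Xs = _bistochastic_normalize(
#     ...     np.abs(np.random.RandomState(0).randn(4, 3)) + 1.0)
#     >>> np.allclose(Xs.sum(axis=1), Xs.sum(axis=1)[0], atol=1e-2)
#     True
#     >>> np.allclose(Xs.sum(axis=0), Xs.sum(axis=0)[0], atol=1e-2)
#     True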
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
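# Equivalently, with L = log(X) (after shifting X so its minimum is at least
# 1), the entry returned at (i, j) is
# L[i, j] - mean(L[i, :]) - mean(L[:, j]) + mean(L), i.e. Kluger's
# log-interaction term. A small sanity sketch (illustrative only; this matrix
# is rank one in log space, so the interaction terms vanish):
#
#     >>> import numpy as np
#     >>> np.allclose(_log_normalize(np.array([[1., 2.], [4., 8.]])), 0.0)
#     True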
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method='randomized'`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
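# A minimal usage sketch for SpectralCoclustering (illustrative only; the toy
# matrix is block diagonal, which is the typical case this algorithm targets):
#
#     >>> import numpy as np
#     >>> X = np.array([[5., 5., 0., 0.],
#     ...               [5., 5., 0., 0.],
#     ...               [0., 0., 6., 6.],
#     ...               [0., 0., 6., 6.]])
#     >>> model = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
#     >>> model.rows_.shape, model.columns_.shape
#     ((2, 4), (2, 4))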
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method='randomized'`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
                raise ValueError("Incorrect parameter n_clusters has value:"
                                 " {}. It should either be a single integer"
                                 " or an iterable with two integers:"
                                 " (n_row_clusters, n_column_clusters)"
                                 .format(self.n_clusters))
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
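# A minimal usage sketch for SpectralBiclustering (illustrative only; the toy
# data is a noisy 2 x 3 checkerboard of means, hence n_clusters=(2, 3)):
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> means = np.array([[10., 1., 5.], [2., 8., 3.]])
#     >>> X = means[np.repeat([0, 1], 10)][:, np.repeat([0, 1, 2], 10)]
#     >>> X = X + 0.1 * rng.randn(*X.shape)
#     >>> model = SpectralBiclustering(n_clusters=(2, 3), method='log',
#     ...                              random_state=0).fit(X)
#     >>> model.rows_.shape, model.columns_.shape
#     ((6, 20), (6, 30))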
| bsd-3-clause |
moonbury/notebooks | github/MasteringMLWithScikit-learn/8365OS_04_Codes/scratch.py | 3 | 4232 | """
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
def main():
pipeline = Pipeline([
('vect', TfidfVectorizer(stop_words='english')),
('clf', LogisticRegression())
])
parameters = {
'vect__max_df': (0.25, 0.5),
'vect__ngram_range': ((1, 1), (1, 2)),
'vect__use_idf': (True, False),
'clf__C': (0.1, 1, 10),
}
df = pd.read_csv('data/train.tsv', header=0, delimiter='\t')
X, y = df['Phrase'], df['Sentiment'].as_matrix()
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5)
grid_search = GridSearchCV(pipeline, parameters, n_jobs=3, verbose=1, scoring='accuracy')
grid_search.fit(X_train, y_train)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
predictions = grid_search.predict(X_test)
print 'Accuracy:', accuracy_score(y_test, predictions)
print 'Confusion Matrix:', confusion_matrix(y_test, predictions)
print 'Classification Report:', classification_report(y_test, predictions)
if __name__ == '__main__':
main()
Fitting 3 folds for each of 24 candidates, totalling 72 fits
[Parallel(n_jobs=3)]: Done 1 jobs | elapsed: 3.3s
[Parallel(n_jobs=3)]: Done 50 jobs | elapsed: 1.1min
[Parallel(n_jobs=3)]: Done 68 out of 72 | elapsed: 1.9min remaining: 6.8s
[Parallel(n_jobs=3)]: Done 72 out of 72 | elapsed: 2.1min finished
Best score: 0.620
Best parameters set:
clf__C: 10
vect__max_df: 0.25
vect__ngram_range: (1, 2)
vect__use_idf: False
Accuracy: 0.636370626682
Confusion Matrix: [[ 1129 1679 634 64 9]
[ 917 6121 6084 505 35]
[ 229 3091 32688 3614 166]
[ 34 408 6734 8068 1299]
[ 5 35 494 2338 1650]]
Classification Report: precision recall f1-score support
0 0.49 0.32 0.39 3515
1 0.54 0.45 0.49 13662
2 0.70 0.82 0.76 39788
3 0.55 0.49 0.52 16543
4 0.52 0.36 0.43 4522
avg / total 0.62 0.64 0.62 78030
"""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
def main():
pipeline = Pipeline([
('vect', TfidfVectorizer(stop_words='english')),
('clf', LogisticRegression())
])
parameters = {
'vect__max_df': (0.25, 0.5),
'vect__ngram_range': ((1, 1), (1, 2)),
'vect__use_idf': (True, False),
'clf__C': (0.1, 1, 10),
}
df = pd.read_csv('data/train.tsv', header=0, delimiter='\t')
X, y = df['Phrase'], df['Sentiment'].as_matrix()
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5)
grid_search = GridSearchCV(pipeline, parameters, n_jobs=3, verbose=1, scoring='accuracy')
grid_search.fit(X_train, y_train)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
predictions = grid_search.predict(X_test)
print 'Accuracy:', accuracy_score(y_test, predictions)
print 'Confusion Matrix:', confusion_matrix(y_test, predictions)
print 'Classification Report:', classification_report(y_test, predictions)
if __name__ == '__main__':
main() | gpl-3.0 |
procoder317/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
jakobworldpeace/scikit-learn | examples/neighbors/plot_kde_1d.py | 60 | 5120 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
flennerhag/mlens | mlens/externals/sklearn/exceptions.py | 1 | 4946 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
.. versionchanged:: 0.18
Moved from sklearn.utils.validation.
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior.
.. versionchanged:: 0.18
Moved from sklearn.base.
"""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems
.. versionchanged:: 0.18
Moved from sklearn.utils.
"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
.. versionchanged:: 0.18
Moved from sklearn.utils.validation.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
.. versionchanged:: 0.18
Moved from sklearn.utils.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
.. versionadded:: 0.18
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
.. versionchanged:: 0.18
Moved from sklearn.cross_validation.
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
.. versionchanged:: 0.18
Moved from sklearn.utils.validation, extends EfficiencyWarning.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid
.. versionchanged:: 0.18
Moved from sklearn.base.
"""
| mit |
rafaelmds/fatiando | gallery/gravmag/imaging_geninv.py | 6 | 3119 | """
Potential field imaging through the Generalized Inverse method
---------------------------------------------------------------
Module :mod:`fatiando.gravmag.imaging` has functions for imaging methods in
potential fields. These methods produce an image of the subsurface without
doing an inversion. However, there is a tradeoff with the quality of the result
being generally inferior to an inversion result.
Here we'll show how the Generalized Inverse imaging method can be used on some
synthetic data. We'll plot the final result as slices across the x, y, z axis.
"""
from __future__ import division
from fatiando import gridder, mesher
from fatiando.gravmag import prism, imaging
from fatiando.vis.mpl import square
import matplotlib.pyplot as plt
import numpy as np
# Make some synthetic gravity data from a simple prism model
model = [mesher.Prism(-1000, 1000, -3000, 3000, 0, 2000, {'density': 800})]
shape = (25, 25)
xp, yp, zp = gridder.regular((-5000, 5000, -5000, 5000), shape, z=-10)
data = prism.gz(xp, yp, zp, model)
# Run the Generalized Inverse
mesh = imaging.geninv(xp, yp, zp, data, shape, zmin=0, zmax=5000, nlayers=25)
# Plot the results
fig = plt.figure()
X, Y = xp.reshape(shape)/1000, yp.reshape(shape)/1000
image = mesh.props['density'].reshape(mesh.shape)
# First plot the original gravity data
ax = plt.subplot(2, 2, 1)
ax.set_title('Gravity data (mGal)')
ax.set_aspect('equal')
scale = np.abs([data.min(), data.max()]).max()
tmp = ax.contourf(Y, X, data.reshape(shape), 30, cmap="RdBu_r", vmin=-scale,
vmax=scale)
plt.colorbar(tmp, ax=ax, pad=0)
ax.set_xlim(Y.min(), Y.max())
ax.set_ylim(X.min(), X.max())
ax.set_xlabel('y (km)')
ax.set_ylabel('x (km)')
# Then plot model slices in the x, y, z directions through the middle of the
# model. Also show the outline of the true model for comparison.
scale = 0.1*np.abs([image.min(), image.max()]).max()
x = mesh.get_xs()/1000
y = mesh.get_ys()/1000
z = mesh.get_zs()/1000
x1, x2, y1, y2, z1, z2 = np.array(model[0].get_bounds())/1000
ax = plt.subplot(2, 2, 2)
ax.set_title('Model slice at z={} km'.format(z[len(z)//2]))
ax.set_aspect('equal')
ax.pcolormesh(y, x, image[mesh.shape[0]//2, :, :], cmap="cubehelix",
vmin=-scale, vmax=scale)
square([y1, y2, x1, x2])
ax.set_ylim(x.min(), x.max())
ax.set_xlim(y.min(), y.max())
ax.set_xlabel('y (km)')
ax.set_ylabel('x (km)')
ax = plt.subplot(2, 2, 3)
ax.set_title('Model slice at y={} km'.format(y[len(y)//2]))
ax.set_aspect('equal')
ax.pcolormesh(x, z, image[:, :, mesh.shape[1]//2], cmap="cubehelix",
vmin=-scale, vmax=scale)
square([x1, x2, z1, z2])
ax.set_ylim(z.max(), z.min())
ax.set_xlim(x.min(), x.max())
ax.set_xlabel('x (km)')
ax.set_ylabel('z (km)')
ax = plt.subplot(2, 2, 4)
ax.set_title('Model slice at x={} km'.format(x[len(x)//2]))
ax.set_aspect('equal')
ax.pcolormesh(y, z, image[:, mesh.shape[2]//2, :], cmap="cubehelix",
vmin=-scale, vmax=scale)
square([y1, y2, z1, z2])
ax.set_ylim(z.max(), z.min())
ax.set_xlim(y.min(), y.max())
ax.set_xlabel('y (km)')
ax.set_ylabel('z (km)')
plt.tight_layout()
plt.show()
| bsd-3-clause |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/decomposition/tests/test_pca.py | 9 | 18399 | import numpy as np
import scipy as sp
from itertools import product
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']
def test_pca():
# PCA on dense arrays
X = iris.data
for n_comp in np.arange(X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='full')
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
# test explained_variance_ratio_ == 1 with all components
pca = PCA(svd_solver='full')
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
def test_pca_arpack_solver():
# PCA on dense arrays
X = iris.data
d = X.shape[1]
# Loop excluding the extremes, invalid inputs for arpack
for n_comp in np.arange(1, d):
pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(d), 12)
pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
assert_equal(pca.n_components,
PCA(n_components=d,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
def test_pca_randomized_solver():
# PCA on dense arrays
X = iris.data
# Loop excluding the 0, invalid for randomized
for n_comp in np.arange(1, X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='randomized', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='randomized', random_state=0).svd_solver)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
n_features = n_components + 2 # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_greater(X.std(axis=0).std(), 43.8)
for solver, copy in product(solver_list, (True, False)):
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = PCA(n_components=n_components, whiten=True, copy=copy,
svd_solver=solver, random_state=0, iterated_power=7)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
decimal=6)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = PCA(n_components=n_components, whiten=False, copy=copy,
svd_solver=solver).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full').fit(X)
apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
assert_array_almost_equal(pca.explained_variance_,
apca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
apca.explained_variance_ratio_, 3)
rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_pca = apca.transform(X)
assert_array_almost_equal(apca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
for solver in solver_list:
Yt = PCA(n_components=2, svd_solver=solver).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='full').fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
for solver in solver_list:
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for solver in solver_list:
for n_components in [-1, 3]:
assert_raises(ValueError,
PCA(n_components, svd_solver=solver).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by randomized PCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2, svd_solver='randomized',
random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by randomized PCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = PCA(n_components=1, svd_solver='randomized',
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that randomized PCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_less(relative_max_delta, 1e-5)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle', svd_solver='full').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5, svd_solver='full').fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives different scores if whiten=True
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
ll2 = pca.score(X)
assert_true(ll1 > ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k, svd_solver='full')
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
def test_svd_solver_auto():
rng = np.random.RandomState(0)
X = rng.uniform(size=(1000, 50))
# case: n_components in (0,1) => 'full'
pca = PCA(n_components=.5)
pca.fit(X)
pca_test = PCA(n_components=.5, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: max(X.shape) <= 500 => 'full'
pca = PCA(n_components=5, random_state=0)
Y = X[:10, :]
pca.fit(Y)
pca_test = PCA(n_components=5, svd_solver='full', random_state=0)
pca_test.fit(Y)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: n_components >= .8 * min(X.shape) => 'full'
pca = PCA(n_components=50)
pca.fit(X)
pca_test = PCA(n_components=50, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# n_components >= 1 and n_components < .8 * min(X.shape) => 'randomized'
pca = PCA(n_components=10, random_state=0)
pca.fit(X)
pca_test = PCA(n_components=10, svd_solver='randomized', random_state=0)
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
def test_deprecation_randomized_pca():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA was "
"deprecated in 0.18 and will be "
"removed in 0.20. Use PCA(svd_solver='randomized') "
"instead. The new implementation DOES NOT store "
"whiten ``components_``. Apply transform to get them.")
def fit_deprecated(X):
global Y
rpca = RandomizedPCA(random_state=0)
Y = rpca.fit_transform(X)
assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
assert_array_almost_equal(Y, Y_pca)
def test_pca_sparse_input():
X = np.random.RandomState(0).rand(5, 4)
X = sp.sparse.csr_matrix(X)
assert(sp.sparse.issparse(X))
for svd_solver in solver_list:
pca = PCA(n_components=3, svd_solver=svd_solver)
assert_raises(TypeError, pca.fit, X)
| mit |
wangyum/spark | python/pyspark/ml/clustering.py | 5 | 62508 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, \
HasAggregationDepth, HasWeightCol, HasTol, HasProbabilityCol, HasDistanceMeasure, \
HasCheckpointInterval, Param, Params, TypeConverters
from pyspark.ml.util import JavaMLWritable, JavaMLReadable, GeneralJavaMLWritable, \
HasTrainingSummary, SparkContext
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.common import inherit_doc, _java2py
from pyspark.ml.stat import MultivariateGaussian
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel', 'KMeansSummary',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
@property
@since("2.4.0")
def numIter(self):
"""
Number of iterations.
"""
return self._call_java("numIter")
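# Usage sketch (illustrative only): a ClusteringSummary is not constructed
# directly; it is obtained from a fitted model defined later in this module,
# for example:
#
#     >>> # model = KMeans(k=2, seed=1).fit(df)   # df has a 'features' column
#     >>> # model.summary.clusterSizes            # e.g. [3, 3]
#     >>> # model.summary.predictions.columns     # includes the prediction col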
@inherit_doc
class _GaussianMixtureParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasProbabilityCol, HasTol, HasAggregationDepth, HasWeightCol):
"""
Params for :py:class:`GaussianMixture` and :py:class:`GaussianMixtureModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_GaussianMixtureParams, self).__init__(*args)
self._setDefault(k=2, tol=0.01, maxIter=100, aggregationDepth=2)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureModel(JavaModel, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("3.0.0")
def gaussians(self):
"""
Array of :py:class:`MultivariateGaussian` where gaussians[i] represents
the Multivariate Gaussian (Normal) Distribution for Gaussian i
"""
sc = SparkContext._active_spark_context
jgaussians = self._java_obj.gaussians()
return [
MultivariateGaussian(_java2py(sc, jgaussian.mean()), _java2py(sc, jgaussian.cov()))
for jgaussian in jgaussians]
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@since("3.0.0")
def predictProbability(self, value):
"""
Predict probability for the given features.
"""
return self._call_java("predictProbability", value)
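# Illustrative sketch (not part of PySpark's API): inspecting a fitted
# GaussianMixtureModel. `_sketch_inspect_gmm` is a hypothetical helper for
# documentation only; it assumes `model` is a fitted GaussianMixtureModel and
# `point` is a pyspark.ml.linalg.Vector with the right number of features.
def _sketch_inspect_gmm(model, point):
    weights = model.weights                      # mixing weights, sum to 1
    means = [g.mean for g in model.gaussians]    # Python-side MultivariateGaussian objects
    params_df = model.gaussiansDF                # same parameters as a DataFrame (mean, cov)
    cluster = model.predict(point)               # hard assignment for one point
    probs = model.predictProbability(point)      # per-component membership probabilities
    return weights, means, params_df, cluster, probs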
@inherit_doc
class GaussianMixture(JavaEstimator, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
    specifying each distribution's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. versionadded:: 2.0.0
Notes
-----
For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> gm.getMaxIter()
100
>>> gm.setMaxIter(30)
GaussianMixture...
>>> gm.getMaxIter()
30
>>> model = gm.fit(df)
>>> model.getAggregationDepth()
2
>>> model.getFeaturesCol()
'features'
>>> model.setPredictionCol("newPrediction")
GaussianMixtureModel...
>>> model.predict(df.head().features)
2
>>> model.predictProbability(df.head().features)
DenseVector([0.0, 0.0, 1.0])
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> summary.logLikelihood
65.02945...
>>> weights = model.weights
>>> len(weights)
3
>>> gaussians = model.gaussians
>>> len(gaussians)
3
>>> gaussians[0].mean
DenseVector([0.825, 0.8675])
>>> gaussians[0].cov
DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], 0)
>>> gaussians[1].mean
DenseVector([-0.87, -0.72])
>>> gaussians[1].cov
DenseMatrix(2, 2, [0.0016, 0.0016, 0.0016, 0.0016], 0)
>>> gaussians[2].mean
DenseVector([-0.055, -0.075])
>>> gaussians[2].cov
DenseMatrix(2, 2, [0.002, -0.0011, -0.0011, 0.0006], 0)
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[4].newPrediction == rows[5].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussians[0].mean == model.gaussians[0].mean
True
>>> model2.gaussians[0].cov == model.gaussians[0].cov
True
>>> model2.gaussians[1].mean == model.gaussians[1].mean
True
>>> model2.gaussians[1].cov == model.gaussians[1].cov
True
>>> model2.gaussians[2].mean == model.gaussians[2].mean
True
>>> model2.gaussians[2].cov == model.gaussians[2].cov
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
>>> gm2.setWeightCol("weight")
GaussianMixture...
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2, weightCol=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2, weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2, weightCol=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
class GaussianMixtureSummary(ClusteringSummary):
"""
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
Summary of KMeans.
.. versionadded:: 2.1.0
"""
@property
@since("2.4.0")
def trainingCost(self):
"""
K-means cost (sum of squared distances to the nearest centroid for all points in the
training dataset). This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
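# Illustrative sketch (not part of PySpark's API): the training cost plays the same
# role as `inertia_` in scikit-learn and is read off the summary. Hypothetical helper
# for documentation only; assumes `kmeans_model` is a fitted KMeansModel with a
# training summary.
def _sketch_kmeans_cost(kmeans_model):
    # sum of squared distances of every training point to its closest center
    return kmeans_model.summary.trainingCost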
@inherit_doc
class _KMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, HasTol,
HasDistanceMeasure, HasWeightCol):
"""
Params for :py:class:`KMeans` and :py:class:`KMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_KMeansParams, self).__init__(*args)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
@since("1.5.0")
def getK(self):
"""
        Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def getInitMode(self):
"""
        Gets the value of `initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def getInitSteps(self):
"""
        Gets the value of `initSteps` or its default value.
"""
return self.getOrDefault(self.initSteps)
class KMeansModel(JavaModel, _KMeansParams, GeneralJavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(super(KMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
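# Illustrative sketch (not part of PySpark's API): a fitted KMeansModel exposes its
# centers as NumPy arrays and can score a single point or a whole DataFrame.
# Hypothetical helper for documentation only; assumes `model` is a fitted KMeansModel,
# `point` a pyspark.ml.linalg.Vector and `df` a DataFrame with the features column.
def _sketch_kmeans_model(model, point, df):
    centers = model.clusterCenters()     # list of numpy.ndarray, one per cluster
    cluster = model.predict(point)       # index of the closest center
    scored = model.transform(df)         # appends the prediction column to df
    return centers, cluster, scored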
@inherit_doc
class KMeans(JavaEstimator, _KMeansParams, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
.. versionadded:: 1.5.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
... (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
>>> df = spark.createDataFrame(data, ["features", "weighCol"])
>>> kmeans = KMeans(k=2)
>>> kmeans.setSeed(1)
KMeans...
>>> kmeans.setWeightCol("weighCol")
KMeans...
>>> kmeans.setMaxIter(10)
KMeans...
>>> kmeans.getMaxIter()
10
>>> kmeans.clear(kmeans.maxIter)
>>> model = kmeans.fit(df)
>>> model.getDistanceMeasure()
'euclidean'
>>> model.setPredictionCol("newPrediction")
KMeansModel...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
4.0
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean", weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean", weightCol=None)
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean", weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean", weightCol=None)
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("1.5.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.5.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("1.5.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.5.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@inherit_doc
class _BisectingKMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasDistanceMeasure, HasWeightCol):
"""
Params for :py:class:`BisectingKMeans` and :py:class:`BisectingKMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_BisectingKMeansParams, self).__init__(*args)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
class BisectingKMeansModel(JavaModel, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
.. deprecated:: 3.0.0
It will be removed in future versions. Use :py:class:`ClusteringEvaluator` instead.
You can also get the cost on the training dataset in the summary.
"""
warnings.warn("Deprecated in 3.0.0. It will be removed in future versions. Use "
"ClusteringEvaluator instead. You can also get the cost on the training "
"dataset in the summary.", FutureWarning)
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
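# Illustrative sketch (not part of PySpark's API): `computeCost` is deprecated; the
# training cost is available on the summary, and clustering quality on (new) data can
# be measured with ClusteringEvaluator (silhouette). Hypothetical helper for
# documentation only; assumes `model` is a fitted BisectingKMeansModel and `df` a
# DataFrame with the configured features column.
def _sketch_bkm_evaluation(model, df):
    from pyspark.ml.evaluation import ClusteringEvaluator
    training_cost = model.summary.trainingCost   # replaces computeCost on training data
    evaluator = ClusteringEvaluator(predictionCol=model.getPredictionCol())
    silhouette = evaluator.evaluate(model.transform(df))
    return training_cost, silhouette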
@inherit_doc
class BisectingKMeans(JavaEstimator, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
... (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
>>> df = spark.createDataFrame(data, ["features", "weighCol"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> bkm.setMaxIter(10)
BisectingKMeans...
>>> bkm.getMaxIter()
10
>>> bkm.clear(bkm.maxIter)
>>> bkm.setSeed(1)
BisectingKMeans...
>>> bkm.setWeightCol("weighCol")
BisectingKMeans...
>>> bkm.getSeed()
1
>>> bkm.clear(bkm.seed)
>>> model = bkm.fit(df)
>>> model.getMaxIter()
20
>>> model.setPredictionCol("newPrediction")
BisectingKMeansModel...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.0
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
4.000...
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("3.0.0")
def trainingCost(self):
"""
Sum of squared distances to the nearest centroid for all points in the training dataset.
This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
@inherit_doc
class _LDAParams(HasMaxIter, HasFeaturesCol, HasSeed, HasCheckpointInterval):
"""
Params for :py:class:`LDA` and :py:class:`LDAModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
    topicConcentration = Param(Params._dummy(), "topicConcentration",
                               "Concentration parameter (commonly named \"beta\" or \"eta\") for "
                               "the prior placed on topics' distributions over terms.",
                               typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
def __init__(self, *args):
super(_LDAParams, self).__init__(*args)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
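# Illustrative sketch (not part of PySpark's API): the two optimizers use different
# subsets of the params above. Hypothetical helper for documentation only.
def _sketch_lda_configs():
    # online variational Bayes: mini-batch fraction and learning-rate schedule matter
    online_lda = LDA(k=10, optimizer="online", subsamplingRate=0.05,
                     learningOffset=1024.0, learningDecay=0.51)
    # expectation-maximization: checkpointing params matter for long-running jobs
    em_lda = LDA(k=10, optimizer="em", checkpointInterval=10, keepLastCheckpoint=True)
    return online_lda, em_lda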
@inherit_doc
class LDAModel(JavaModel, _LDAParams):
"""
Latent Dirichlet Allocation (LDA) model.
    This abstraction permits different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
.. warning:: If this model is actually a :py:class:`DistributedLDAModel`
instance produced by the Expectation-Maximization ("em") `optimizer`,
then this method could involve collecting a large amount of data
to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
.. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
.. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
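# Illustrative sketch (not part of PySpark's API): scoring a corpus with a fitted LDA
# model. Hypothetical helper for documentation only; assumes `model` is a fitted
# LDAModel and `corpus` is a DataFrame whose features column holds term-count vectors.
def _sketch_lda_scores(model, corpus):
    ll = model.logLikelihood(corpus)      # lower bound on log likelihood, higher is better
    perp = model.logPerplexity(corpus)    # upper bound on perplexity, lower is better
    topics = model.describeTopics(maxTermsPerTopic=5)
    return ll, perp, topics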
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
.. warning:: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes
-----
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. versionadded:: 2.0.0
Returns
-------
list
List of checkpoint files from training
Notes
-----
Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
"""
return self._call_java("getCheckpointFiles")
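# Illustrative sketch (not part of PySpark's API): extras that only exist on the
# distributed (EM-trained) model. Hypothetical helper for documentation only; assumes
# `model` is a DistributedLDAModel.
def _sketch_distributed_lda(model):
    train_ll = model.trainingLogLikelihood()   # likelihood of the observed training tokens
    prior = model.logPrior()                   # log probability of the parameter estimate
    local = model.toLocal()                    # keeps the topics, drops training-set info
    return train_ll, prior, local.isDistributed()   # False after the conversion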
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, _LDAParams, JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> lda.setMaxIter(10)
LDA...
>>> lda.getMaxIter()
10
>>> lda.clear(lda.maxIter)
>>> model = lda.fit(df)
>>> model.setSeed(1)
DistributedLDAModel...
>>> model.getTopicDistributionCol()
'topicDistribution'
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
>>> model.transform(df).take(1) == sameLocalModel.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
        Currently only 'em' and 'online' are supported.
Examples
--------
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
Examples
--------
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
Examples
--------
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
Examples
--------
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
Examples
--------
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
Examples
--------
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
Examples
--------
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
Examples
--------
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
Examples
--------
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
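# Illustrative sketch (not part of PySpark's API): producing the term-count vectors LDA
# expects from raw text, as described in the class docstring above. Hypothetical helper
# for documentation only; assumes an active SparkSession `spark`.
def _sketch_lda_text_pipeline(spark):
    from pyspark.ml import Pipeline
    from pyspark.ml.feature import Tokenizer, CountVectorizer
    docs = spark.createDataFrame([(0, "spark spark ml"), (1, "lda topic model")],
                                 ["id", "text"])
    tokenizer = Tokenizer(inputCol="text", outputCol="words")
    vectorizer = CountVectorizer(inputCol="words", outputCol="features")
    pipeline = Pipeline(stages=[tokenizer, vectorizer, LDA(k=2, maxIter=10)])
    model = pipeline.fit(docs)
    return model.transform(docs)   # adds the topicDistribution column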
@inherit_doc
class _PowerIterationClusteringParams(HasMaxIter, HasWeightCol):
"""
Params for :py:class:`PowerIterationClustering`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_PowerIterationClusteringParams, self).__init__(*args)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@inherit_doc
class PowerIterationClustering(_PowerIterationClusteringParams, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
`Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_. From the
abstract: PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
    This class is not yet an Estimator/Transformer; use the :py:func:`assignClusters` method
to run the PowerIterationClustering algorithm.
.. versionadded:: 2.4.0
Notes
-----
See `Wikipedia on Spectral clustering <http://en.wikipedia.org/wiki/Spectral_clustering>`_
Examples
--------
>>> data = [(1, 0, 0.5),
... (2, 0, 0.5), (2, 1, 0.7),
... (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
... (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
... (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight").repartition(1)
>>> pic = PowerIterationClustering(k=2, weightCol="weight")
>>> pic.setMaxIter(40)
PowerIterationClustering...
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |0 |
|1 |0 |
|2 |0 |
|3 |0 |
|4 |0 |
|5 |1 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
>>> pic2.assignClusters(df).take(6) == assignments.take(6)
True
"""
@keyword_only
def __init__(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.4.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.4.0")
def assignClusters(self, dataset):
"""
        Runs the PIC algorithm and returns a cluster assignment for each input vertex.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
            the dst column value is j, the weight column value is the similarity
            s_ij, which must be nonnegative. This is a symmetric matrix and hence
            s_ij = s_ji. For any (i, j) with nonzero similarity, there should be
            either (i, j, s_ij) or (j, i, s_ji) in the input. Rows with i = j are
            ignored, because we assume s_ij = 0.0.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
A dataset that contains columns of vertex id and the corresponding cluster for
            the id. Its schema is:
- id: Long
- cluster: Int
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
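# Illustrative sketch (not part of PySpark's API): PIC consumes an affinity matrix
# encoded as (src, dst, weight) rows and returns per-vertex cluster ids. Hypothetical
# helper for documentation only; assumes an active SparkSession `spark`.
def _sketch_pic(spark):
    affinities = spark.createDataFrame(
        [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 4, 1.0)],
        ["src", "dst", "weight"])
    pic = PowerIterationClustering(k=2, maxIter=20, weightCol="weight")
    return pic.assignClusters(affinities)   # DataFrame with `id` and `cluster` columns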
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
        # NumPy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| apache-2.0 |
CVML/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
    """Compute barycenter weights of X from Z along the first axis
    We estimate the weights to assign to each point in Z[i] to recover
    the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
    reg : float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
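# Illustrative sketch (not part of scikit-learn): each row of the returned weight
# matrix reconstructs one sample from its neighbors and sums to 1. Hypothetical
# helper kept here only for documentation purposes.
def _sketch_barycenter_weights():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 3)                              # 10 samples in 3 dimensions
    ind = NearestNeighbors(n_neighbors=4).fit(X).kneighbors(
        X, return_distance=False)[:, 1:]             # 3 true neighbors per sample
    B = barycenter_weights(X, X[ind])                # shape (10, 3)
    assert np.allclose(B.sum(axis=1), 1.0)           # weights sum to 1 per row
    return B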
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
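# Illustrative sketch (not part of scikit-learn): the barycenter k-NN graph stores the
# same reconstruction weights as a sparse (n_samples, n_samples) matrix whose rows sum
# to 1. Hypothetical helper kept here only for documentation purposes.
def _sketch_barycenter_graph():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    W = barycenter_kneighbors_graph(X, n_neighbors=5)    # CSR matrix, shape (20, 20)
    assert np.allclose(np.asarray(W.sum(axis=1)).ravel(), 1.0)
    return W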
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
    random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
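# Illustrative sketch (not part of scikit-learn): null_space extracts the bottom
# eigenvectors of the symmetric positive semi-definite cost matrix while skipping the
# trivial constant direction. Hypothetical helper kept for documentation purposes.
def _sketch_null_space():
    rng = np.random.RandomState(0)
    A = rng.rand(5, 5)
    M = np.dot(A.T, A)                               # symmetric positive semi-definite
    vecs, err = null_space(M, k=2, k_skip=1, eigen_solver='dense')
    return vecs.shape, err                           # ((5, 2), sum of kept eigenvalues)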
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
                   n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
    random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I to complete M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
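# A minimal usage sketch of the function above (commented out; `X` is assumed to be
# an (n_samples, n_features) array such as a swiss-roll point cloud):
# Y, err = locally_linear_embedding(X, n_neighbors=12, n_components=2,
#                                   method='modified', random_state=0)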
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
        ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
    random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
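# A short usage sketch of the estimator (commented out; swiss-roll data is only an
# illustrative choice):
# from sklearn.datasets import make_swiss_roll
# X, _ = make_swiss_roll(n_samples=1500, random_state=0)
# lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2, method='standard')
# X_2d = lle.fit_transform(X)       # embed the training set
# X_new_2d = lle.transform(X[:10])  # map new points via barycenter weights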
| bsd-3-clause |
adelomana/schema | conditionedFitness/pureCultures/script.py | 2 | 3732 | import sys,datetime
import matplotlib,matplotlib.pyplot
matplotlib.rcParams.update({'font.size':18,'font.family':'Arial','xtick.labelsize':14,'ytick.labelsize':14})
def dataReader():
generation='single colonies'
colonyCounts={}
dilutionFactors={}
fails=['colony # 13','colony # 14','colony # 15','colony # 18','colony # 19','colony # 20']
with open(dataFile,'r') as f:
for line in f:
vector=line.split(',')
if vector[0] == 'final':
if vector[1] == 'dilution factor':
a=int(vector[2].split('in')[0])
b=int(vector[2].split('in')[1])
c=int(vector[3].split('in')[0])
d=int(vector[3].split('in')[1])
dilutionFactors[generation]=[((a+b)/a)**3,((c+d)/c)**3]
elif 'colony' in vector[1] and vector[1] not in fails:
print(vector)
a=int(vector[2])
b=int(vector[3])
c=int(vector[4])
d=int(vector[5])
x=dilutionFactors['single colonies'][0]
y=dilutionFactors['single colonies'][1]
survival_nt=(b*y)/(a*x)
survival_t=(d*y)/(c*x)
cf=survival_t-survival_nt
print(a,b,c,d)
print('survival_t',survival_t)
print('survival_nt',survival_nt)
print('cf',cf)
print('')
colonyCounts[vector[1]]=[cf,survival_nt]
return colonyCounts
def tprint(value):
'''
this function prints a string with formatted time
'''
now=datetime.datetime.now()
formattedNow=now.strftime("%Y-%m-%d %H:%M:%S")
print('{}\t{}...'.format(formattedNow,value))
return None
### MAIN
# 0. user defined variables
dataFile='/Users/alomana/gDrive2/projects/centers/ap/src/assessmentGraphs/evol4/APEE4 Colony Counts - pure cultures.csv'
dataFile='/Users/adriandelomana/Google Drive/projects/centers/ap/src/assessmentGraphs/evol4/APEE4 Colony Counts - pure cultures.csv'
# 1. reading the data
tprint('reading data files')
colonyCounts=dataReader()
# 3. making figures
tprint('building figures')
exponential=['colony # 21','colony # 22','colony # 23','colony # 24','colony # 25','colony # 26','colony # 27','colony # 28','colony # 29','colony # 30']
n50=['colony # 1','colony # 2','colony # 3','colony # 4','colony # 5']
cf50=[]; cf300=[]
for colony in colonyCounts.keys():
print(colony,colonyCounts[colony])
x=colonyCounts[colony][0]
if colony in n50:
cf50.append(x)
else:
cf300.append(x)
# building the graph
clonalCFs=[-0.068974033701179882,0.071254510811961769,-0.091336423581641291]
pos1=[1 for element in clonalCFs]
pos2=[2 for element in cf50]
pos3=[3 for element in cf300]
mean50=0.21970559001182866
mean300=0.3357718732989069
matplotlib.pyplot.plot(pos1,clonalCFs,marker='o',color='green',alpha=0.5,markersize=10,lw=0,mew=0)
matplotlib.pyplot.plot(pos2,cf50,marker='o',color='black',alpha=0.5,markersize=10,lw=0,mew=0)
matplotlib.pyplot.plot(pos3,cf300,marker='o',color='black',alpha=0.5,markersize=10,lw=0,mew=0)
cf300.sort()
print(cf300)
matplotlib.pyplot.plot([2-0.05,2+0.05],[mean50,mean50],'-',color='blue',lw=3)
matplotlib.pyplot.plot([3-0.05,3+0.05],[mean300,mean300],'-',color='blue',lw=3)
matplotlib.pyplot.ylabel('Conditioned Fitness')
matplotlib.pyplot.xlim([0.5,3.5])
matplotlib.pyplot.xticks([1,2,3],['C1-3 n=0','E2 n=50', 'E1 n=300'])
matplotlib.pyplot.savefig('singleColoniesFigure.pdf')
| gpl-3.0 |
michigraber/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test whether a classification score is significant, a common
technique is to repeat the classification procedure after randomly
permuting the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features for make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
phware/programingworkshop | Python/pandas_and_parallel/meso_surface.py | 8 | 2237 | import pandas as pd
import os
import datetime as dt
import numpy as np
import mesonet_calculations
from plotting import sfc_plot
''' Reads in mesonet data for a given time interval and creates surface plots,
saved in folders (one per plotted variable) in the current working directory.
'''
variables = {'temperature' : ('Degrees Celsius', '2 m Temperature'),
'pressure': ('Millibars', 'Sea Level Pressure'),
'dew_point': ('Degrees Celsius', 'Dewpoint'),
'wind_speed': ('Meters per second', '10 m Scalar Wind Speed'),
'gust_speed': ('Meters per second', '10 m Gust Wind Speed'),
'rainfall': ('Inches', 'Rainfall'),
}
# to_plot = 'temperature'
to_plot = np.array(['temperature', 'pressure', 'dew_point', 'wind_speed',
'gust_speed', 'rainfall'])
# Note that the start time should be divisible by 5 minutes
starttime = dt.datetime(2012, 6, 15, 1, 0)
endtime = dt.datetime(2012, 6, 15, 2, 0)
filename = 'locations.txt'
locations = pd.read_csv(filename, sep=' ')
met = pd.DataFrame()
dir = os.listdir('raw_data')
for file in dir:
if file[-4:] == '.txt':
met = pd.concat([met,
mesonet_calculations.meso_operations('raw_data/%s' %(file),
starttime,endtime,locations)], axis=0)
xmin = np.min(met['Lon'])
xmax = np.max(met['Lon'])
ymin = np.min(met['Lat'])
ymax = np.max(met['Lat'])
xi, yi = np.meshgrid(np.linspace(xmin, xmax, 200),
np.linspace(ymin, ymax, 200))
for var in to_plot:
    sfc_plot(starttime, endtime, var, variables[var],
             locations, met, xi, yi, xmin, xmax, ymin, ymax)
| mit |
tranlyvu/autonomous-vehicle-projects | Finding Lane Lines/src/finding_lane_lines.py | 1 | 6734 | #importing some packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import os
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
def draw_lines(img, lines, color=[255, 0, 0], thickness = 10):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
left_x = []
left_y = []
right_x = []
right_y = []
for line in lines:
for x1,y1,x2,y2 in line:
            # 1. skip near-horizontal segments (slope ~ 0): they are noise
            # 2. slope > 0 belongs to the right lane line; slope < 0 to the left
slope = ((y2-y1)/(x2-x1))
if (slope < 0):
left_x.append(x1)
left_x.append(x2)
left_y.append(y1)
left_y.append(y2)
elif (slope > 0):
right_x.append(x1)
right_x.append(x2)
right_y.append(y1)
right_y.append(y2)
if (len(left_x) > 0 and len(left_y) > 0):
# find coefficient
coeff_left = np.polyfit(left_x, left_y, 1)
# construct y =xa +b
func_left = np.poly1d(coeff_left)
x1L = int(func_left(0))
x2L = int(func_left(460))
cv2.line(img, (0, x1L), (460, x2L), color, thickness)
if (len(right_x) > 0 and len(right_y) > 0):
# find coefficient
coeff_right = np.polyfit(right_x, right_y, 1)
# construct y =xa +b
func_right = np.poly1d(coeff_right)
x1R = int(func_right(500))
x2R = int(func_right(img.shape[1]))
cv2.line(img, (500, x1R), (img.shape[1], x2R), color, thickness)
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, λ)
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# you should return the final output (image where lines are drawn on lanes)
img = grayscale(image)
# further smoothing to blur image for better result
img = gaussian_blur(img, kernel_size = 3)
img = canny(img, low_threshold = 80, high_threshold = 240)
# This time we are defining a four sided polygon to mask
imshape = image.shape
vertices = np.array([[(0,imshape[0]),(460, 320), (500, 320), (imshape[1],imshape[0])]], dtype=np.int32)
img = region_of_interest(img, vertices)
# Hough transform
line_image = hough_lines(img, rho = 2, theta= np.pi/180, threshold = 50, min_line_len = 40, max_line_gap = 20)
# Draw the lines on the edge image
final = weighted_img(line_image, image, α = 0.8, β = 1., λ = 0.)
return final
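# A quick single-image check of the pipeline (commented out; the image path is an
# assumption and may differ between checkouts):
# image = mpimg.imread('../test_images/solidWhiteRight.jpg')
# plt.imshow(process_image(image))
# plt.show()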
if __name__ == '__main__':
white_line_output = '../test_videos_output/solidWhiteRight.mp4'
clip1 = VideoFileClip('../test_videos/solidWhiteRight.mp4')
white_line_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
yellow_output = '../test_videos_output/solidYellowLeft.mp4'
clip2 = VideoFileClip('../test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
challenge_output = '../test_videos_output/challenge.mp4'
clip3 = VideoFileClip('../test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
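    # The *_output paths above are otherwise unused; presumably the annotated clips
    # are written out as the final step (standard moviepy call):
    white_line_clip.write_videofile(white_line_output, audio=False)
    yellow_clip.write_videofile(yellow_output, audio=False)
    challenge_clip.write_videofile(challenge_output, audio=False)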
| apache-2.0 |
raybuhr/pyfolio | pyfolio/txn.py | 5 | 4436 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from collections import defaultdict
import pandas as pd
def map_transaction(txn):
"""
Maps a single transaction row to a dictionary.
Parameters
----------
txn : pd.DataFrame
A single transaction object to convert to a dictionary.
Returns
-------
dict
Mapped transaction.
"""
# sid can either be just a single value or a SID descriptor
if isinstance(txn['sid'], dict):
sid = txn['sid']['sid']
symbol = txn['sid']['symbol']
else:
sid = txn['sid']
symbol = None
return {'sid': sid,
'symbol': symbol,
'price': txn['price'],
'order_id': txn['order_id'],
'amount': txn['amount'],
'commission': txn['commission'],
'dt': txn['dt']}
def make_transaction_frame(transactions):
"""
Formats a transaction DataFrame.
Parameters
----------
transactions : pd.DataFrame
Contains improperly formatted transactional data.
Returns
-------
df : pd.DataFrame
        Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
"""
transaction_list = []
for dt in transactions.index:
txns = transactions.loc[dt]
if len(txns) == 0:
continue
for txn in txns:
txn = map_transaction(txn)
transaction_list.append(txn)
df = pd.DataFrame(sorted(transaction_list, key=lambda x: x['dt']))
df['txn_dollars'] = -df['amount'] * df['price']
df.index = list(map(pd.Timestamp, df.dt.values))
return df
def get_txn_vol(transactions):
"""Extract daily transaction data from set of transaction objects.
Parameters
----------
transactions : pd.DataFrame
Time series containing one row per symbol (and potentially
duplicate datetime indices) and columns for amount and
price.
Returns
-------
pd.DataFrame
Daily transaction volume and number of shares.
- See full explanation in tears.create_full_tear_sheet.
"""
amounts = transactions.amount.abs()
prices = transactions.price
values = amounts * prices
daily_amounts = amounts.groupby(amounts.index).sum()
daily_values = values.groupby(values.index).sum()
daily_amounts.name = "txn_shares"
daily_values.name = "txn_volume"
return pd.concat([daily_values, daily_amounts], axis=1)
def create_txn_profits(transactions):
"""
Compute per-trade profits.
Generates a new transactions DataFrame with a profits column
Parameters
----------
transactions : pd.DataFrame
Daily transaction volume and number of shares.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
profits_dts : pd.DataFrame
DataFrame containing transactions and their profits, datetimes,
amounts, current prices, prior prices, and symbols.
"""
txn_descr = defaultdict(list)
for symbol, transactions_sym in transactions.groupby('symbol'):
transactions_sym = transactions_sym.reset_index()
for i, (amount, price, dt) in transactions_sym.iloc[1:][
['amount', 'price', 'date_time_utc']].iterrows():
prev_amount, prev_price, prev_dt = transactions_sym.loc[
i - 1, ['amount', 'price', 'date_time_utc']]
profit = (price - prev_price) * -amount
txn_descr['profits'].append(profit)
txn_descr['dts'].append(dt - prev_dt)
txn_descr['amounts'].append(amount)
txn_descr['prices'].append(price)
txn_descr['prev_prices'].append(prev_price)
txn_descr['symbols'].append(symbol)
profits_dts = pd.DataFrame(txn_descr)
return profits_dts
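# A minimal usage sketch (commented out; the column names and index follow the
# expectations of get_txn_vol above, and the values are made up):
# txns = pd.DataFrame({'amount': [10, -10], 'price': [100.0, 101.0],
#                      'symbol': ['AAPL', 'AAPL']},
#                     index=pd.to_datetime(['2015-01-02', '2015-01-05']))
# daily = get_txn_vol(txns)  # columns: txn_volume, txn_shares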
| apache-2.0 |
ScopeFoundry/FoundryDataBrowser | viewers/drift_correction.py | 1 | 9644 | '''
Created on Jun 23, 2017
@author: dbdurham
'''
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import h5py
from skimage.feature.register_translation import _upsampled_dft, _compute_error, _compute_phasediff
def register_translation_hybrid(src_image, target_image, exponent = 1, upsample_factor=1,
space="real"):
"""
Efficient subpixel image translation registration by hybrid-correlation (cross and phase).
Exponent = 1 -> cross correlation, exponent = 0 -> phase correlation.
Closer to zero is more precise but more susceptible to noise.
This code gives the same precision as the FFT upsampled correlation
in a fraction of the computation time and with reduced memory requirements.
It obtains an initial estimate of the cross-correlation peak by an FFT and
then refines the shift estimation by upsampling the DFT only in a small
neighborhood of that estimate by means of a matrix-multiply DFT.
Parameters
----------
src_image : ndarray
Reference image.
target_image : ndarray
Image to register. Must be same dimensionality as ``src_image``.
exponent: float, optional
Power to which amplitude contribution to correlation is raised.
exponent = 0: Phase correlation
exponent = 1: Cross correlation
0 < exponent < 1 = Hybrid
upsample_factor : int, optional
Upsampling factor. Images will be registered to within
``1 / upsample_factor`` of a pixel. For example
``upsample_factor == 20`` means the images will be registered
within 1/20th of a pixel. Default is 1 (no upsampling)
space : string, one of "real" or "fourier"
Defines how the algorithm interprets input data. "real" means data
will be FFT'd to compute the correlation, while "fourier" data will
bypass FFT of input data. Case insensitive.
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``target_image`` with
``src_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X)
error : float
Translation invariant normalized RMS error between ``src_image`` and
``target_image``.
phasediff : float
Global phase difference between the two images (should be
zero if images are non-negative).
References
----------
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms,"
Optics Letters 33, 156-158 (2008).
"""
# images must be the same shape
if src_image.shape != target_image.shape:
raise ValueError("Error: images must be same size for "
"register_translation")
# only 2D data makes sense right now
if src_image.ndim != 2 and upsample_factor > 1:
raise NotImplementedError("Error: register_translation only supports "
"subpixel registration for 2D images")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
src_freq = src_image
target_freq = target_image
# real data needs to be fft'd.
elif space.lower() == 'real':
src_image = np.array(src_image, dtype=np.complex128, copy=False)
target_image = np.array(target_image, dtype=np.complex128, copy=False)
src_freq = np.fft.fftn(src_image)
target_freq = np.fft.fftn(target_image)
else:
raise ValueError("Error: register_translation only knows the \"real\" "
"and \"fourier\" values for the ``space`` argument.")
# Whole-pixel shift - Compute hybrid-correlation by an IFFT
shape = src_freq.shape
image_product = src_freq * target_freq.conj()
amplitude = np.abs(image_product)
phase = np.angle(image_product)
total_fourier = amplitude**exponent * np.exp(phase * 1j)
correlation = np.fft.ifftn(total_fourier)
# Locate maximum
maxima = np.unravel_index(np.argmax(np.abs(correlation)),
correlation.shape)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.array(maxima, dtype=np.float64)
shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]
if upsample_factor == 1:
src_amp = np.sum(np.abs(src_freq) ** 2) / src_freq.size
target_amp = np.sum(np.abs(target_freq) ** 2) / target_freq.size
CCmax = correlation.max()
# If upsampling > 1, then refine estimate with matrix multiply DFT
else:
# Initial shift estimate in upsampled grid
shifts = np.round(shifts * upsample_factor) / upsample_factor
upsampled_region_size = np.ceil(upsample_factor * 1.5)
# Center of output array at dftshift + 1
dftshift = np.fix(upsampled_region_size / 2.0)
upsample_factor = np.array(upsample_factor, dtype=np.float64)
normalization = (src_freq.size * upsample_factor ** 2)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts*upsample_factor
correlation = _upsampled_dft(total_fourier.conj(),
upsampled_region_size,
upsample_factor,
sample_region_offset).conj()
correlation /= normalization
# Locate maximum and map back to original pixel grid
maxima = np.array(np.unravel_index(
np.argmax(np.abs(correlation)),
correlation.shape),
dtype=np.float64)
maxima -= dftshift
shifts = shifts + maxima / upsample_factor
CCmax = correlation.max()
src_amp = _upsampled_dft(src_freq * src_freq.conj(),
1, upsample_factor)[0, 0]
src_amp /= normalization
target_amp = _upsampled_dft(target_freq * target_freq.conj(),
1, upsample_factor)[0, 0]
target_amp /= normalization
# If its only one row or column the shift along that dimension has no
# effect. We set to zero.
for dim in range(src_freq.ndim):
if shape[dim] == 1:
shifts[dim] = 0
return shifts, _compute_error(CCmax, src_amp, target_amp),\
_compute_phasediff(CCmax)
def shift_subpixel(I, dx=0, dy=0):
# Shift an image with subpixel precision using the Fourier shift theorem
# Image to shift
G = np.fft.fft2(I)
# Prepare inverse pixel coordinate arrays
qy = np.array([np.fft.fftfreq(G.shape[0]),])
qy = np.repeat(qy.T, G.shape[1], axis=1)
qx = np.array([np.fft.fftfreq(G.shape[1]),])
qx = np.repeat(qx, G.shape[0], axis=0)
# Calculate shift plane wave
array_exp = np.exp(-2*np.pi*1j*(qx*dx + qy*dy))
# Perform shift
I_shift = np.fft.ifft2(G*array_exp)
return I_shift
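# For example (commented out; the sign convention follows the plane wave above,
# i.e. positive dx/dy move the image content toward +x/+y):
# I_shifted = np.real(shift_subpixel(I, dx=0.5, dy=0.0))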
def compute_pairwise_shifts(imstack):
# Calculates the pairwise shifts for images in a stack of format [frame, x, y].
# returns shift vector as [y, x] for each pair, a 2 x N-1 array where N is num_frames
scan_shape = imstack.shape
num_pairs = scan_shape[0]-1
print('Correcting ' + str(num_pairs) + ' frames...')
# Prepare window function (Hann)
win = np.outer(np.hanning(scan_shape[1]),np.hanning(scan_shape[2]))
# Pairwise shifts
shift = np.zeros((2, num_pairs))
for iPair in range(0, num_pairs):
image = imstack[iPair]
offset_image = imstack[iPair+1]
shift[:,iPair], error, diffphase = register_translation_hybrid(image*win, offset_image*win,
exponent = 0.3, upsample_factor = 100)
# Shifts are defined as [y, x] where y is shift of imaging location
# with respect to positive y axis, similarly for x
return shift
def compute_retained_box(shift_cumul, imshape):
# Computes coordinates and dimensions of area of image in view throughout the entire stack
# Uses cumulative shift vector [y, x] for each image, a 2 x N array with N = num_frames
# imshape is a tuple containing the (y, x) dimensions of the image in pixels
shift_cumul_y = shift_cumul[0,:]
shift_cumul_x = shift_cumul[1,:]
# NOTE: scan_shape indices 2, 3 correspond to y, x
y1 = int(round(np.max(shift_cumul_y[shift_cumul_y >= 0])+0.001, 0))
y2 = int(round(imshape[0] + np.min(shift_cumul_y[shift_cumul_y <= 0])-0.001, 0))
x1 = int(round(np.max(shift_cumul_x[shift_cumul_x >= 0])+0.001, 0))
x2 = int(round(imshape[1] + np.min(shift_cumul_x[shift_cumul_x <= 0])-0.001, 0))
boxfd = np.array([y1,y2,x1,x2])
boxdims = (boxfd[1]-boxfd[0], boxfd[3]-boxfd[2])
return boxfd, boxdims
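# A sketch of the intended end-to-end use of these helpers (commented out; the
# leading zero column and the shift sign convention are assumptions):
# shifts = compute_pairwise_shifts(imstack)                          # shape [2, N-1]
# shift_cumul = np.hstack((np.zeros((2, 1)), np.cumsum(shifts, 1)))  # shape [2, N]
# boxfd, boxdims = compute_retained_box(shift_cumul, imstack.shape[1:])
# aligned = align_image_stack(imstack, shift_cumul, boxfd)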
def align_image_stack(imstack, shift_cumul_set, boxfd):
# Use fourier shift theorem to shift and align the images
# Takes imageset of format [frames, x, y]
# shift_cumul_set is the cumulative shifts associated with the image stack
# boxfd is the coordinate array [y1,y2,x1,x2] of the region in the original image that
# is conserved throughout the image stack
    num_frames = imstack.shape[0]
    for iFrame in range(0, num_frames):
imstack[iFrame,:,:] = shift_subpixel(imstack[iFrame,:,:],
dx=shift_cumul_set[1, iFrame],
dy=shift_cumul_set[0, iFrame])
# Keep only preserved data
imstack = np.real(imstack[:,boxfd[0]:boxfd[1], boxfd[2]:boxfd[3]])
return imstack | bsd-3-clause |
neuropsychology/NeuroKit.py | examples/UnderDev/eeg/eeg_time_frequency.py | 1 | 11335 | """
Time-frequency submodule.
"""
from .eeg_data import eeg_select_electrodes
from ..miscellaneous import Time
import numpy as np
import pandas as pd
import mne
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_name_frequencies(freqs):
"""
    Name frequencies according to standard classifications.
Parameters
----------
freqs : list or numpy.array
list of floats containing frequencies to classify.
Returns
----------
freqs_names : list
Named frequencies
Example
----------
>>> import neurokit as nk
>>>
>>> nk.eeg_name_frequencies([0.5, 1.5, 3, 5, 7, 15])
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None
"""
freqs = list(freqs)
freqs_names = []
for freq in freqs:
if freq < 1:
freqs_names.append("UltraLow")
elif freq <= 3:
freqs_names.append("Delta")
elif freq <= 7:
freqs_names.append("Theta")
elif freq <= 9:
freqs_names.append("Alpha1/Mu")
elif freq <= 12:
freqs_names.append("Alpha2/Mu")
elif freq <= 13:
freqs_names.append("Beta1/Mu")
elif freq <= 17:
freqs_names.append("Beta1")
elif freq <= 30:
freqs_names.append("Beta2")
elif freq <= 40:
freqs_names.append("Gamma1")
elif freq <= 50:
freqs_names.append("Gamma2")
else:
freqs_names.append("UltraHigh")
return(freqs_names)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_psd(raw, sensors_include="all", sensors_exclude=None, fmin=0.016, fmax=60, method="multitaper", proj=False):
"""
Compute Power-Spectral Density (PSD).
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
sensors_include : str
        Sensor area to include. See :func:`neurokit.eeg_select_electrodes()`.
sensors_exclude : str
        Sensor area to exclude. See :func:`neurokit.eeg_select_electrodes()`.
fmin : float
Min frequency of interest.
fmax: float
Max frequency of interest.
method : str
"multitaper" or "welch".
proj : bool
add projectors.
Returns
----------
mean_psd : pandas.DataFrame
Averaged PSDs.
Example
----------
>>> import neurokit as nk
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None
"""
picks = mne.pick_types(raw.info, include=eeg_select_electrodes(include=sensors_include, exclude=sensors_exclude), exclude="bads")
if method == "multitaper":
psds, freqs = mne.time_frequency.psd_multitaper(raw,
fmin=fmin,
fmax=fmax,
low_bias=True,
proj=proj,
picks=picks)
else:
psds, freqs = mne.time_frequency.psd_welch(raw,
fmin=fmin,
fmax=fmax,
proj=proj,
picks=picks)
tf = pd.DataFrame(psds)
tf.columns = eeg_name_frequencies(freqs)
tf = tf.mean(axis=0)
mean_psd = {}
for freq in ["UltraLow", "Delta", "Theta", "Alpha", "Alpha1", "Alpha2", "Mu", "Beta", "Beta1", "Beta2", "Gamma", "Gamma1", "Gamma2", "UltraHigh"]:
mean_psd[freq] = tf[[freq in s for s in tf.index]].mean()
mean_psd = pd.DataFrame.from_dict(mean_psd, orient="index").T
return(mean_psd)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_create_frequency_bands(bands="all", step=1):
"""
Delta: 1-3Hz
Theta: 4-7Hz
Alpha1: 8-9Hz
Alpha2: 10-12Hz
Beta1: 13-17Hz
Beta2: 18-30Hz
Gamma1: 31-40Hz
Gamma2: 41-50Hz
Mu: 8-13Hz
"""
if bands == "all" or bands == "All":
bands = ["Delta", "Theta", "Alpha", "Beta", "Gamma", "Mu"]
if "Alpha" in bands:
bands.remove("Alpha")
bands += ["Alpha1", "Alpha2"]
if "Beta" in bands:
bands.remove("Beta")
bands += ["Beta1", "Beta2"]
if "Gamma" in bands:
bands.remove("Gamma")
bands += ["Gamma1", "Gamma2"]
frequencies = {}
for band in bands:
if band == "Delta":
frequencies[band] = np.arange(1, 3+0.1, step)
if band == "Theta":
frequencies[band] = np.arange(4, 7+0.1, step)
if band == "Alpha1":
frequencies[band] = np.arange(8, 9+0.1, step)
if band == "Alpha2":
frequencies[band] = np.arange(10, 12+0.1, step)
if band == "Beta1":
frequencies[band] = np.arange(13, 17+0.1, step)
if band == "Beta2":
frequencies[band] = np.arange(18, 30+0.1, step)
if band == "Gamma1":
frequencies[band] = np.arange(31, 40+0.1, step)
if band == "Gamma2":
frequencies[band] = np.arange(41, 50+0.1, step)
if band == "Mu":
frequencies[band] = np.arange(8, 13+0.1, step)
return(frequencies)
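# For reference, a small sketch of the returned structure (the values follow
# directly from the np.arange calls above):
# eeg_create_frequency_bands(bands=["Delta", "Theta"], step=1)
# -> {'Delta': array([1., 2., 3.]), 'Theta': array([4., 5., 6., 7.])}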
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_power_per_frequency_band(epoch, bands="all", step=1):
"""
"""
frequencies = eeg_create_frequency_bands(bands=bands, step=step)
power_per_band = {}
for band in frequencies:
power, itc = mne.time_frequency.tfr_morlet(epoch, freqs=frequencies[band], n_cycles=frequencies[band]/2, use_fft=True, return_itc=True, decim=3, n_jobs=1)
data = power.data
times = power.times
freqs = power.freqs
df = pd.DataFrame(np.average(data, axis=0).T, index=times, columns=freqs)
df = df.mean(axis=1)
power_per_band[band] = list(df)
df = pd.DataFrame.from_dict(power_per_band)
df.index = times
return(df)
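# Typical call (commented out; `epochs` is assumed to be an mne.Epochs instance):
# power_df = eeg_power_per_frequency_band(epochs, bands=["Theta", "Alpha"], step=1)
# power_df.plot()  # mean band power (averaged over channels), indexed by time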
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def eeg_power_per_epoch(epochs, include="all", exclude=None, hemisphere="both", include_central=True, frequency_bands="all", time_start=0, time_end="max", fill_bads="NA", print_progression=True):
"""
"""
epochs = epochs.copy().pick_channels(eeg_select_sensors(include=include, exclude=exclude, hemisphere=hemisphere, include_central=include_central))
dropped = list(epochs.drop_log) # get events
frequencies = eeg_create_frequency_bands(bands=frequency_bands)
events = {}
n_epoch = 0
clock = Time()
for event_type in enumerate(dropped):
if event_type[1] == []:
df = eeg_power_per_frequency_band(epochs[n_epoch], bands=frequency_bands, step=1)
if time_end == "max":
df = df.loc[time_start:,:] # Select times
else:
df = df.loc[time_start:time_end,:] # Select times
df = df.mean(axis=0) # Compute average
events[event_type[0]] = list(df)
n_epoch += 1
else:
if fill_bads == "NA":
events[event_type[0]] = [np.nan]*len(frequencies)
else:
events[event_type[0]] = [fill_bads]*len(frequencies)
# Compute remaining time
time = clock.get(reset=False)/1000
time = time/(event_type[0]+1)
time = time * (len(dropped)-(event_type[0]+1))
if print_progression == True:
print(str(round((event_type[0]+1)/len(dropped)*100)) + "% complete, remaining time: " + str(round(time, 2)) + 's')
df = pd.DataFrame.from_dict(events, orient="index")
columns_names = ["Power_" + x for x in frequencies.keys()]
df.columns = columns_names
return(df)
| mit |
Tamme/mutationalsignaturesNCSUT | LDA/lda-c/ldac_wrapper.py | 1 | 1261 | import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import sys
sys.path.insert(1, '../')
sys.path.insert(1, '/home/tamme/Desktop/Masters/data')
#import file
from similarity import *
from scipy.spatial.distance import pdist, squareform
mat = scipy.io.loadmat('21breastWallHallsigs_4')
origMat = scipy.io.loadmat('21_WTSI_BRCA_whole_genome_substitutions.mat')
origMat2 = scipy.io.loadmat('21_WTSI_BRCA_whole_genome_substitutionsMutationOrder.mat')
#print mat
Wall = mat['Wall']
Hall = mat['Hall']
print Wall.shape
print Hall.shape
frob, avgStab, processStabAvg, H, exposureStd, W, centroidStd = evaluateStability(4, 100, Wall, Hall)
print processStabAvg
print H.shape
print W.shape
original = origMat['originalGenomes']
original2 = origMat2['originalGenomes']
print original.shape
difference = W.dot(H) - original
difference2 = W.dot(H) - original2
#print 'Error diff', difference.shape, np.sum(difference)
#frobenius = sqrt(sum(diag(t(diff) % * % diff)))
frob2 = np.sqrt(np.sum(difference.T.dot(difference).diagonal()))
frob3 = np.sqrt(np.sum(difference2.T.dot(difference2).diagonal()))
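# equivalently: np.linalg.norm(difference, 'fro') and np.linalg.norm(difference2, 'fro')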
print frob2
print frob3
#scipy.io.savemat('WTEST.mat', mdict={'W': W, 'R':original, 'H':H})
#mutMatrix = mat['originalGenomes']
#mutMatrix = mutMatrix.T | gpl-3.0 |
Winand/pandas | pandas/tests/indexes/timedeltas/test_tools.py | 6 | 7568 | import pytest
from datetime import time, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
from pandas import (Series, Timedelta, to_timedelta, isna,
TimedeltaIndex)
from pandas._libs.tslib import iNaT
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert (to_timedelta('1 days 06:05:01.00003', box=False) ==
conv(d1 + np.timedelta64(6 * 3600 + 5 * 60 + 1, 's') +
np.timedelta64(30, 'us')))
assert (to_timedelta('15.5us', box=False) ==
conv(np.timedelta64(15500, 'ns')))
# empty string
result = to_timedelta('', box=False)
assert result.astype('int64') == iNaT
result = to_timedelta(['', ''])
assert isna(result).all()
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, 's')]))
expected = pd.Index(np.array([np.timedelta64(1, 's')]))
tm.assert_index_equal(result, expected)
# ints
result = np.timedelta64(0, 'ns')
expected = to_timedelta(0, box=False)
assert result == expected
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d', '1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([np.timedelta64(0, 'ns'), np.timedelta64(
10, 's').astype('m8[ns]')])
expected = to_timedelta([0, 10], unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
# arrays of various dtypes
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='s')
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='m')
expected = TimedeltaIndex([np.timedelta64(1, 'm')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='h')
expected = TimedeltaIndex([np.timedelta64(1, 'h')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 'D')] * 5)
tm.assert_index_equal(result, expected)
# Test with lists as input when box=false
expected = np.array(np.arange(3) * 1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array(
[0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def test_to_timedelta_invalid(self):
# bad value for errors parameter
msg = "errors must be one of"
tm.assert_raises_regex(ValueError, msg, to_timedelta,
['foo'], errors='never')
# these will error
pytest.raises(ValueError, lambda: to_timedelta([1, 2], unit='foo'))
pytest.raises(ValueError, lambda: to_timedelta(1, unit='foo'))
# time not supported ATM
pytest.raises(ValueError, lambda: to_timedelta(time(second=1)))
assert to_timedelta(time(second=1), errors='coerce') is pd.NaT
pytest.raises(ValueError, lambda: to_timedelta(['foo', 'bar']))
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(['foo', 'bar'], errors='coerce'))
tm.assert_index_equal(TimedeltaIndex(['1 day', pd.NaT, '1 min']),
to_timedelta(['1 day', 'bar', '1 min'],
errors='coerce'))
# gh-13613: these should not error because errors='ignore'
invalid_data = 'apple'
assert invalid_data == to_timedelta(invalid_data, errors='ignore')
invalid_data = ['apple', '1 days']
tm.assert_numpy_array_equal(
np.array(invalid_data, dtype=object),
to_timedelta(invalid_data, errors='ignore'))
invalid_data = pd.Index(['apple', '1 days'])
tm.assert_index_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
invalid_data = Series(['apple', '1 days'])
tm.assert_series_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, 's')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'),
timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
assert actual.value == timedelta_NaT.astype('int64')
actual = pd.to_timedelta(pd.NaT)
assert actual.value == timedelta_NaT.astype('int64')
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
assert result == expected
result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1,
milliseconds=1, microseconds=1, nanoseconds=1)
expected = Timedelta(694861001001001)
assert result == expected
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
assert result == expected
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
assert result == expected
result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
assert result == expected
pytest.raises(TypeError, lambda: Timedelta(nanoseconds='abc'))
| bsd-3-clause |
jseabold/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
hainm/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat, slowly decaying tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features // 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i + 1)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
bhargav/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
so that a balance is maintained between the two.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
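# Each entry is X[i, j] = 1. / (i + j + 1) (0-based indices), i.e. the classic
# Hilbert matrix. Its condition number grows very quickly with size, which is
# why unregularized least-squares weights are so unstable here and why the
# ridge penalty (adding alpha * ||w||^2 to the squared loss) stabilizes them.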
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
JPFrancoia/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the position of the data points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
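# `connectivity` is a sparse (n_samples, n_samples) adjacency matrix of the
# 10-nearest-neighbors graph; with it, AgglomerativeClustering only merges
# clusters that are connected in this graph, which enforces the structure
# prior described in the docstring above.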
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
summanlp/gensim | gensim/sklearn_api/lsimodel.py | 1 | 3431 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <[email protected]>
# Copyright (C) 2017 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit-learn interface for gensim, for easy use of gensim models with scikit-learn.
Follows scikit-learn API conventions.
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class LsiTransformer(TransformerMixin, BaseEstimator):
"""
Base LSI module
"""
def __init__(self, num_topics=200, id2word=None, chunksize=20000,
decay=1.0, onepass=True, power_iters=2, extra_samples=100):
"""
Sklearn wrapper for the gensim LSI model. See gensim.models.LsiModel for parameter details.
"""
self.gensim_model = None
self.num_topics = num_topics
self.id2word = id2word
self.chunksize = chunksize
self.decay = decay
self.onepass = onepass
self.extra_samples = extra_samples
self.power_iters = power_iters
def fit(self, X, y=None):
"""
Fit the model according to the given training data.
Calls gensim.models.LsiModel
"""
if sparse.issparse(X):
corpus = matutils.Sparse2Corpus(X)
else:
corpus = X
self.gensim_model = models.LsiModel(corpus=corpus, num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
decay=self.decay, onepass=self.onepass, power_iters=self.power_iters, extra_samples=self.extra_samples)
return self
def transform(self, docs):
"""
Takes a list of documents as input ('docs').
Returns a matrix of topic distributions for the given BOW documents, where
entry (i, j) is the weight of topic j in document i.
The input `docs` should be in BOW format and can be a list of documents
like [[(4, 1), (7, 1)], [(9, 1), (13, 1)], [(2, 1), (6, 1)]]
or a single document like [(4, 1), (7, 1)]
"""
if self.gensim_model is None:
raise NotFittedError("This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method.")
# The input as array of array
check = lambda x: [x] if isinstance(x[0], tuple) else x
docs = check(docs)
X = [[] for i in range(0, len(docs))]
for k, v in enumerate(docs):
doc_topics = self.gensim_model[v]
# returning dense representation for compatibility with sklearn but we should go back to sparse representation in the future
probs_docs = matutils.sparse2full(doc_topics, self.num_topics)
X[k] = probs_docs
return np.reshape(np.array(X), (len(docs), self.num_topics))
def partial_fit(self, X):
"""
Train model over X.
"""
if sparse.issparse(X):
X = matutils.Sparse2Corpus(X)
if self.gensim_model is None:
self.gensim_model = models.LsiModel(num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
decay=self.decay, onepass=self.onepass, power_iters=self.power_iters, extra_samples=self.extra_samples)
self.gensim_model.add_documents(corpus=X)
return self
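# A minimal usage sketch (illustration only, not part of the original module;
# `dictionary` and `bow_corpus` are assumed to be a gensim Dictionary and a
# list of BOW documents such as [[(0, 1), (3, 2)], ...]):
#
# lsi = LsiTransformer(num_topics=10, id2word=dictionary)
# topic_matrix = lsi.fit(bow_corpus).transform(bow_corpus)
# # topic_matrix is a dense array of shape (len(bow_corpus), 10)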
| lgpl-2.1 |
mojoboss/scikit-learn | sklearn/feature_extraction/text.py | 36 | 49753 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
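# For example (illustration only), both helpers map accented Latin text to its
# unaccented form:
# strip_accents_unicode(u'caf\xe9') == u'cafe'
# strip_accents_ascii(u'caf\xe9') == u'cafe'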
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
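# For instance (illustration only), with ngram_range=(1, 2) the tokens
# ['please', 'call', 'back'] become
# ['please', 'call', 'back', 'please call', 'call back'].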
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
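# For instance (illustration only), with ngram_range=(3, 3) the document
# "jam" is padded to " jam " and yields [' ja', 'jam', 'am '].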
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
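# A minimal usage sketch (illustration only, not part of the original module):
#
# hv = HashingVectorizer(n_features=2 ** 10)
# X = hv.transform(['the quick brown fox', 'jumped over the lazy dog'])
# # X is a (2, 1024) l2-normalized sparse matrix; because of the hashing
# # trick there is no vocabulary_ to map columns back to token strings.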
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non-zero in more documents than high or fewer
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
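# A minimal usage sketch (illustration only, not part of the original module):
#
# cv = CountVectorizer()
# X = cv.fit_transform(['the cat sat', 'the cat sat on the mat'])
# # cv.get_feature_names() == ['cat', 'mat', 'on', 'sat', 'the']
# # X.toarray() == [[1, 0, 0, 1, 1],
# #                 [1, 1, 1, 1, 2]]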
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document frequency
strictly higher than the given threshold (corpus specific stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document frequency
strictly lower than the given threshold.
This value is also called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
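# A minimal usage sketch (illustration only, not part of the original module);
# the result matches CountVectorizer followed by TfidfTransformer:
#
# tv = TfidfVectorizer()
# X = tv.fit_transform(['the cat sat', 'the cat sat on the mat'])
# # X is a (2, 5) sparse matrix of l2-normalized tf-idf weights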
| bsd-3-clause |
lucidfrontier45/scikit-learn | examples/linear_model/plot_iris_logistic.py | 5 | 1646 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print __doc__
# Code source: Gael Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1, figsize=(4, 3))
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=pl.cm.Paired)
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.xticks(())
pl.yticks(())
pl.show()
| bsd-3-clause |
jkorell/PTVS | Python/Product/ML/ProjectTemplates/RegressionTemplate/regression.py | 18 | 10263 | '''
This script performs the basic process for applying a machine learning
algorithm to a dataset using Python libraries.
The four steps are:
1. Download a dataset (using pandas)
2. Process the numeric data (using numpy)
3. Train and evaluate learners (using scikit-learn)
4. Plot and compare results (using matplotlib)
The data is downloaded from URL, which is defined below. As is normal
for machine learning problems, the nature of the source data affects
the entire solution. When you change URL to refer to your own data, you
will need to review the data processing steps to ensure they remain
correct.
============
Example Data
============
The example is from http://mldata.org/repository/data/viewslug/stockvalues/
It contains stock prices and the values of three indices for each day
over a five year period. See the linked page for more details about
this data set.
This script uses regression learners to predict the stock price for
the second half of this period based on the values of the indices. This
is a naive approach, and a more robust method would use each prediction
as an input for the next, and would predict relative rather than
absolute values.
'''
# Remember to update the script for the new data when you change this URL
URL = "http://mldata.org/repository/data/download/csv/stockvalues/"
# This is the column of the sample data to predict.
# Try changing it to other integers between 1 and 155.
TARGET_COLUMN = 32
# Uncomment this call when using matplotlib to generate images
# rather than displaying interactive UI.
#import matplotlib
#matplotlib.use('Agg')
from pandas import read_table
import numpy as np
import matplotlib.pyplot as plt
try:
# [OPTIONAL] Seaborn makes plots nicer
import seaborn
except ImportError:
pass
# =====================================================================
def download_data():
'''
Downloads the data for this script into a pandas DataFrame.
'''
# If your data is in an Excel file, install 'xlrd' and use
# pandas.read_excel instead of read_table
#from pandas import read_excel
#frame = read_excel(URL)
# If your data is in a private Azure blob, install 'azure' and use
# BlobService.get_blob_to_path() with read_table() or read_excel()
#import azure.storage
#service = azure.storage.BlobService(ACCOUNT_NAME, ACCOUNT_KEY)
#service.get_blob_to_path(container_name, blob_name, 'my_data.csv')
#frame = read_table('my_data.csv', ...
frame = read_table(
URL,
# Uncomment if the file needs to be decompressed
#compression='gzip',
#compression='bz2',
# Specify the file encoding
# Latin-1 is common for data from US sources
encoding='latin-1',
#encoding='utf-8', # UTF-8 is also common
# Specify the separator in the data
sep=',', # comma separated values
#sep='\t', # tab separated values
#sep=' ', # space separated values
# Ignore spaces after the separator
skipinitialspace=True,
# Generate row labels from each row number
index_col=None,
#index_col=0, # use the first column as row labels
#index_col=-1, # use the last column as row labels
# Generate column headers from each column number
header=None,
#header=0, # use the first line as headers
# Use manual headers and skip the first row in the file
#header=0,
#names=['col1', 'col2', ...],
)
# Return the entire frame
#return frame
# Return a subset of the columns
return frame[[156, 157, 158, TARGET_COLUMN]]
# =====================================================================
def get_features_and_labels(frame):
'''
Transforms and scales the input data and returns numpy arrays for
training and testing inputs and targets.
'''
# Replace missing values with 0.0
# or we can use scikit-learn to impute missing values below
#frame[frame.isnull()] = 0.0
# Convert values to floats
arr = np.array(frame, dtype=np.float)
# Normalize the entire data set
from sklearn.preprocessing import StandardScaler, MinMaxScaler
arr = MinMaxScaler().fit_transform(arr)
# Use the last column as the target value
X, y = arr[:, :-1], arr[:, -1]
# To use the first column instead, change the index value
#X, y = arr[:, 1:], arr[:, 0]
# Use 50% of the data for training, but we will test against the
# entire set
from sklearn.cross_validation import train_test_split
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.5)
X_test, y_test = X, y
# If values are missing we could impute them from the training data
#from sklearn.preprocessing import Imputer
#imputer = Imputer(strategy='mean')
#imputer.fit(X_train)
#X_train = imputer.transform(X_train)
#X_test = imputer.transform(X_test)
# Normalize the attribute values to mean=0 and variance=1
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# To scale to a specified range, use MinMaxScaler
#from sklearn.preprocessing import MinMaxScaler
#scaler = MinMaxScaler(feature_range=(0, 1))
# Fit the scaler based on the training data, then apply the same
# scaling to both training and test sets.
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Return the training and test sets
return X_train, X_test, y_train, y_test
# =====================================================================
def evaluate_learner(X_train, X_test, y_train, y_test):
'''
Run multiple times with different algorithms to get an idea of the
relative performance of each configuration.
Returns a sequence of tuples containing:
(title, expected values, actual values)
for each learner.
'''
# Use a support vector machine for regression
from sklearn.svm import SVR
# Train using a radial basis function (RBF) kernel
svr = SVR(kernel='rbf', gamma=0.1)
svr.fit(X_train, y_train)
y_pred = svr.predict(X_test)
r_2 = svr.score(X_test, y_test)
yield 'RBF Model ($R^2={:.3f}$)'.format(r_2), y_test, y_pred
# Train using a linear kernel
svr = SVR(kernel='linear')
svr.fit(X_train, y_train)
y_pred = svr.predict(X_test)
r_2 = svr.score(X_test, y_test)
yield 'Linear Model ($R^2={:.3f}$)'.format(r_2), y_test, y_pred
# Train using a polynomial kernel
svr = SVR(kernel='poly', degree=2)
svr.fit(X_train, y_train)
y_pred = svr.predict(X_test)
r_2 = svr.score(X_test, y_test)
yield 'Polynomial Model ($R^2={:.3f}$)'.format(r_2), y_test, y_pred
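# [OPTIONAL] A hedged sketch, not part of the original template: any other
# regressor from scikit-learn can be compared the same way. For example, a
# random forest (uncomment to include it in the results):
#from sklearn.ensemble import RandomForestRegressor
#forest = RandomForestRegressor(n_estimators=100)
#forest.fit(X_train, y_train)
#y_pred = forest.predict(X_test)
#r_2 = forest.score(X_test, y_test)
#yield 'Random Forest ($R^2={:.3f}$)'.format(r_2), y_test, y_pred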
# =====================================================================
def plot(results):
'''
Create a plot comparing multiple learners.
`results` is a list of tuples containing:
(title, expected values, actual values)
All the elements in results will be plotted.
'''
# Using subplots to display the results on the same X axis
fig, plts = plt.subplots(nrows=len(results), figsize=(8, 8))
fig.canvas.set_window_title('Predicting data from ' + URL)
# Show each element in the plots returned from plt.subplots()
for subplot, (title, y, y_pred) in zip(plts, results):
# Configure each subplot to have no tick marks
# (these are meaningless for the sample dataset)
subplot.set_xticklabels(())
subplot.set_yticklabels(())
# Label the vertical axis
subplot.set_ylabel('stock price')
# Set the title for the subplot
subplot.set_title(title)
# Plot the actual data and the prediction
subplot.plot(y, 'b', label='actual')
subplot.plot(y_pred, 'r', label='predicted')
# Shade the area between the predicted and the actual values
subplot.fill_between(
# Generate X values [0, 1, 2, ..., len(y)-2, len(y)-1]
np.arange(0, len(y), 1),
y,
y_pred,
color='r',
alpha=0.2
)
# Mark the extent of the training data
subplot.axvline(len(y) // 2, linestyle='--', color='0', alpha=0.2)
# Include a legend in each subplot
subplot.legend()
# Let matplotlib handle the subplot layout
fig.tight_layout()
# ==================================
# Display the plot in interactive UI
plt.show()
# To save the plot to an image file, use savefig()
#plt.savefig('plot.png')
# Open the image file with the default image viewer
#import subprocess
#subprocess.Popen('plot.png', shell=True)
# To save the plot to an image in memory, use BytesIO and savefig()
# This can then be written to any stream-like object, such as a
# file or HTTP response.
#from io import BytesIO
#img_stream = BytesIO()
#plt.savefig(img_stream, format='png')
#img_bytes = img_stream.getvalue()
#print('Image is {} bytes - {!r}'.format(len(img_bytes), img_bytes[:8] + b'...'))
# Closing the figure allows matplotlib to release the memory used.
plt.close()
# =====================================================================
if __name__ == '__main__':
# Download the data set from URL
print("Downloading data from {}".format(URL))
frame = download_data()
# Process data into feature and label arrays
print("Processing {} samples with {} attributes".format(len(frame.index), len(frame.columns)))
X_train, X_test, y_train, y_test = get_features_and_labels(frame)
# Evaluate multiple regression learners on the data
print("Evaluating regression learners")
results = list(evaluate_learner(X_train, X_test, y_train, y_test))
# Display the results
print("Plotting the results")
plot(results)
| apache-2.0 |
thientu/scikit-learn | sklearn/cross_validation.py | 47 | 67782 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
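# A minimal sketch (illustrative only, not part of the original module) of the
# contract described above: a subclass only needs to yield test indices (or
# boolean masks) and inherits the train/test iteration from _PartitionIterator.
# The class name and behaviour below are hypothetical.
#
#     class FirstKOut(_PartitionIterator):
#         """Single split that uses the first k samples as the test set."""
#         def __init__(self, n, k):
#             super(FirstKOut, self).__init__(n)
#             self.k = k
#         def _iter_test_indices(self):
#             yield np.arange(self.k)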
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used as a validation set once while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3, shuffle=False, random_state=None):
super(LabelKFold, self).__init__(len(labels), n_folds, shuffle,
random_state)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
for i in range(self.n_folds):
yield (self.idxs == i)
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds); the last one has the
complementary size.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels, while the latter uses samples
all assigned the same label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
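# Illustrative sketch (not part of the original module) of the values computed
# by _validate_shuffle_split for typical inputs: a float test_size is rounded
# up with ceil(), a float train_size is rounded down with floor(), and
# whichever side is None is filled with the complement.
#
#     _validate_shuffle_split(10, test_size=0.25, train_size=None)  # -> (7, 3)
#     _validate_shuffle_split(10, test_size=3, train_size=5)        # -> (5, 3)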
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# divisors of the number of elements per class), we may end
# up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete the split by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
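# Hedged usage sketch (illustrative, not part of the original module); the
# labels below are hypothetical. Each split holds out whole labels, so a given
# label never appears on both sides of the same split.
#
#     labels = [1, 1, 2, 2, 3, 3, 3]
#     lss = LabelShuffleSplit(labels, n_iter=4, test_size=0.5, random_state=0)
#     for train, test in lss:
#         pass  # train/test are arrays of sample indices, split by label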
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
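# Hedged usage sketch (illustrative, not part of the original module):
# cross_val_predict returns one out-of-fold prediction per input sample.
#
#     from sklearn.datasets import load_iris
#     from sklearn.linear_model import LogisticRegression
#     iris = load_iris()
#     preds = cross_val_predict(LogisticRegression(), iris.data, iris.target,
#                               cv=5)
#     assert preds.shape[0] == iris.target.shape[0]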
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
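# Illustrative examples (not part of the original module):
#
#     _check_is_partition(np.array([2, 0, 1]), 3)  # -> True
#     _check_is_partition(np.array([0, 0, 1]), 3)  # -> False (index 2 missing)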
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
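# Hedged usage sketch (illustrative, not part of the original module):
# one score per fold is returned, which can then be summarised.
#
#     from sklearn.datasets import load_iris
#     from sklearn.svm import SVC
#     iris = load_iris()
#     scores = cross_val_score(SVC(kernel='linear'), iris.data, iris.target,
#                              cv=5, scoring='accuracy')
#     print(scores.mean(), scores.std())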
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
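# Hedged sketch (illustrative, not part of the original module): for a pairwise
# estimator (est._pairwise is True; est, K, y, test and train are hypothetical)
# with a precomputed square kernel K of shape (n, n), the test rows are paired
# with the training columns:
#
#     X_test, _ = _safe_split(est, K, y, test, train)
#     # X_test.shape == (len(test), len(train))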
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``classifier`` is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
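# Hedged sketch (illustrative, not part of the original module) of the
# dispatch performed by check_cv:
#
#     y = [0, 1, 0, 1]
#     check_cv(2, X=[[0], [1], [2], [3]], y=y, classifier=True)
#     # -> StratifiedKFold(y, n_folds=2), because y is binary
#     check_cv(2, X=[[0], [1], [2], [3]], y=[0.1, 0.2, 0.3, 0.4],
#              classifier=False)
#     # -> KFold(n=4, n_folds=2)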
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
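    # The +1 in numerator and denominator counts the unpermuted score as one
    # more "permutation", so the p-value is never zero and is bounded below by
    # 1 / (n_permutations + 1).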
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
    Quick utility that wraps input validation and
    ``next(iter(ShuffleSplit(n_samples)))``, applying both to the input data
    in a single call to split (and optionally subsample) it in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
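    # Draw a single random split: stratified on the provided label array when
    # ``stratify`` is given, otherwise a plain ShuffleSplit over the samples.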
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
ishay2b/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 62 | 3960 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with ops.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
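          # The feeding queue cycles through the array, so the expected row
          # indices wrap around modulo the number of rows.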
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(
dataframe, batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with ops.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [
j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mesnilgr/fast-rcnn | tools/train_svms.py | 42 | 13247 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
        # Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
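                # Hard negatives for class j: proposals that still score above
                # hard_thresh but overlap ground truth less than neg_iou_thresh.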
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
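        # Reconstruct the LinearSVC objective: class-weighted hinge loss on the
        # positives, hinge loss on the negatives, plus L2 regularization of the
        # weights and (scaled) intercept.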
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
return ((w * self.feature_scale, b * self.feature_scale),
pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_roidb()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
| mit |
jdrudolph/photon | phos/algo/subnetwork.py | 1 | 5192 | import os
import shutil
import json
import goenrich
import pandas as pd
import numpy as np
import phos.algo.anat
def inference(exp, scores, network_undirected, anat, go, task_id, db, **kwargs):
active = scores[scores['Significant']]['GeneID']
# LOCAL
# subnetwork = phos.algo.anat.local_network(network_undirected,
# active, exp, task_id=task_id, **anat)
subnetwork = phos.algo.anat.remote_network(task_id, network_undirected,
active, anat.get('anchor', None)).astype(int)
if subnetwork is None:
print("Subnetwork did not contain any edges")
        subnetwork = pd.DataFrame({'s': [-1], 't': [-1]})
# GO enrichment
O = goenrich.obo.ontology(db['go']['ontology'])
annotations = goenrich.read.gene2go(db['go']['annotations'])
nodes = pd.DataFrame({'GeneID' :
list(set(network_undirected['kin']) | set(network_undirected['sub']))})
background = goenrich.tools.generate_background(annotations, nodes, 'GO_ID', 'GeneID')
query = (set(subnetwork['s']) | set(subnetwork['t'])) - set([anat.get('anchor', None)])
goenrich.enrich.propagate(O, background, 'scores')
go_scores = goenrich.enrich.analyze(O, query, 'scores', **go)
return subnetwork, go_scores
import networkx as nx
from networkx.readwrite import json_graph
def create_graph(exp, scores, network, task_id, db, anchor=None):
""" generate result graph
:param exp: pandas.DataFrame with columns: Symbol, GeneID, Amino.Acid, Position, avg
:param scores: protein activity scores
:param network: pandas.DataFrame with columns 's' -> 't'
:param anchor: anchor id
"""
terminals = set(scores[scores['Significant']]['GeneID'])
G = nx.from_edgelist([[int(x) for x in y] for y in network[['s','t']].values])
df = (exp[['Symbol', 'GeneID', 'Amino.Acid', 'Position', 'avg']]
[exp['GeneID'].isin(network['s']) | exp['GeneID'].isin(network['t'])])
node_attributes = (df.groupby('GeneID')
.apply(lambda x : [{'AA': a, 'POS': int(p), 'AVG': v, 'NUM': len(x)} for a,p,v in
zip(x['Amino.Acid'].values, x['Position'].values, x['avg'].values)]))
geneinfo_table = pd.read_csv(db['geneinfo'], comment='#', usecols=[1, 2], header=None, sep='\t')
gene_name = dict(geneinfo_table.dropna().values)
for n in G:
G.node[n]['residues'] = node_attributes.get(n, [{'NUM' : 0}])
G.node[n]['type'] = 'terminal' if n in terminals else 'connector'
G.node[n]['name'] = gene_name.get(n, n)
if anchor in G and anchor is not None:
G.node[anchor]['type'] = 'anchor'
return G
def draw(exp, scores, network, task_id, template_dir, db, anchor=None):
""" generate interactive result graph based on d3js
check `phos/algo/anat.html` for the javascript part of the visualization
:param exp: pandas.DataFrame with columns: Symbol, GeneID, Amino.Acid, Position, avg
:param scores: protein activity scores
:param network: pandas.DataFrame with columns 's' -> 't'
:param anchor: anchor id
:returns HTML: the generated graph
"""
G = create_graph(exp, scores, network, task_id, db, anchor)
graph = json_graph.node_link_data(G)
node_index = {n['id']: i for i,n in enumerate(graph['nodes'])}
graph['links'] = [{'source': node_index[link['source']], 'target': node_index[link['target']]} for link in graph['links']]
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(template_dir))
HTML = env.get_template('result.html').render(task_id=task_id, graph=json.dumps(graph))
return HTML
def _rnd_go(exp, _score, network_undirected, anat, go, seed):
np.random.seed(seed)
rnd = (_score[['Significant']]
.reindex(np.random.permutation(_score.index))
.reset_index(drop=True))
rnd['GeneID'] = _score['GeneID']
_anat = anat.copy()
rand_dir = os.path.join(anat['anat_dir'], str(seed))
_anat.update({ 'anat_dir' : rand_dir,
'outfile' : os.path.join(anat['anat_dir'], 'random')})
_, go_rand = inference(exp, rnd, network_undirected, _anat, go)
shutil.rmtree(rand_dir)
return go_rand.dropna()
def inference_empiric_pvalue(exp, scores, network_undirected, anat, go, **kwargs):
from joblib import Parallel, delayed
perm = anat['permutations']
subnetwork, go_scores = inference(exp, scores, network_undirected, anat, go)
_anat = anat.copy()
if 'viz_out' in _anat:
del _anat['viz_out']
MAX_INT = np.iinfo(np.int32).max
seeds = np.random.randint(MAX_INT, size=perm)
_scores = scores.reset_index(drop=True)
rands = Parallel(n_jobs=35)(delayed(_rnd_go)(exp, _scores, network_undirected, _anat, go, seed) for seed in seeds)
random = pd.concat(rands)[['term', 'p']].rename(columns={'p' : 'p_random'})
df = (pd.merge(go_scores, random, how='left')
.dropna(subset=['q'])
.fillna(0))
df['p>p_random'] = df['p'] > df['p_random']
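    # Empirical p-value per GO term: the fraction of permutations in which the
    # randomized analysis reaches a smaller enrichment p-value than observed.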
p_empiric = ((df.groupby('term')['p>p_random'].sum() / perm)
.to_frame(name='p_empiric')
.reset_index())
result = pd.merge(go_scores, p_empiric)
return subnetwork, result
| gpl-3.0 |
RobertABT/heightmap | build/matplotlib/doc/pyplots/make.py | 5 | 2007 | #!/usr/bin/env python
from __future__ import print_function
import sys, os, glob
import matplotlib
import IPython.Shell
#matplotlib.rcdefaults()
matplotlib.use('Agg')
mplshell = IPython.Shell.MatplotlibShell('mpl')
formats = [('png', 100),
('hires.png', 200),
('pdf', 72)]
def figs():
print('making figs')
import matplotlib.pyplot as plt
for fname in glob.glob('*.py'):
if fname.split('/')[-1] == __file__.split('/')[-1]: continue
basename, ext = os.path.splitext(fname)
imagefiles = dict([('%s.%s'%(basename, format), dpi)
for format, dpi in formats])
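        # Map each expected output filename (one per format) to the DPI it
        # should be rendered at, so already-built figures can be skipped.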
all_exists = True
for imagefile in imagefiles:
if not os.path.exists(imagefile):
all_exists = False
break
if all_exists:
print(' already have %s'%fname)
else:
print(' building %s'%fname)
plt.close('all') # we need to clear between runs
mplshell.magic_run(basename)
for imagefile, dpi in imagefiles.iteritems():
# todo: this will get called even if the run script
# fails and exits, thus creating a stub pdf and png
                # files preventing them from getting built successfully
# later
plt.savefig(imagefile, dpi=dpi)
print('all figures made')
def clean():
patterns = (['#*', '*~', '*pyc'] +
['*.%s' % format for format, dpi in formats])
for pattern in patterns:
for fname in glob.glob(pattern):
os.remove(fname)
print('all clean')
def all():
figs()
funcd = {'figs':figs,
'clean':clean,
'all':all,
}
if len(sys.argv)>1:
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
            raise SystemExit('Do not know how to handle %s; valid args are %s' % (
                arg, funcd.keys()))
func()
else:
all()
| mit |
guschmue/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions_test.py | 59 | 13552 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
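    # Each call to the feed function returns the next batch as a dict keyed by
    # placeholder name; with batch_size=1 the indices cycle through the rows.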
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchTwoWithOneEpoch(self):
array = np.arange(5) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"value_placeholder": [10, 11]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"value_placeholder": [12, 13]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"value_placeholder": [14]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {
"index_placeholder":
list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder":
np.arange(32).reshape([16, 2]).tolist() * 6 +
[[0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
array = np.arange(2) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"value_placeholder": [10, 11, 10, 11],
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {
"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchTwoWithOneEpoch(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 37)
array2 = np.arange(64, 69)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [96, 97],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [98, 99],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [100],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 34)
array2 = np.arange(64, 66)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [96, 97, 96, 97],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
a = np.arange(32, 37)
b = np.arange(64, 69)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
a = np.arange(32, 34)
b = np.arange(64, 66)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testFillArraySmall(self):
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[64, 36], dtype=np.int32)
ff._fill_array(actual, a)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArrayLarge(self):
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
ff._fill_array(actual, a)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArraySmallWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[64, 36], dtype=np.int32)
ff._fill_array(actual, a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testFillArrayLargeWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
actual = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
ff._fill_array(actual, a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmall(self):
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLarge(self):
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = 0
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmallWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[32, 32], dtype=np.int32).tolist() +
np.ones(shape=[32, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.int32)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLargeWithSpecifiedValue(self):
fill_value = 8
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.int32).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.int32).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.int32)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededSmallWithSpecifiedNonNumericValue(self):
fill_value = False
a = (np.ones(shape=[32, 32], dtype=np.bool).tolist() +
np.ones(shape=[32, 36], dtype=np.bool).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[64, 36], dtype=np.bool)
expected[:32, 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
def testPadIfNeededLargeWithSpecifiedNonNumericValue(self):
fill_value = False
a = (np.ones(shape=[8, 8, 8, 8, 32], dtype=np.bool).tolist() +
np.ones(shape=[8, 8, 8, 8, 36], dtype=np.bool).tolist())
a = list(map(np.array, a))
actual = ff._pad_if_needed(a, fill_value)
expected = np.ones(shape=[16, 8, 8, 8, 36], dtype=np.bool)
expected[:8, ..., 32:] = fill_value
self.assertEqual(expected.tolist(), actual.tolist())
if __name__ == "__main__":
test.main()
| apache-2.0 |
adamgreenhall/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
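        # Normalize every accepted input_type to an iterable of
        # (feature_name, value) pairs before hashing.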
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
pme1123/pyroots | pyroots/skeletonization.py | 1 | 8268 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 11 19:10:25 2016
@author: pme
Contents:
- _axis_length
- skeletonize_with_distance
"""
import pandas as pd
import numpy as np
from scipy import ndimage
from skimage import morphology
def _axis_length(image, labels=None, random=True, m=0.5):
"""
Calculate the length of each pixel in a non-straight line, based on
results in Kimura et al. (1999) [1]. This is a modified version of
skimage.morphology.perimeter [2].
Parameters
----------
image : array
        Skeletonized binary image (required).
labels : array
Array of object labels. Integer values, for example from
``ndimage.label``. Default=None, which returns a single length
value for all objects in the image. If labels are given, then the
total length of each object is returned.
random : bool
Are the segments randomly oriented, or are they well ordered?
Default=``True``.
m : float
        Within range [0,1]. Change if random=``False``, otherwise the algorithm
        will underestimate length (slightly). Default=0.5. This should work for
most situations. See [1].
Returns
-------
length : float
If no labels are included, the function returns the total length of
all objects in binary image.
or
length : ndarray
If a label array is included, the function returns an array of lengths
for each labelled segment.
See Also
--------
skimage.morphology.filter
References
----------
.. [1] Kimura K, Kikuchi S, Yamasaki S. 1999. Accurate root length
measurement by image analysis. Plant and Soil 216: 117–127.
.. [2] K. Benkrid, D. Crookes. Design and FPGA Implementation of
a Perimeter Estimator. The Queen's University of Belfast.
http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc
"""
image = image.astype(np.uint8)
#Define the connectivity list
if random is True:
pixel_weight = np.array(
#Generated from below, to weight the sum values of connectivity
#from the kernel passed to ndi.convolve
[0. , 0.5 , 0. , 0.474, 0. , 0.948, 0. , 1.422,
0. , 1.896, 0. , 0.67 , 0. , 1.144, 0. , 1.618,
0. , 2.092, 0. , 2.566, 0. , 1.341, 0. , 1.815,
0. , 2.289, 0. , 2.763, 0. , 3.237, 0. , 2.011,
0. , 2.485, 0. , 2.959, 0. , 3.433, 0. , 3.907,
0. , 2.681, 0. , 3.155, 0. , 3.629, 0. , 4.103,
0. , 4.577])
# weight_seq = np.arange(0, 50, step=1,
# dtype = "float")
# horiz_vert = (weight_seq % 10) // 2 #Count horiz, vertical connects
# diag = weight_seq // 10 #Count diagonal connections
# pixel_weight = 0.5*(horiz_vert + diag*(2**0.5))
# pixel_weight = pixel_weight*0.948 #Correction factor assuming
# #random orientations of lines
# pixel_weight[np.arange(0, 50, 2)] = 0 #Only odd numbers are on skeleton
# pixel_weight[1] = 0.5 #Account for lone pixels
#
else: #This weighting systematically overestimates (slightly), but the
#error should be smaller for well-oriented roots. See Kimura et al.
#(1999)
        if m == 0.5:
pixel_weight = np.array(
#Generated from the else: statement below for m = 0.5
[0. , 0.5 , 0. , 0.5 , 0. , 1. , 0. , 1.5 ,
0. , 2. , 0. , 0.707, 0. , 1.151, 0. , 1.618,
0. , 2.096, 0. , 2.581, 0. , 1.414, 0. , 1.851,
0. , 2.303, 0. , 2.766, 0. , 3.236, 0. , 2.121,
0. , 2.555, 0. , 3. , 0. , 3.454, 0. , 3.915,
0. , 2.828, 0. , 3.26 , 0. , 3.702, 0. , 4.15 ,
0. , 4.606])
else:
weight_seq = np.arange(0, 50, step=1, dtype="float")
orth = (weight_seq % 10) // 2 #count orthogonal links
diag = weight_seq // 10 #count diagonal links
pixel_weight = 0.5 * ((diag**2 + (diag + orth*m)**2)**0.5 + orth*(1-m))
pixel_weight[np.arange(0, 50, 2)] = 0 #Only odd numbers are on skeleton
pixel_weight[1] = 0.5 #Account for lone pixels
#Run the connectivity kernel
kernel = np.array([[10, 2, 10],
[ 2, 1, 2],
[10, 2, 10]])
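    # Neighbour encoding: the centre pixel adds 1, each orthogonal neighbour
    # adds 2 and each diagonal neighbour adds 10, so the convolved value is a
    # unique index into ``pixel_weight`` (odd values occur only on the skeleton).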
kernel_out = ndimage.convolve(image, kernel,
mode='constant', cval=0
)
#convert kernel_out to pixel length, for diagnostics
dims = kernel_out.shape
pixel_length = np.ones(dims) #most should become zeros
for i in range(dims[0]): #x dimension
for j in range(dims[1]): #y dimension
pixel_length[i,j] = pixel_weight[kernel_out[i,j]]
#match the value to that in the pixel_weight list
if labels is None:
# From original code - skimage.morphology.perimeter():
# "You can also write
# return perimeter_weights[perimeter_image].sum()
# but that was measured as taking much longer than bincount + np.dot
# (5x as much time)"
weight_bins = np.bincount(kernel_out.ravel(), minlength=50)
length_out = np.dot(weight_bins, pixel_weight)
return(weight_bins,
length_out)
else:
dims = kernel_out.shape
pixel_length = np.zeros(dims) #most should stay zeros
for i in range(dims[0]): #x dimension iterate
for j in range(dims[1]): #y dimension iterate
pixel_length[i,j] = pixel_weight[kernel_out[i,j]]
length_out = ndimage.sum(pixel_length,
labels=labels,
index=range(labels.max() + 1)[1:]
#might want to ignore index 0
#for large images.
)
return(pixel_length,
length_out)
def skeleton_with_distance(img, random=True, m=0.5):
"""
Created on Thu 21 Jul 2016 03:27:52 PM CDT
@author: pme
Calls ``morphology.medial_axis``, then calculates the length of each axis
using ``pyroots._axis_length``
Parameters
--------
img: array
boolean ndarray of objects
random: boolean
Are the segments randomly oriented? Default=``True``.
Feeds ``pyroots.axis_length``.
m: float
range [0,1]. Feeds ``pyroots._axis_length``. Default = 0.5. Change only
if ``random=False``.
Returns
--------
A dictionary containing:
1. "objects" : The objects image, ``img``
2. "length" : An ndarray of length for each pixel
3. "diameter" : An ndarray of diameter at each pixel
4. "geometry" : A pandas dataframe listing the mean length and mean diameter
for each object (including the open space)
"""
labels, labels_ls = ndimage.label(img)
skel, dist = morphology.medial_axis(img, return_distance=True)
dist_img = skel*2*dist #2x because medial axis distance is radius
label_skel = skel*labels
width_list = ndimage.mean(dist_img,
label_skel,
index=range(labels_ls+1)[1:]) #ignore empty space
length_img, length_list = _axis_length(skel, labels, random=random, m=m)
geom_df = pd.DataFrame({"Length" : np.insert(length_list, 0, 0), #to re-add the label index, 0
"Diameter" : np.insert(width_list, 0, 0)
})
out = {"objects" : img,
"length" : length_img,
"diameter" : dist_img,
"geometry" : geom_df}
return(out)
| apache-2.0 |
RomainBrault/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 73 | 1854 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
A :ref:`decision tree <tree>`
is used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
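# The two outputs are the noisy (sin, cos) coordinates of a circle driven by a
# single feature; every fifth sample gets extra noise for deep trees to overfit.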
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
roxyboy/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
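# bin_seeding speeds up the search by seeding kernels on a coarse grid of the
# data rather than on every individual point.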
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
aje/POT | docs/source/auto_examples/plot_OT_L1_vs_L2.py | 1 | 5000 | # -*- coding: utf-8 -*-
"""
==========================================
2D Optimal transport for different metrics
==========================================
2D OT on empirical distributions with different ground metrics.
Figure idea taken from Fig. 1 and 2 in
https://arxiv.org/pdf/1706.07650.pdf
"""
# Author: Remi Flamary <[email protected]>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
##############################################################################
# Dataset 1 : uniform sampling
# ----------------------------
n = 20 # nb samples
xs = np.zeros((n, 2))
xs[:, 0] = np.arange(n) + 1
xs[:, 1] = (np.arange(n) + 1) * -0.001 # to make it strictly convex...
xt = np.zeros((n, 2))
xt[:, 1] = np.arange(n) + 1
a, b = ot.unif(n), ot.unif(n) # uniform distribution on samples
# loss matrix
M1 = ot.dist(xs, xt, metric='euclidean')
M1 /= M1.max()
# loss matrix
M2 = ot.dist(xs, xt, metric='sqeuclidean')
M2 /= M2.max()
# loss matrix
Mp = np.sqrt(ot.dist(xs, xt, metric='euclidean'))
Mp /= Mp.max()
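# Third ground cost: the square root of the Euclidean distance, a concave
# (non-squared) metric, to contrast with the L2 and squared L2 costs above.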
# Data
pl.figure(1, figsize=(7, 3))
pl.clf()
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
pl.title('Source and target distributions')
# Cost matrices
pl.figure(2, figsize=(7, 3))
pl.subplot(1, 3, 1)
pl.imshow(M1, interpolation='nearest')
pl.title('Euclidean cost')
pl.subplot(1, 3, 2)
pl.imshow(M2, interpolation='nearest')
pl.title('Squared Euclidean cost')
pl.subplot(1, 3, 3)
pl.imshow(Mp, interpolation='nearest')
pl.title('Sqrt Euclidean cost')
pl.tight_layout()
##############################################################################
# Dataset 1 : Plot OT Matrices
# ----------------------------
#%% EMD
G1 = ot.emd(a, b, M1)
G2 = ot.emd(a, b, M2)
Gp = ot.emd(a, b, Mp)
# OT matrices
pl.figure(3, figsize=(7, 3))
pl.subplot(1, 3, 1)
ot.plot.plot2D_samples_mat(xs, xt, G1, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT Euclidean')
pl.subplot(1, 3, 2)
ot.plot.plot2D_samples_mat(xs, xt, G2, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT squared Euclidean')
pl.subplot(1, 3, 3)
ot.plot.plot2D_samples_mat(xs, xt, Gp, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT sqrt Euclidean')
pl.tight_layout()
pl.show()
##############################################################################
# Dataset 2 : Partial circle
# --------------------------
n = 50 # nb samples
xtot = np.zeros((n + 1, 2))
xtot[:, 0] = np.cos(
(np.arange(n + 1) + 1.0) * 0.9 / (n + 2) * 2 * np.pi)
xtot[:, 1] = np.sin(
(np.arange(n + 1) + 1.0) * 0.9 / (n + 2) * 2 * np.pi)
xs = xtot[:n, :]
xt = xtot[1:, :]
a, b = ot.unif(n), ot.unif(n) # uniform distribution on samples
# loss matrix
M1 = ot.dist(xs, xt, metric='euclidean')
M1 /= M1.max()
# loss matrix
M2 = ot.dist(xs, xt, metric='sqeuclidean')
M2 /= M2.max()
# loss matrix
Mp = np.sqrt(ot.dist(xs, xt, metric='euclidean'))
Mp /= Mp.max()
# Data
pl.figure(4, figsize=(7, 3))
pl.clf()
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
pl.title('Source and target distributions')
# Cost matrices
pl.figure(5, figsize=(7, 3))
pl.subplot(1, 3, 1)
pl.imshow(M1, interpolation='nearest')
pl.title('Euclidean cost')
pl.subplot(1, 3, 2)
pl.imshow(M2, interpolation='nearest')
pl.title('Squared Euclidean cost')
pl.subplot(1, 3, 3)
pl.imshow(Mp, interpolation='nearest')
pl.title('Sqrt Euclidean cost')
pl.tight_layout()
##############################################################################
# Dataset 2 : Plot OT Matrices
# -----------------------------
#%% EMD
G1 = ot.emd(a, b, M1)
G2 = ot.emd(a, b, M2)
Gp = ot.emd(a, b, Mp)
# OT matrices
pl.figure(6, figsize=(7, 3))
pl.subplot(1, 3, 1)
ot.plot.plot2D_samples_mat(xs, xt, G1, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT Euclidean')
pl.subplot(1, 3, 2)
ot.plot.plot2D_samples_mat(xs, xt, G2, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT squared Euclidean')
pl.subplot(1, 3, 3)
ot.plot.plot2D_samples_mat(xs, xt, Gp, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT sqrt Euclidean')
pl.tight_layout()
pl.show()
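##############################################################################
# Compare transport costs (illustrative addition, not part of the original
# example): the couplings G and normalized cost matrices M for dataset 2 are
# still in scope, so the total OT cost under each ground metric is just the
# entrywise product summed over the matrix.
cost_euc = np.sum(G1 * M1)
cost_sq = np.sum(G2 * M2)
cost_sqrt = np.sum(Gp * Mp)
print('OT cost, Euclidean ground metric: %.4f' % cost_euc)
print('OT cost, squared Euclidean ground metric: %.4f' % cost_sq)
print('OT cost, sqrt Euclidean ground metric: %.4f' % cost_sqrt)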
| mit |
josenavas/qiime | qiime/make_2d_plots.py | 9 | 23540 | #!/usr/bin/env python
# File created on 09 Feb 2010
# file make_2d_plots.py
__author__ = "Jesse Stombaugh and Micah Hamady"
__copyright__ = "Copyright 2011, The QIIME Project"
# remember to add yourself
__credits__ = ["Jesse Stombaugh", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
import re
from matplotlib import use
use('Agg', warn=False)
from matplotlib.pylab import *
from matplotlib.cbook import iterable, is_string_like
from matplotlib.patches import Ellipse
from matplotlib.font_manager import FontProperties
from string import strip
from numpy import array, asarray, ndarray
from time import strftime
from random import choice
from qiime.util import summarize_pcoas, isarray, qiime_system_call
from qiime.parse import group_by_field, group_by_fields, parse_coords
from qiime.colors import data_color_order, data_colors, \
get_group_colors, data_colors, iter_color_groups
from qiime.sort import natsort
from tempfile import mkdtemp
import os
import numpy
SCREE_TABLE_HTML = """<table cellpadding=0 cellspacing=0 border=1>
<tr><th align=center colspan=3 border=0>Scree plot</th></tr>
<tr>
<td class=normal align=center border=0>%s</td>
</tr>
</table>
<br><br>"""
TABLE_HTML = """<table cellpadding=0 cellspacing=0 border=1>
<tr><th align=center colspan=3 border=0>%s</th></tr>
<tr>
<td class=normal align=center border=0>%s</td>
<td class=normal align=center border=0>%s</td>
<td class=normal align=center border=0>%s</td>
</tr>
</table>
<br><br>"""
PAGE_HTML = """
<html>
<head>
<style type="text/css">
.normal { color: black; font-family:Arial,Verdana; font-size:12;
font-weight:normal;}
</style>
<script type="text/javascript" src="js/overlib.js"></script>
<title>%s</title>
</head>
<body>
<div id="overDiv" style="position:absolute; visibility:hidden; z-index:1000;">\
</div>
%s
</body>
</html>
"""
IMG_SRC = """<img src="%s" border=0 />"""
DOWNLOAD_LINK = """<a href="%s" >%s</a>"""
AREA_SRC = """<AREA shape="circle" coords="%d,%d,5" href="#%s" \
onmouseover="return overlib('%s');" onmouseout="return nd();">\n"""
IMG_MAP_SRC = """<img src="%s" border="0" ismap usemap="#points%s" width="%d" \
height="%d" />\n"""
MAP_SRC = """
<MAP name="points%s">
%s
</MAP>
"""
shape = [
's', # : square
'o', # : circle
'^', # : triangle up
'>', # : triangle right
'v', # : triangle down
'<', # : triangle left
'd', # : diamond
'p', # : pentagon
'h', # : hexagon
]
'''
data_colors={'blue':'#0000FF','lime':'#00FF00','red':'#FF0000', \
'aqua':'#00FFFF','fuchsia':'#FF00FF','yellow':'#FFFF00', \
'green':'#008000','maroon':'#800000','teal':'#008080', \
'purple':'#800080','olive':'#808000', \
'silver':'#C0C0C0','gray':'#808080'}
'''
default_colors = ['blue', 'lime', 'red', 'aqua', 'fuchsia', 'yellow', 'green',
'maroon', 'teal', 'purple', 'olive', 'silver', 'gray']
# This function used to live in make_3d_plots.py but in the Boulder sk-bio
# code sprint it got moved here to remove the 3D files.
def get_coord(coord_fname, method="IQR"):
"""Opens and returns coords location matrix and metadata.
Also two spread matrices (+/-) if passed a dir of coord files.
If only a single coord file, spread matrices are returned as None.
"""
if not os.path.isdir(coord_fname):
try:
coord_f = open(coord_fname, 'U')
except (TypeError, IOError):
raise MissingFileError('Coord file required for this analysis')
coord_header, coords, eigvals, pct_var = parse_coords(coord_f)
return [coord_header, coords, eigvals, pct_var, None, None]
else:
master_pcoa, support_pcoas = load_pcoa_files(coord_fname)
# get Summary statistics
coords, coords_low, coords_high, eigval_average, coord_header = \
summarize_pcoas(master_pcoa, support_pcoas, method=method)
pct_var = master_pcoa[3] # should be getting this from an average
# make_3d_plots expects coord_header to be a python list
coord_header = list(master_pcoa[0])
return (
[coord_header,
coords,
eigval_average,
pct_var,
coords_low,
coords_high]
)
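def example_get_coord_usage():
    """Illustrative usage sketch (added; not part of the original module).

    The file name below is hypothetical. With a single coord file the two
    spread matrices come back as None; with a directory of jackknifed coord
    files they hold the low/high spread used to draw confidence ellipses.
    """
    coord_header, coords, eigvals, pct_var, coords_low, coords_high = \
        get_coord('pcoa/unweighted_unifrac_pc.txt')
    return coords.shape, coords_low is None, coords_high is None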
def make_line_plot(
dir_path, data_file_link, background_color, label_color, xy_coords,
props, x_len=8, y_len=4, draw_axes=False, generate_eps=True):
""" Write a line plot
xy_coords: a dict of form {series_label:([x data], [y data], point_marker, color)}
(code adapted from Micah Hamady's code)
"""
rc('font', size='8')
rc('axes', linewidth=.5, edgecolor=label_color)
rc('axes', labelsize=8)
rc('xtick', labelsize=8)
rc('ytick', labelsize=8)
fig = figure(figsize=(x_len, y_len))
mtitle = props.get("title", "Groups")
x_label = props.get("xlabel", "X")
y_label = props.get("ylabel", "Y")
title('%s' % mtitle, fontsize='10', color=label_color)
xlabel(x_label, fontsize='8', color=label_color)
ylabel(y_label, fontsize='8', color=label_color)
sorted_keys = sorted(xy_coords.keys())
labels = []
for s_label in sorted_keys:
s_data = xy_coords[s_label]
c = s_data[3]
m = s_data[2]
plot(s_data[0], s_data[1], c=c, marker=m, label=s_label,
linewidth=.1, ms=5, alpha=1.0)
fp = FontProperties()
fp.set_size('8')
legend(prop=fp, loc=0)
show()
img_name = 'scree_plot.png'
savefig(
os.path.join(dir_path,
img_name),
dpi=80,
facecolor=background_color)
# Create zipped eps files
eps_link = ""
if generate_eps:
eps_img_name = str('scree_plot.eps')
savefig(os.path.join(dir_path, eps_img_name), format='eps')
out, err, retcode = qiime_system_call(
"gzip -f " + os.path.join(dir_path, eps_img_name))
eps_link = DOWNLOAD_LINK % ((os.path.join(data_file_link, eps_img_name)
+ ".gz"), "Download Figure")
return os.path.join(data_file_link, img_name), eps_link
def draw_scree_graph(dir_path, data_file_link, background_color, label_color,
generate_eps, data):
"""Draw scree plot
(code adapted from Micah Hamady's code)
"""
dimensions = len(data['coord'][3])
props = {}
props["title"] = "PCoA Scree Plot (First %s dimensions)" % dimensions
props["ylabel"] = "Fraction of Variance"
props["xlabel"] = "Principal component"
xy_coords = {}
x_points = [x for x in range(dimensions)]
c_data = [float(x) / 100.0 for x in data['coord'][3]]
xy_coords['Variance'] = (x_points, c_data, 'o', 'r')
cum_var = [c_data[0]]
for ix in range(dimensions - 1):
cum_var.append(cum_var[ix] + c_data[ix + 1])
xy_coords['Cumulative variance'] = (x_points, cum_var, 's', 'b')
img_src, eps_link = make_line_plot(
dir_path, data_file_link, background_color, label_color,
xy_coords=xy_coords, props=props, x_len=4.5,
y_len=4.5, generate_eps=generate_eps)
return IMG_SRC % img_src, eps_link
def make_interactive_scatter(plot_label, dir_path, data_file_link,
background_color, label_color, sample_location,
alpha, xy_coords,
props, x_len=8, y_len=4, size=10,
draw_axes=False, generate_eps=True):
"""Write interactive plot
xy_coords: a dict of form {series_label:([x data], [y data], \
[xy point label],[color])}
"""
my_axis = None
rc('font', size='8')
rc('patch', linewidth=0)
rc('axes', linewidth=.5, edgecolor=label_color)
rc('axes', labelsize=8)
rc('xtick', labelsize=8, color=label_color)
rc('ytick', labelsize=8, color=label_color)
sc_plot = draw_scatterplot(props, xy_coords, x_len, y_len, size,
background_color, label_color, sample_location,
alpha)
mtitle = props.get("title", "Groups")
x_label = props.get("xlabel", "X")
y_label = props.get("ylabel", "Y")
title('%s' % mtitle, fontsize='10', color=label_color)
xlabel(x_label, fontsize='8', color=label_color)
ylabel(y_label, fontsize='8', color=label_color)
show()
if draw_axes:
axvline(linewidth=.5, x=0, color=label_color)
axhline(linewidth=.5, y=0, color=label_color)
if my_axis is not None:
axis(my_axis)
img_name = x_label[0:3] + '_vs_' + y_label[0:3] + '_plot.png'
savefig(os.path.join(dir_path, img_name),
dpi=80, facecolor=background_color)
# Create zipped eps files
eps_link = ""
if generate_eps:
eps_img_name = str(x_label[0:3] + 'vs' + y_label[0:3] + 'plot.eps')
savefig(os.path.join(dir_path, eps_img_name), format='eps')
out, err, retcode = qiime_system_call(
"gzip -f " + os.path.join(dir_path, eps_img_name))
eps_link = DOWNLOAD_LINK % ((os.path.join(data_file_link, eps_img_name)
+ ".gz"), "Download Figure")
all_cids, all_xcoords, all_ycoords = transform_xy_coords(
xy_coords, sc_plot)
xmap, img_height, img_width = generate_xmap(
x_len, y_len, all_cids, all_xcoords,
all_ycoords)
points_id = plot_label + x_label[2:3] + y_label[2:3]
return IMG_MAP_SRC % (os.path.join(data_file_link, img_name), points_id,
img_width, img_height), MAP_SRC % \
(points_id, ''.join(xmap)), eps_link
def generate_xmap(x_len, y_len, all_cids, all_xcoords, all_ycoords):
"""Generates the html interactive image map"""
    # Determine figure height and width
img_height = x_len * 80
img_width = y_len * 80
# Write html script which allows for mouseover of labels
xmap = []
for cid, x, y in zip(all_cids, all_xcoords, all_ycoords):
xmap.append(AREA_SRC % (x, img_height - y, cid, cid))
return xmap, img_height, img_width
def draw_scatterplot(props, xy_coords, x_len, y_len, size, background_color,
label_color, sample_location, alpha):
"""Create scatterplot figure"""
fig = figure(figsize=(x_len, y_len))
xPC = int(props['xlabel'][2:3])
yPC = int(props['ylabel'][2:3])
sorted_keys = xy_coords.keys()
scatters = {}
size_ct = shape_ct = 0
xPC = xPC - 1
yPC = yPC - 1
# Iterate through coords and add points to the scatterplot
for s_label in sorted_keys:
s_data = xy_coords[s_label]
if s_data[0] == []:
pass
else:
c = s_data[3]
m = s_data[4][0]
ax = fig.add_subplot(111, axisbg=background_color)
# set tick colors and width
for line in ax.yaxis.get_ticklines():
# line is a matplotlib.lines.Line2D instance
line.set_color(label_color)
line.set_markeredgewidth(1)
for line in ax.xaxis.get_ticklines():
# line is a matplotlib.lines.Line2D instance
line.set_color(label_color)
line.set_markeredgewidth(1)
if isarray(s_data[5][0]) and isarray(s_data[6][0]) and \
isarray(s_data[7][0]):
matrix_low = s_data[5][0]
matrix_high = s_data[6][0]
ellipse_ave = s_data[7][0]
ellipse_x = [ellipse_ave[sample_location[s_label], xPC]]
ellipse_y = [ellipse_ave[sample_location[s_label], yPC]]
width = [fabs(matrix_high[sample_location[s_label], xPC] -
matrix_low[sample_location[s_label], xPC])]
height = [fabs(matrix_high[sample_location[s_label], yPC] -
matrix_low[sample_location[s_label], yPC])]
sc_plot = scatter_ellipse(ax, ellipse_x,
ellipse_y, width, height, c=c, a=0.0,
alpha=alpha)
sc_plot.scatter(ellipse_x, ellipse_y, c=c, marker=m,
alpha=1.0)
else:
sc_plot = ax.scatter(s_data[0], s_data[1], c=c, marker=m,
alpha=1.0, s=size, linewidth=1, edgecolor=c)
size_ct += 1
shape_ct += 1
scatters[s_label] = sc_plot
return sc_plot
def transform_xy_coords(xy_coords, sc_plot):
"""Transform the coords from the scatterplot into coords that can be \
referenced in the html page"""
sorted_keys = xy_coords.keys()
all_cids = []
all_xcoords = []
all_ycoords = []
sc_plot.set_transform(sc_plot.axes.transData)
trans = sc_plot.get_transform()
for s_label in sorted_keys:
s_data = xy_coords[s_label]
if s_data[0] == []:
pass
else:
icoords = trans.transform(zip(s_data[0], s_data[1]))
xcoords, ycoords = zip(*icoords)
all_cids.extend(s_data[2])
all_xcoords.extend(xcoords)
all_ycoords.extend(ycoords)
return all_cids, all_xcoords, all_ycoords
def draw_pcoa_graph(plot_label, dir_path, data_file_link, coord_1, coord_2,
coord_1r, coord_2r, mat_ave, sample_location,
data, prefs, groups, colors, background_color, label_color,
data_colors, data_color_order,
generate_eps=True,
pct_variation_below_one=False):
"""Draw PCoA graphs"""
coords, pct_var = convert_coord_data_to_dict(data)
mapping = data['map']
if coord_1 not in coords:
raise ValueError("Principal coordinate: %s not available." % coord_1)
if coord_2 not in coords:
raise ValueError("Principal coordinate: %s not available." % coord_2)
# Handle matplotlib scale bug when all coords are 0.0
if not len([x for x in map(float, coords[coord_2]) if x != 0.0]):
for ix in range(len(coords[coord_2])):
coords[coord_2][ix] = '1e-255'
if not len([x for x in map(float, coords[coord_1]) if x != 0.0]):
for ix in range(len(coords[coord_1])):
coords[coord_1][ix] = '1e-255'
# Write figure labels
pct_exp1 = float(pct_var[coord_1])
pct_exp2 = float(pct_var[coord_2])
if float(pct_var['1']) < 1 and not pct_variation_below_one:
pct_exp1 *= 100
pct_exp2 *= 100
props = {}
props["title"] = "PCoA - PC%s vs PC%s" % (coord_1, coord_2)
props["ylabel"] = "PC%s - Percent variation explained %.2f%%" \
% (coord_2, pct_exp2)
props["xlabel"] = "PC%s - Percent variation explained %.2f%%" \
% (coord_1, pct_exp1)
labels = coords['pc vector number']
p1 = map(float, coords[coord_2])
p2 = map(float, coords[coord_1])
if isarray(coord_1r) and isarray(coord_2r) and isarray(mat_ave):
p1r = coord_2r
p2r = coord_1r
else:
p1r = None
p2r = None
mat_ave = None
if len(p1) != len(p2):
raise ValueError("Principal coordinate vectors unequal length.")
p1d = dict(zip(labels, p1))
p2d = dict(zip(labels, p2))
alpha = data['alpha']
xy_coords = extract_and_color_xy_coords(
p1d, p2d, p1r, p2r, mat_ave, colors,
data_colors, groups, coords)
img_src, img_map, eps_link = make_interactive_scatter(
plot_label, dir_path,
data_file_link, background_color, label_color,
sample_location, alpha,
xy_coords=xy_coords, props=props, x_len=4.5,
y_len=4.5, size=20, draw_axes=True,
generate_eps=generate_eps)
return img_src + img_map, eps_link
def extract_and_color_xy_coords(
p1d, p2d, p1dr, p2dr, mat_ave, colors, data_colors,
groups, coords):
"""Extract coords from appropriate columns and attach their \
corresponding colors based on the group"""
xy_coords = {}
shape_ct = 0
for group_name, ids in (groups.items()):
x = 0
color = data_colors[colors[group_name]].toHex()
m = shape[shape_ct % len(shape)]
shape_ct += 1
for id_ in (ids):
cur_labs = []
cur_x = []
cur_y = []
cur_color = []
cur_shape = []
cur_1r = []
cur_2r = []
new_mat_ave = []
if id_ in coords['pc vector number']:
cur_labs.append(id_ + ': ' + group_name)
cur_x.append(p2d[id_])
cur_y.append(p1d[id_])
cur_color.append(color)
cur_shape.append(m)
if isarray(p2dr) and isarray(p1dr) and isarray(mat_ave):
cur_1r.append(p1dr)
cur_2r.append(p2dr)
new_mat_ave.append(mat_ave)
else:
cur_1r = [None]
cur_2r = [None]
new_mat_ave = [None]
xy_coords["%s" % id_] = (cur_x, cur_y, cur_labs, cur_color,
cur_shape, cur_1r, cur_2r, new_mat_ave)
return xy_coords
def create_html_filename(coord_filename, name_ending):
"""Generate html filename using the given coord filename"""
outpath = coord_filename.split('/')[-1] + name_ending
return outpath
def convert_coord_data_to_dict(data):
"""Convert the coord data into a dictionary"""
coord_header = data['coord'][0]
coords = data['coord'][1]
pct_var = data['coord'][3]
coords_dict = {}
pct_var_dict = {}
coords_dict['pc vector number'] = coord_header
for x in range(len(coords)):
coords_dict[str(x + 1)] = coords[0:, x]
pct_var_dict[str(x + 1)] = pct_var[x]
return coords_dict, pct_var_dict
def write_html_file(out_table, outpath):
"""Write 2D plots into an html file"""
page_out = PAGE_HTML % (outpath, out_table)
out = open(outpath, "w+")
out.write(page_out)
out.close()
def generate_2d_plots(prefs, data, html_dir_path, data_dir_path, filename,
background_color, label_color, generate_scree,
pct_variation_below_one):
"""Generate interactive 2D scatterplots"""
coord_tups = [("1", "2"), ("3", "2"), ("1", "3")]
mapping = data['map']
out_table = ''
# Iterate through prefs and generate html files for each colorby option
# Sort by the column name first
sample_location = {}
groups_and_colors = iter_color_groups(mapping, prefs)
groups_and_colors = list(groups_and_colors)
for i in range(len(groups_and_colors)):
labelname = groups_and_colors[i][0]
groups = groups_and_colors[i][1]
colors = groups_and_colors[i][2]
data_colors = groups_and_colors[i][3]
data_color_order = groups_and_colors[i][4]
data_file_dir_path = mkdtemp(dir=data_dir_path)
new_link = os.path.split(data_file_dir_path)
data_file_link = os.path.join('.', os.path.split(new_link[-2])[-1],
new_link[-1])
new_col_name = labelname
img_data = {}
plot_label = labelname
if 'support_pcoas' in data:
matrix_average, matrix_low, matrix_high, eigval_average, m_names = \
summarize_pcoas(data['coord'], data['support_pcoas'],
method=data['ellipsoid_method'])
data['coord'] = \
(m_names, matrix_average, data['coord'][2], data['coord'][3])
for i in range(len(m_names)):
sample_location[m_names[i]] = i
else:
matrix_average = None
matrix_low = None
matrix_high = None
eigval_average = None
m_names = None
iterator = 0
for coord_tup in coord_tups:
if isarray(matrix_low) and isarray(matrix_high) and \
isarray(matrix_average):
coord_1r = asarray(matrix_low)
coord_2r = asarray(matrix_high)
mat_ave = asarray(matrix_average)
else:
coord_1r = None
coord_2r = None
mat_ave = None
sample_location = None
coord_1, coord_2 = coord_tup
img_data[coord_tup] = draw_pcoa_graph(
plot_label, data_file_dir_path,
data_file_link, coord_1, coord_2,
coord_1r, coord_2r, mat_ave,
sample_location,
data, prefs, groups, colors,
background_color, label_color,
data_colors, data_color_order,
generate_eps=True,
pct_variation_below_one=pct_variation_below_one)
out_table += TABLE_HTML % (labelname,
"<br>".join(img_data[("1", "2")]),
"<br>".join(img_data[("3", "2")]),
"<br>".join(img_data[("1", "3")]))
if generate_scree:
data_file_dir_path = mkdtemp(dir=data_dir_path)
new_link = os.path.split(data_file_dir_path)
data_file_link = os.path.join(
'.',
os.path.split(new_link[-2])[-1],
new_link[-1])
img_src, download_link = draw_scree_graph(
data_file_dir_path, data_file_link, background_color,
label_color, generate_eps=True, data=data)
out_table += SCREE_TABLE_HTML % ("<br>".join((img_src, download_link)))
outfile = create_html_filename(filename, '.html')
outfile = os.path.join(html_dir_path, outfile)
write_html_file(out_table, outfile)
def scatter_ellipse(axis_ob, x, y, w, h, c='b', a=0.0, alpha=0.5):
"""
SCATTER_ELLIPSE(x, y, w=None, h=None, c='b', a=0.0)
Make a scatter plot of x versus y with ellipses surrounding the
center point. w and h represent the width
and height of the ellipse that surround each x,y coordinate.
They are arrays of the same length as x or y. c is
a color and can be a single color format string or an length(x) array
of intensities which will be mapped by the colormap jet. a is the
angle or rotation in degrees of each ellipse (anti-clockwise). It is
also an array of the same length as x or y or a single value to be
iterated over all points.
"""
if not axis_ob._hold:
axis_ob.cla()
if not iterable(a):
a = [a] * len(x)
if not iterable(alpha):
alpha = [alpha] * len(x)
if len(c) != len(x):
raise ValueError('c and x are not equal lengths')
if len(w) != len(x):
raise ValueError('w and x are not equal lengths')
if len(h) != len(x):
raise ValueError('h and x are not equal lengths')
if len(a) != len(x):
raise ValueError('a and x are not equal lengths')
# if len(alpha)!=len(x):
# raise ValueError, 'alpha and x are not equal lengths'
patches = []
for thisX, thisY, thisW, thisH, thisC, thisA, thisAl in \
zip(x, y, w, h, c, a, alpha):
ellip = Ellipse((thisX, thisY), width=thisW, height=thisH,
angle=thisA)
ellip.set_facecolor(thisC)
ellip.set_alpha(thisAl)
axis_ob.add_patch(ellip)
patches.append(ellip)
axis_ob.autoscale_view()
return axis_ob
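def example_scatter_ellipse():
    """Illustrative usage sketch (added; not part of the original module).

    Draws three arbitrary points with ellipses of differing width and height
    around them using the scatter_ellipse helper above, then writes the
    figure to a hypothetical file name.
    """
    fig = figure(figsize=(4, 4))
    ax = fig.add_subplot(111)
    x, y = [0.0, 1.0, 2.0], [0.0, 0.5, -0.5]
    w, h = [0.4, 0.2, 0.6], [0.2, 0.4, 0.3]
    c = ['r', 'g', 'b']
    scatter_ellipse(ax, x, y, w, h, c=c, a=0.0, alpha=0.3)
    ax.scatter(x, y, c=c)
    savefig('scatter_ellipse_example.png')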
| gpl-2.0 |
caspar/KentLab | scripts/GMRplot3.py | 2 | 1307 | import sys
import numpy as np
import matplotlib.pyplot as plt
import math
#from datetime import datetime
#import os
# load csv file
PATH = "./data/"
FILENAME = "Sample_F5_21_2_1mA_trial2";
#TIMESTAMP = datetime.now().strftime('%D_%H:%M')
#OUTPUT = ''+ os.path.splitext(FILENAME)[0] + '_' + TIMESTAMP + '.png'
field, resistance = np.loadtxt((PATH+FILENAME), skiprows=0 , unpack=True, delimiter=' ');
OUTPUT = FILENAME + '.png'  # output file name (the timestamped version above is commented out)
print(OUTPUT)
color = []
array = []
i = 0
while (i < len(field) - 1):
if (float(field[i]) >= float(field[i+1])):
color = 'blue'
else:
color = 'red'
# array[i] = field[i], resistance[i], color[i]
# print(math.fabs(float(i)/10000.00))
    fig = plt.plot(field[i], resistance[i], '.', color=color)
i = i+1
def color():
global i
if i%3 == 0:
i = i + 1
print(i)
return 'red'
else:
i = i + 1
return 'blue'
# plot resistance vs. magnetic field
plt.ylabel("Resistance (Ohms)");
plt.xlabel("Magnetic Field Strength (Tesla)");
plt.title("Magnetoresistance in F5 Nanopillar");
plt.savefig(OUTPUT, dpi=None, facecolor='w', edgecolor='w',
            orientation='portrait', papertype=None, format=None,
            transparent=False, bbox_inches=None, pad_inches=0.1,
            frameon=None)  # save before show() so the figure is not empty
plt.show();
plt.close('all')
| mit |
gregversteeg/corex_topic | corextopic/vis_topic.py | 1 | 26170 | """ This module implements some visualizations based on CorEx representations.
"""
import os
from shutil import copyfile
import codecs
import numpy as np
import matplotlib
matplotlib.use('Agg') # to create visualizations on a display-less server
import pylab
import networkx as nx
import textwrap
import scipy.sparse as ss
import sklearn.feature_extraction.text as skt
#import cPickle, pickle # neither module is used, and cPickle is not part of Anaconda build, so commented for LF run
import corextopic as ct
import sys, traceback
from time import time
import re
import sklearn.feature_extraction.text as skt
from nltk.stem.snowball import *
pattern = '\\b[A-Za-z]+\\b'
np.seterr(all='ignore')
def vis_rep(corex, data=None, row_label=None, column_label=None, prefix='topics'):
"""Various visualizations and summary statistics for a one layer representation"""
if column_label is None:
column_label = list(map(str, range(data.shape[1])))
if row_label is None:
row_label = list(map(str, range(corex.n_samples)))
alpha = corex.alpha
print('Print topics in text file')
output_groups(corex.tcs, alpha, corex.mis, column_label, corex.sign, prefix=prefix)
output_labels(corex.labels, row_label, prefix=prefix)
output_cont_labels(corex.p_y_given_x, row_label, prefix=prefix)
output_strong(corex.tcs, alpha, corex.mis, corex.labels, prefix=prefix)
anomalies(corex.log_z, row_label=row_label, prefix=prefix)
plot_convergence(corex.tc_history, prefix=prefix)
if data is not None:
plot_heatmaps(data, alpha, corex.mis, column_label, corex.p_y_given_x, prefix=prefix)
def vis_hierarchy(corexes, column_label=None, max_edges=100, prefix='topics', n_anchors=0):
"""Visualize a hierarchy of representations."""
if column_label is None:
column_label = list(map(str, range(corexes[0].alpha.shape[1])))
# make l1 label
alpha = corexes[0].alpha
mis = corexes[0].mis
l1_labels = []
annotate = lambda q, s: q if s > 0 else '~' + q
for j in range(corexes[0].n_hidden):
# inds = np.where(alpha[j] * mis[j] > 0)[0]
inds = np.where(alpha[j] >= 1.)[0]
inds = inds[np.argsort(-alpha[j, inds] * mis[j, inds])]
group_number = str('red_') + str(j) if j < n_anchors else str(j)
label = group_number + ':' + ' '.join([annotate(column_label[ind], corexes[0].sign[j,ind]) for ind in inds[:6]])
label = textwrap.fill(label, width=25)
l1_labels.append(label)
# Construct non-tree graph
weights = [corex.alpha.clip(0, 1) * corex.mis for corex in corexes[1:]]
node_weights = [corex.tcs for corex in corexes[1:]]
g = make_graph(weights, node_weights, l1_labels, max_edges=max_edges)
# Display pruned version
h = g.copy() # trim(g.copy(), max_parents=max_parents, max_children=max_children)
edge2pdf(h, prefix + '/graphs/graph_prune_' + str(max_edges), labels='label', directed=True, makepdf=True)
# Display tree version
tree = g.copy()
tree = trim(tree, max_parents=1, max_children=False)
edge2pdf(tree, prefix + '/graphs/tree', labels='label', directed=True, makepdf=True)
# Output JSON files
try:
import os
#copyfile(os.path.dirname(os.path.realpath(__file__)) + '/tests/d3_files/force.html', prefix + '/graphs/force.html')
copyfile(os.path.dirname(os.path.realpath('tests')) + '/tests/d3_files/force.html', prefix + '/graphs/force.html')
except:
print("Couldn't find 'force.html' file for visualizing d3 output")
import json
from networkx.readwrite import json_graph
mapping = dict([(n, tree._node[n].get('label', str(n))) for n in tree.nodes()])
tree = nx.relabel_nodes(tree, mapping)
json.dump(json_graph.node_link_data(tree), safe_open(prefix + '/graphs/force.json', 'w+'))
json.dump(json_graph.node_link_data(h), safe_open(prefix + '/graphs/force_nontree.json', 'w+'))
return g
def plot_heatmaps(data, alpha, mis, column_label, cont, topk=40, athresh=0.2, prefix=''):
import seaborn as sns
cmap = sns.cubehelix_palette(as_cmap=True, light=.9)
import matplotlib.pyplot as plt
m, nv = mis.shape
for j in range(m):
inds = np.where(np.logical_and(alpha[j] > athresh, mis[j] > 0.))[0]
inds = inds[np.argsort(- alpha[j, inds] * mis[j, inds])][:topk]
if len(inds) >= 2:
plt.clf()
order = np.argsort(cont[:,j])
if type(data) == np.ndarray:
subdata = data[:, inds][order].T
else:
# assume sparse
subdata = data[:, inds].toarray()
subdata = subdata[order].T
columns = [column_label[i] for i in inds]
fig, ax = plt.subplots(figsize=(20, 10))
sns.heatmap(subdata, vmin=0, vmax=1, cmap=cmap, yticklabels=columns, xticklabels=False, ax=ax, cbar_kws={"ticks": [0, 0.5, 1]})
plt.yticks(rotation=0)
filename = '{}/heatmaps/group_num={}.png'.format(prefix, j)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
plt.title("Latent factor {}".format(j))
plt.savefig(filename, bbox_inches='tight')
plt.close('all')
#plot_rels(data[:, inds], map(lambda q: column_label[q], inds), colors=cont[:, j],
# outfile=prefix + '/relationships/group_num=' + str(j), latent=labels[:, j], alpha=0.1)
def make_graph(weights, node_weights, column_label, max_edges=100):
all_edges = np.hstack(list(map(np.ravel, weights)))
max_edges = min(max_edges, len(all_edges))
w_thresh = np.sort(all_edges)[-max_edges]
print('weight threshold is %f for graph with max of %f edges ' % (w_thresh, max_edges))
g = nx.DiGraph()
max_node_weight = max([max(w) for w in node_weights])
for layer, weight in enumerate(weights):
m, n = weight.shape
for j in range(m):
g.add_node((layer + 1, j))
g._node[(layer + 1, j)]['weight'] = 0.3 * node_weights[layer][j] / max_node_weight
for i in range(n):
if weight[j, i] > w_thresh:
if weight[j, i] > w_thresh / 2:
g.add_weighted_edges_from([( (layer, i), (layer + 1, j), 10 * weight[j, i])])
else:
g.add_weighted_edges_from([( (layer, i), (layer + 1, j), 0)])
# Label layer 0
for i, lab in enumerate(column_label):
g.add_node((0, i))
g._node[(0, i)]['label'] = lab
g._node[(0, i)]['name'] = lab # JSON uses this field
g._node[(0, i)]['weight'] = 1
return g
def trim(g, max_parents=False, max_children=False):
for node in g:
if max_parents:
parents = list(g.successors(node))
#weights = [g.edge[node][parent]['weight'] for parent in parents]
weights = [g.adj[node][parent]['weight'] for parent in parents]
for weak_parent in np.argsort(weights)[:-max_parents]:
g.remove_edge(node, parents[weak_parent])
if max_children:
children = g.predecessors(node)
weights = [g.edge[child][node]['weight'] for child in children]
for weak_child in np.argsort(weights)[:-max_children]:
g.remove_edge(children[weak_child], node)
return g
def output_groups(tcs, alpha, mis, column_label, direction, thresh=0, prefix=''):
f = safe_open(prefix + '/groups.txt', 'w+')
h = safe_open(prefix + '/topics.txt', 'w+')
m, nv = mis.shape
annotate = lambda q, s: q if s >= 0 else '~' + q
for j in range(m):
f.write('Group num: %d, TC(X;Y_j): %0.3f\n' % (j, tcs[j]))
# inds = np.where(alpha[j] * mis[j] > thresh)[0]
inds = np.where(alpha[j] >= 1.)[0]
inds = inds[np.argsort(-alpha[j, inds] * mis[j, inds])]
for ind in inds:
f.write(column_label[ind] + u', %0.3f, %0.3f, %0.3f\n' % (
mis[j, ind], alpha[j, ind], mis[j, ind] * alpha[j, ind]))
#h.write(unicode(j) + u':' + u','.join([annotate(column_label[ind], direction[j,ind]) for ind in inds[:10]]) + u'\n')
h.write(str(j) + u':' + u','.join(
[annotate(column_label[ind], direction[j, ind]) for ind in inds[:10]]) + u'\n')
f.close()
h.close()
def output_labels(labels, row_label, prefix=''):
f = safe_open(prefix + '/labels.txt', 'w+')
ns, m = labels.shape
for l in range(ns):
f.write(row_label[l] + ',' + ','.join(list(map(lambda q: '%d' % q, labels[l, :])))+ '\n')
f.close()
def output_cont_labels(p_y_given_x, row_label, prefix=''):
f = safe_open(prefix + '/cont_labels.txt', 'w+')
ns, m = p_y_given_x.shape
for l in range(ns):
f.write(row_label[l] + ',' + ','.join(list(map(lambda q: '{:.10f}'.format(q), np.log(p_y_given_x[l, :])))) + '\n')
f.close()
def output_strong(tcs, alpha, mis, labels, prefix=''):
f = safe_open(prefix + '/most_deterministic_groups.txt', 'w+')
m, n = alpha.shape
topk = 5
ixy = np.clip(np.sum(alpha * mis, axis=1) - tcs, 0, np.inf)
hys = np.array([entropy(labels[:, j]) for j in range(m)]).clip(1e-6)
ntcs = [(np.sum(np.sort(alpha[j] * mis[j])[-topk:]) - ixy[j]) / ((topk - 1) * hys[j]) for j in range(m)]
f.write('Group num., NTC\n')
for j, ntc in sorted(enumerate(ntcs), key=lambda q: -q[1]):
f.write('%d, %0.3f\n' % (j, ntc))
f.close()
def anomalies(log_z, row_label=None, prefix=''):
from scipy.special import erf
ns = log_z.shape[0]
if row_label is None:
row_label = list(map(str, range(ns)))
a_score = np.sum(log_z[:, :], axis=1)
mean, std = np.mean(a_score), np.std(a_score)
a_score = (a_score - mean) / std
percentile = 1. / ns
anomalies = np.where(0.5 * (1 - erf(a_score / np.sqrt(2)) ) < percentile)[0]
f = safe_open(prefix + '/anomalies.txt', 'w+')
for i in anomalies:
f.write(row_label[i] + ', %0.1f\n' % a_score[i])
f.close()
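def example_anomaly_scores():
    """Illustrative sketch (added; not part of the original module).

    Mirrors the scoring inside anomalies() on synthetic data: sum the
    per-sample log_z values, z-score them, and flag rows whose lower-tail
    probability falls below 1/n_samples.
    """
    from scipy.special import erf
    rng = np.random.RandomState(0)
    log_z = rng.normal(size=(200, 5))
    log_z[0] -= 10.0  # plant one obvious anomaly
    a_score = log_z.sum(axis=1)
    a_score = (a_score - a_score.mean()) / a_score.std()
    p_low = 0.5 * (1 - erf(a_score / np.sqrt(2)))
    return np.where(p_low < 1.0 / len(a_score))[0]  # indices of flagged rows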
# Utilities
# IT UTILITIES
def entropy(xsamples):
# sample entropy for one discrete var
xsamples = np.asarray(xsamples)
xsamples = xsamples[xsamples >= 0] # by def, -1 means missing value
xs = np.unique(xsamples)
ns = len(xsamples)
ps = np.array([float(np.count_nonzero(xsamples == x)) / ns for x in xs])
return -np.sum(ps * np.log(ps))
def safe_open(filename, mode):
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
return codecs.open(filename, mode, "utf-8")
# Visualization utilities
def neato(fname, position=None, directed=False):
if directed:
os.system(
"sfdp " + fname + ".dot -Tpdf -Earrowhead=none -Nfontsize=16 -GK=2 -Gmaxiter=1000 -Goverlap=False -Gpack=True -Gpackmode=clust -Gsep=0.01 -Gsplines=False -o " + fname + "_sfdp.pdf")
os.system(
"sfdp " + fname + ".dot -Tpdf -Earrowhead=none -Nfontsize=16 -GK=2 -Gmaxiter=1000 -Goverlap=False -Gpack=True -Gpackmode=clust -Gsep=0.01 -Gsplines=True -o " + fname + "_sfdp_w_splines.pdf")
return True
if position is None:
os.system("neato " + fname + ".dot -Tpdf -o " + fname + ".pdf")
os.system("fdp " + fname + ".dot -Tpdf -o " + fname + "fdp.pdf")
else:
os.system("neato " + fname + ".dot -Tpdf -n -o " + fname + ".pdf")
return True
def extract_color(label):
import matplotlib
colors = matplotlib.colors.cnames.keys()
parts = label.split('_')
for part in parts:
if part in colors:
parts.remove(part)
return '_'.join(parts), part
return label, 'black'
def edge2pdf(g, filename, threshold=0, position=None, labels=None, connected=True, directed=False, makepdf=True):
#This function will takes list of edges and a filename
#and write a file in .dot format. Readable, eg. by omnigraffle
# OR use "neato file.dot -Tpng -n -o file.png"
# The -n option says whether to use included node positions or to generate new ones
# for a grid, positions = [(i%28,i/28) for i in range(784)]
def cnn(node):
#change node names for dot format
if type(node) is tuple or type(node) is list:
#return u'n' + u'_'.join(list(map(unicode, node)))
return u'n' + u'_'.join(list(map(str, node)))
else:
return node
if connected:
touching = list(set(sum([[a, b] for a, b in g.edges()], [])))
g = nx.subgraph(g, touching)
print('non-isolated nodes,edges', len(list(g.nodes())), len(list(g.edges())))
f = safe_open(filename + '.dot', 'w+')
#print('f1->',f)
#print('directed->',directed)
if directed:
f.write("strict digraph {\n")#.decode("utf-8")
#f.write('strict digraph {')
else:
f.write("strict graph {")
#f.write("\tgraph [overlap=scale];\n".encode('utf-8'))
f.write("\tnode [shape=point];\n")
for a, b, d in g.edges(data=True):
if 'weight' in d:
if directed:
f.write(("\t" + cnn(a) + ' -> ' + cnn(b) + ' [penwidth=%.2f' % float(
np.clip(d['weight'], 0, 9)) + '];\n'))
else:
if d['weight'] > threshold:
f.write(("\t" + cnn(a) + ' -- ' + cnn(b) + ' [penwidth=' + str(3 * d['weight']) + '];\n'))
else:
if directed:
f.write(("\t" + cnn(a) + ' -> ' + cnn(b) + ';\n'))
else:
f.write(("\t" + cnn(a) + ' -- ' + cnn(b) + ';\n'))
for n in g.nodes():
if labels is not None:
if type(labels) == dict or type(labels) == list:
thislabel = labels[n].replace(u'"', u'\\"')
lstring = u'label="' + thislabel + u'",shape=none'
elif type(labels) == str:
#if g.node[n].has_key('label'):
if 'label' in g._node[n]:
thislabel = g._node[n][labels].replace(u'"', u'\\"')
# combine dupes
#llist = thislabel.split(',')
#thislabel = ','.join([l for l in set(llist)])
thislabel, thiscolor = extract_color(thislabel)
lstring = u'label="%s",shape=none,fontcolor="%s"' % (thislabel, thiscolor)
else:
weight = g._node[n].get('weight', 0.1)
if n[0] == 1:
lstring = u'shape=circle,margin="0,0",style=filled,fillcolor=black,fontcolor=white,height=%0.2f,label="%d"' % (
2 * weight, n[1])
else:
lstring = u'shape=point,height=%0.2f' % weight
else:
lstring = 'label="' + str(n) + '",shape=none'
lstring = str(lstring)
else:
lstring = False
if position is not None:
if position == 'grid':
position = [(i % 28, 28 - i / 28) for i in range(784)]
            posstring = str('pos="' + str(position[n][0]) + ',' + str(position[n][1]) + '"')
else:
posstring = False
finalstring = u' [' + u','.join([ts for ts in [posstring, lstring] if ts]) + u']\n'
#finalstring = u' ['+lstring+u']\n'
f.write(u'\t' + cnn(n) + finalstring)
f.write("}")
f.close()
if makepdf:
neato(filename, position=position, directed=directed)
return True
def predictable(out, data, wdict=None, topk=5, outfile='sorted_groups.txt', graphs=False, prefix='', athresh=0.5,
tvalue=0.1):
alpha, labels, lpygx, mis, lasttc = out[:5]
ns, m = labels.shape
m, nv = mis.shape
hys = [entropy(labels[:, j]) for j in range(m)]
#alpha = np.array([z[2] for z in zs]) # m by nv
nmis = []
ixys = []
for j in range(m):
if hys[j] > 0:
#ixy = np.dot((alpha[j]>0.95).astype(int),mis[j])-lasttc[-1][j]
ixy = max(0., np.dot(alpha[j], mis[j]) - lasttc[-1][j])
ixys.append(ixy)
tcn = (np.sum(np.sort(alpha[j] * mis[j])[-topk:]) - ixy) / ((topk - 1) * hys[j])
nmis.append(tcn) #ixy) #/hys[j])
else:
ixys.append(0)
nmis.append(0)
f = safe_open(prefix + outfile, 'w+')
print(list(enumerate(np.argsort(-np.array(nmis)))))
print(','.join(list(map(str, list(np.argsort(-np.array(nmis)))))))
for i, top in enumerate(np.argsort(-np.array(nmis))):
f.write('Group num: %d, Score: %0.3f\n' % (top, nmis[top]))
inds = np.where(alpha[top] > athresh)[0]
inds = inds[np.argsort(-mis[top, inds])]
for ind in inds:
f.write(wdict[ind] + ', %0.3f\n' % (mis[top, ind] / np.log(2)))
if wdict:
print(','.join(list(map(lambda q: wdict[q], inds))))
print(','.join(list(map(str, inds))))
print(top)
print(nmis[top], ixys[top], hys[top], ixys[top] / hys[top]) #,lasttc[-1][top],hys[top],lasttc[-1][top]/hys[top]
if graphs:
print(inds)
if len(inds) >= 2:
plot_rels(data[:, inds[:5]], list(map(lambda q: wdict[q], inds[:5])),
outfile='relationships/' + str(i) + '_group_num=' + str(top), latent=out[1][:, top],
alpha=tvalue)
f.close()
return nmis
def shorten(s, n=12):
if len(s) > 2 * n:
return s[:n] + '..' + s[-n:]
return s
def plot_convergence(tc_history, prefix=''):
pylab.plot(tc_history)
pylab.xlabel('number of iterations')
filename = prefix + '/convergence.pdf'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
pylab.savefig(filename)
pylab.close('all')
return True
def chunks(doc, n=100):
"""Yield successive approximately equal and n-sized chunks from l."""
words = doc.split()
if len(words) == 0:
yield ''
    n_chunks = len(words) // n  # Round down
if n_chunks == 0:
n_per_chunk = n
else:
n_per_chunk = int(np.ceil(float(len(words)) / n_chunks)) # round up
    for i in range(0, len(words), n_per_chunk):
yield ' '.join(words[i:i+n])
# Utilities to construct generalized binary bag of words matrices
def av_bbow(docs, n=100):
"""Average binary bag of words if we take chunks of a doc of size n"""
proc = skt.CountVectorizer(token_pattern=pattern)
proc.fit(docs)
n_doc, n_words = len(docs), len(proc.vocabulary_)
mat = ss.lil_matrix((n_doc, n_words))
for l, doc in enumerate(docs):
subdocs = chunks(doc, n=n)
mat[l] = (proc.transform(subdocs) > 0).mean(axis=0).A.ravel()
return mat.asformat('csr'), proc
def bow(docs):
"""Standard bag of words"""
proc = skt.CountVectorizer(token_pattern=pattern)
return proc.fit_transform(docs), proc
def all_bbow(docs, n=100):
"""Split each document into a subdocuments of size n, and return as binary BOW"""
proc = skt.CountVectorizer(token_pattern=pattern)
proc.fit(docs)
ids = []
for l, doc in enumerate(docs):
subdocs = chunks(doc, n=n)
submat = (proc.transform(subdocs) > 0)
if l == 0:
mat = submat
else:
mat = ss.vstack([mat, submat])
ids += [l]*submat.shape[0]
return mat.asformat('csr'), proc, ids
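def example_bbow_usage():
    """Illustrative usage sketch (added; not part of the original module).

    Runs the three bag-of-words helpers above on a tiny made-up corpus so the
    shapes of their outputs can be compared. With all_bbow, `ids` maps each
    chunk row back to the index of its source document.
    """
    docs = ['the cat sat on the mat', 'the dog ate my homework', 'cat and dog']
    X_counts, proc_counts = bow(docs)            # raw counts, one row per doc
    X_av, proc_av = av_bbow(docs, n=3)           # binary chunks of 3 words, averaged per doc
    X_all, proc_all, ids = all_bbow(docs, n=3)   # one row per 3-word chunk
    return X_counts.shape, X_av.shape, X_all.shape, ids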
def file_to_array(filename, stemming=False, strategy=2, words_per_doc=100, n_words=10000):
pattern = '\\b[A-Za-z]+\\b'
stemmer = SnowballStemmer('english')
with open(filename, 'rU') as input_file:
docs = []
for line in input_file:
if stemming:
docs.append(' '.join([stemmer.stem(w) for w in re.findall(pattern, line)]))
else:
docs.append(' '.join([w for w in re.findall(pattern, line)]))
print('processing file')
if strategy == 1:
X, proc = av_bbow(docs, n=words_per_doc)
elif strategy == 2:
X, proc, ids = all_bbow(docs, n=words_per_doc)
else:
X, proc = bow(docs)
var_order = np.argsort(-X.sum(axis=0).A1)[:n_words]
X = X[:, var_order]
#Dictionary
ivd = {v: k for k, v in proc.vocabulary_.items()}
words = [ivd[v] for v in var_order]
return X, words
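def example_file_to_array():
    """Illustrative usage sketch (added; not part of the original module).

    The input file name is hypothetical and should contain one document per
    line. With strategy=2 each document is split into ~100-word chunks, so X
    can have more rows than the file has lines.
    """
    X, words = file_to_array('corpus.txt', stemming=False, strategy=2,
                             words_per_doc=100, n_words=5000)
    return X.shape, words[:10]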
if __name__ == '__main__':
# Command line interface
# Sample commands:
# python vis_topic.py tests/data/twenty.txt --n_words=2000 --layers=20,3,1 -v --edges=50 -o test_output
from optparse import OptionParser, OptionGroup
parser = OptionParser(usage="usage: %prog [options] data_file.csv \n"
"Assume one document on each line.")
group = OptionGroup(parser, "Options")
group.add_option("-n", "--n_words",
action="store", dest="n_words", type="int", default=10000,
help="Maximum number of words to include in dictionary.")
group.add_option("-l", "--layers", dest="layers", type="string", default="2,1",
help="Specify number of units at each layer: 5,3,1 has "
"5 units at layer 1, 3 at layer 2, and 1 at layer 3")
group.add_option("-t", "--strategy", dest="strategy", type="int", default=0,
help="Specify the strategy for handling non-binary count data.\n"
"0. Naive binarization. This will be good for documents of similar length and especially"
"short documents.\n"
"1. Average binary bag of words. We split documents into chunks, compute the binary "
"bag of words for each documents and then average. This implicitly weights all documents"
"equally.\n"
"2. All binary bag of words. Split documents into chunks and consider each chunk as its"
"own binary bag of words documents. This changes the number of documents so it may take"
"some work to match the ids back, if desired.\n"
"3. Fractional counts. This converts counts into a fraction of the background rate, with 1 as"
"the max. Short documents tend to stay binary and words in long documents are weighted"
"according to their frequency with respect to background in the corpus.")
group.add_option("-o", "--output",
action="store", dest="output", type="string", default="topic_output",
help="A directory to put all output files.")
group.add_option("-s", "--stemming",
action="store_false", dest="stemming", default=True,
help="Use a stemmer on words.")
group.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="Print rich outputs while running.")
group.add_option("-w", "--words_per_doc",
action="store", dest="words_per_doc", type="int", default=300,
help="If using all_bbow or av_bbow, this specifies the number of words each "
"to split documents into.")
group.add_option("-e", "--edges",
action="store", dest="max_edges", type="int", default=1000,
help="Show at most this many edges in graphs.")
group.add_option("-q", "--regraph",
action="store_true", dest="regraph", default=False,
help="Don't re-run corex, just re-generate outputs (with number of edges changed).")
parser.add_option_group(group)
(options, args) = parser.parse_args()
if not len(args) == 1:
print("Run with '-h' option for usage help.")
sys.exit()
layers = list(map(int, options.layers.split(',')))
if layers[-1] != 1:
layers.append(1) # Last layer has one unit for convenience so that graph is fully connected.
#Load data from text file
print('reading file')
X, words = file_to_array(args[0], stemming=options.stemming, strategy=options.strategy,
words_per_doc=options.words_per_doc, n_words=options.n_words)
# cPickle.dump(words, open(options.prefix + '/dictionary.dat', 'w')) # TODO: output dictionary
# Run CorEx on data
if options.verbose:
np.set_printoptions(precision=3, suppress=True) # For legible output from numpy
print('\nData summary: X has %d rows and %d columns' % X.shape)
print('Variable names are: ' + ','.join(words))
print('Getting CorEx results')
if options.strategy == 3:
count = 'fraction'
else:
count = 'binarize' # Strategies 1 and 2 already produce counts <= 1 and are not affected by this choice.
if not options.regraph:
for l, layer in enumerate(layers):
if options.verbose:
print("Layer ", l)
if l == 0:
t0 = time()
corexes = [ct.Corex(n_hidden=layer, verbose=options.verbose, count=count).fit(X)]
print('Time for first layer: %0.2f' % (time() - t0))
else:
X_prev = np.matrix(corexes[-1].labels)
corexes.append(ct.Corex(n_hidden=layer, verbose=options.verbose).fit(X_prev))
for l, corex in enumerate(corexes):
# The learned model can be loaded again using ct.Corex().load(filename)
print('TC at layer %d is: %0.3f' % (l, corex.tc))
corex.save(options.output + '/layer_' + str(l) + '.dat')
else:
corexes = [ct.Corex().load(options.output + '/layer_' + str(l) + '.dat') for l in range(len(layers))]
# This line outputs plots showing relationships at the first layer
vis_rep(corexes[0], data=X, column_label=words, prefix=options.output)
# This line outputs a hierarchical networks structure in a .dot file in the "graphs" folder
# And it tries to compile the dot file into a pdf using the command line utility sfdp (part of graphviz)
vis_hierarchy(corexes, column_label=words, max_edges=options.max_edges, prefix=options.output)
| apache-2.0 |
amaurywalbert/twitter | redes/n1/n1_creating_network_v1.1.py | 1 | 12237 | # -*- coding: latin1 -*-
################################################################################################
# Script to build friendship networks starting from a set of Twitter alters
#
#
import datetime, sys, time, json, os, os.path, shutil, time, struct, random
import networkx as nx
import matplotlib.pyplot as plt
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
## Status - Version 1 - Build the N1 (follow) network from the collected data, following the instructions below:
##          Version 1.1 - Try to fix the high memory consumption observed while creating the networks.
##                        - Graph cleared in 3 places (it worked - still need to check which of the 3??)
##
## # INPUT:
##   - List of Egos (egos)
##   - Followee list (alters) of each Ego - forms the set of Alters
##   - Followee list (followees) of each Alter (ids)
##
## # ALGORITHM
##   0 - For each ego[i]:
##   1 -   Initialize ego[i] and all of its friends (alters[i][n]) as vertices of a graph - (hash table - ego+alters - vertices)
##   2 -   Create a directed edge between ego[i] and every alter (alter[i][n])
##   3 -   For each element of the set of alters (alter[i][j]):
##   4 -     For each element of the alter's friend set (followee[i][j][k]):
##   5 -       If followee[i][j][k] is in the vertex set (hash table - ego+alters):
##   6 -         If there is no directed edge between alter[i][j] and followee[i][j][k]:
##   7 -           Create a directed edge between alter[i][j] and followee[i][j][k]
##   8 - ##### Else: ##### This network has no edge weights
##   9 - #####   Add weight to the edge. ##### This network has no edge weights
##
##
######################################################################################################################################################################
################################################################################################
# Converts the binary files into the specific format used to build the graph
# - Returns the friend list of an alter (alter = friend of the ego)
################################################################################################
def read_arq_bin(file): # Receives the binary file
with open(file, 'r') as f:
f.seek(0,2)
tamanho = f.tell()
f.seek(0)
friends_list = []
while f.tell() < tamanho:
buffer = f.read(user_struct.size)
friend = user_struct.unpack(buffer)
friends_list.append(friend[0])
return friends_list
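################################################################################################
# Illustrative sketch (added; not part of the original script): writes a list of ids in the
# same fixed-size binary format ('l' records) that read_arq_bin expects. The path and the
# ids passed in are hypothetical.
################################################################################################
def example_write_bin(path, ids):
    with open(path, 'wb') as f:
        for user_id in ids:
            f.write(user_struct.pack(user_id))
    # read_arq_bin(path) would now return the same list of ids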
######################################################################################################################################################################
#
# Converts datetime values so they can be stored in JSON format
#
######################################################################################################################################################################
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
encoded_object = list(obj.timetuple())[0:6]
else:
encoded_object =json.JSONEncoder.default(self, obj)
return encoded_object
################################################################################################
# Saves the graphs in the standard format used as input to the detection algorithms
################################################################################################
def save_graph(ego, G): # Receives the id of the current ego and the graph (edge list)
with open(output_dir+str(ego)+".edge_list", 'wb') as graph:
nx.write_edgelist(G, graph, data=False)
G.clear()
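################################################################################################
# Illustrative sketch (added; not part of the original script): reloads an edge list written
# by save_graph as a directed NetworkX graph. Node ids are read back as longs.
################################################################################################
def example_load_graph(ego):
    path = output_dir + str(ego) + ".edge_list"
    return nx.read_edgelist(path, create_using=nx.DiGraph(), nodetype=long)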
################################################################################################
# Builds the networks - graphs
################################################################################################
def ego_net(ego,alters_list,l): # Receives the ego id, the list of alters and the ordinal number of the current ego
    ep = 0 # Stores the partial errors - missing files for the alters set
    G=nx.DiGraph() # Start a DIRECTED graph
    G.clear()
    vertices = {} # Start hash table - vertex set - EGO + ALTERS
    out=[] # List of missing users
    ti = datetime.datetime.now() # Start time of the graph construction
    vertices[ego] = ego # Add the Ego to the vertex set
    for alter in alters_list:
        alter = long(alter)
        vertices[alter] = alter # Add each Alter to the vertex set
#        G.add_edge(ego,alter) # Create an edge between the Ego and each Alter - adds alters with empty files
i = 0
for alter in alters_list:
i+=1
try:
            friends = read_arq_bin(alters_dir+str(alter)+".dat") # Get the friend list of each alter
            if friends:
                G.add_edge(ego,alter) # Create an edge between the Ego and each Alter - does NOT add alters with empty files
                print i
                for friend in friends: # For each friend
                    friend = long(friend)
                    if vertices.has_key(friend): # If the friend is in the alters set
                        G.add_edge(alter,friend) ### Create edge
################################################################################################
######## For the other scripts - weighted graphs
#                       if G.has_edge(alter,friend): ### If an edge between the alter and the friend already exists
#                           G[alter][friend]['weight'] += 1 ##### Add weight to the edge - this network does not add edge weights...
#                       else: # Otherwise
#                           G.add_edge(alter,friend,weight=1) # Create edge with weight 1
################################################################################################
        except IOError as e: # Exception handling - in case a friend file of the alter is missing,
            ep +=1 # Increment the partial errors (for the current ego)
            out.append(alter) # Add the alter to the list of missing users
# print ("ERROR: "+str(e))
    tf = datetime.datetime.now() # End time of the construction of the current ego's graph
    tp = tf - ti # Time spent building the graph
    print ("Edge list of graph "+str(l)+" built successfully. EGO: "+str(ego))
    print("Time to build the graph: "+str(tp))
    return G,ep,out # Returns the graph, the number of partial errors and the list of missing users
######################################################################################################################################################################
######################################################################################################################################################################
#
# Main method of the program.
# Builds and saves the graph for each ego file found in the input directory.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
    errors = 0 # Stores the total number of errors (missing files)
    l = 0 # Ordinal number of the ego currently being used to build a graph
    ti = datetime.datetime.now() # Start time of the whole graph-creation process
    for file in os.listdir(egos_dir): # For each Ego file in the directory
        l+=1 # Increment the Ego counter
        ego = file.split(".dat") # Strip the extension from the user id in the file name
        ego = long(ego[0]) # Get the user id as a Long
        alters_list = read_arq_bin(egos_dir+file) # Convert the ego's friend set from binary format into a python list
        friends = len(alters_list) # Size of the current user's friend list
        print("######################################################################")
        print ("Building graph for ego n: "+str(l)+"/"+str(qtde_egos)+" - Number of friends: "+str(friends))
        G, ep, out = ego_net(ego,alters_list, l) # Build the graph (edge list) for the current ego
        print("Number of missing users: "+str(ep))
        print
        print("Saving the graph...")
save_graph(ego,G)
G.clear()
print("######################################################################")
print
        errors+=ep # Add the partial errors from the current ego's graph to the total
        overview = {'ego':ego,'friends':friends,'errors':ep,'out':out} # Python dict with information about the construction of the current ego's graph
        with open(output_overview+str(ego)+".json", 'w') as f:
            f.write(json.dumps(overview)) # Write the python dict to the overview file in JSON format
    tf = datetime.datetime.now() # End time of the graph-creation process
    t = tf - ti # Total time spent creating the graphs
    print("Total script time: "+str(t))
    print("Total number of missing users: "+str(errors))
print("######################################################################")
print("Networks created!")
print("######################################################################\n")
######################################################################################################################################################################
#
# PROGRAM START
#
######################################################################################################################################################################
qtde_egos = 10 # 50, 100, 500, full
######################################################################################################################
######################################################################################################################
egos_dir = "/home/amaury/coleta/n1/egos_friends/"+str(qtde_egos)+"/bin/"######### Directory containing the Ego files
alters_dir = "/home/amaury/coleta/n1/alters_friends/"+str(qtde_egos)+"/bin/" #### Directory containing the Alter files
output_dir = "/home/amaury/redes/n1/"+str(qtde_egos)+"/graphs/" ################# Directory where the edge-list files are stored
output_dir_errors = "/home/amaury/redes/n1/"+str(qtde_egos)+"/errors/" ########## Directory where the errors are stored
output_overview = "/home/amaury/redes/n1/"+str(qtde_egos)+"/overview/" ################### Directory with files holding information about the network construction.
formato = 'l' ######################################################### Long for the id ('l'), followed by the char array of X positions:
user_struct = struct.Struct(formato) ############################################ Initialize the struct object so the specific record format can be stored in the binary file
######################################################################################################################
######################################################################################################################
# Create the directories where the output files are stored
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_overview):
os.makedirs(output_overview)
# Run the main method
if __name__ == "__main__": main() | gpl-3.0 |
akloster/bokeh | bokeh/charts/builder/step_builder.py | 43 | 5445 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Step class which lets you build your Step charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from six import string_types
from ..utils import cycle_colors
from .._builder import create_and_build, Builder
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Step(values, index=None, **kws):
""" Create a step chart using :class:`StepBuilder <bokeh.charts.builder.step_builder.StepBuilder>`
render the geometry from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series as an **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
In addition the the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Step, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
step = Step(xyvalues, title="Steps", legend="top_left", ylabel='Languages')
output_file('step.html')
show(step)
"""
return create_and_build(StepBuilder, values, index=index, **kws)
class StepBuilder(Builder):
"""This is the Step class and it is in charge of plotting
Step charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the
source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""It calculates the chart properties accordingly from Step.values.
Then build a dict containing references to all the points to be
used by the segment glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
self._groups = []
orig_xs = self._values_index
xs = np.empty(2*len(orig_xs)-1, dtype=np.int)
xs[::2] = orig_xs[:]
xs[1::2] = orig_xs[1:]
self._data['x'] = xs
for i, col in enumerate(self._values.keys()):
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self._groups.append(col)
orig_ys = np.array([self._values[col][x] for x in orig_xs])
ys = np.empty(2*len(orig_ys)-1)
ys[::2] = orig_ys[:]
ys[1::2] = orig_ys[:-1]
self._data['y_%s' % col] = ys
def _set_sources(self):
""" Push the Step data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
#y_sources = [sc.columns("y_%s" % col) for col in self._groups]
self.y_range = DataRange1d()
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the Step.
Takes reference points from the data loaded at the ColumnDataSource.
"""
colors = cycle_colors(self._groups, self.palette)
for i, name in enumerate(self._groups):
# draw the step horizontal segment
glyph = Line(x="x", y="y_%s" % name, line_color=colors[i], line_width=2)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
| bsd-3-clause |
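# A minimal standalone sketch (not part of the original file) of the x/y interleaving
# performed in StepBuilder._process_data above; it assumes only NumPy and matplotlib,
# and the orig_xs/orig_ys values are illustrative placeholders.
import numpy as np
import matplotlib.pyplot as plt
orig_xs = np.arange(5)                          # common index
orig_ys = np.array([2.0, 3.0, 7.0, 5.0, 26.0])  # one data series
xs = np.empty(2 * len(orig_xs) - 1)
xs[::2] = orig_xs                               # original x positions
xs[1::2] = orig_xs[1:]                          # repeat every x except the first
ys = np.empty(2 * len(orig_ys) - 1)
ys[::2] = orig_ys                               # original y values
ys[1::2] = orig_ys[:-1]                         # hold the previous y until the next x
plt.plot(xs, ys)                                # traces the same step shape as the Line glyph
plt.show()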
Jimmy-Morzaria/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
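# A small sketch (not part of the original example) of the same flatten -> fit -> predict
# flow written for Python 3, where the half-split index needs integer division (//);
# the SVC settings are taken from the example above and are otherwise illustrative.
from sklearn import datasets, svm, metrics
digits = datasets.load_digits()
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))   # flatten 8x8 images to 64 features
split = n_samples // 2
clf = svm.SVC(gamma=0.001)
clf.fit(data[:split], digits.target[:split])
predicted = clf.predict(data[split:])
print(metrics.classification_report(digits.target[split:], predicted))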
kmike/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 12 | 1962 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
pl.plot(enet.coef_, label='Elastic net coefficients')
pl.plot(lasso.coef_, label='Lasso coefficients')
pl.plot(coef, '--', label='original coefficients')
pl.legend(loc='best')
pl.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
pl.show()
| bsd-3-clause |
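# A compact sketch (not part of the original example) of the same Lasso vs. ElasticNet
# comparison using matplotlib.pyplot instead of the deprecated pylab interface; the
# synthetic data and alpha value are illustrative assumptions.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso, ElasticNet
from sklearn.metrics import r2_score
rng = np.random.RandomState(42)
X = rng.randn(50, 200)
coef = 3 * rng.randn(200)
coef[20:] = 0.0                                  # keep only 20 informative features
y = X.dot(coef) + 0.01 * rng.normal(size=50)
X_train, X_test, y_train, y_test = X[:25], X[25:], y[:25], y[25:]
for model in (Lasso(alpha=0.1), ElasticNet(alpha=0.1, l1_ratio=0.7)):
    pred = model.fit(X_train, y_train).predict(X_test)
    plt.plot(model.coef_,
             label="%s (R^2=%.2f)" % (type(model).__name__, r2_score(y_test, pred)))
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.show()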
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.py | 14 | 4604 | # Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
# subtract the linear trend and make sure there no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
def test_rbf_epsilon_none_collinear():
# Check that collinear points in one dimension doesn't cause an error
# due to epsilon = 0
x = [1, 2, 3]
y = [4, 4, 4]
z = [5, 6, 7]
rbf = Rbf(x, y, z, epsilon=None)
assert_(rbf.epsilon > 0)
if __name__ == "__main__":
run_module_suite()
| mit |
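# A minimal usage sketch (not part of the test module above) of the Rbf class being
# tested: fit scattered 1-D samples and evaluate the interpolant on a finer grid.
# It assumes only NumPy and SciPy; the node count and basis function are arbitrary.
import numpy as np
from scipy.interpolate import Rbf
x = np.linspace(0, 10, 9)
y = np.sin(x)
rbf = Rbf(x, y, function='multiquadric')   # the default basis exercised by the tests
xi = np.linspace(0, 10, 101)
yi = rbf(xi)                               # smooth interpolant through the nodes
print(np.allclose(rbf(x), y))              # exact at the nodes, as the tests assert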
qiudebo/13learn | code/matplotlib/lab/test_head_map.py | 1 | 2102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'qiudebo'
import numpy as NP
A = NP.array([
[6.55,6.76,7.32,5.6,5.94,],
[0.01,0.01,0.04,0.02,0.11,],
[6.45,6.29,4.34,4.57,7.15,],
[8.73,10.67,6.9,8.25,8.53,],
[0.03,0.01,0.05,0.01,0.07,],
[1.36,1.41,0.8,0.98,1.36,],
[0,0,0,0,0.01,],
[2.09,2.93,1.94,1.96,3.56,],
[3.61,3.37,2.62,6.99,4.6,],
[6.08,7.04,4.72,4.78,7.2,],
[1.67,0.92,0.62,3.87,0.75,],
[0.01,0,0,0.11,0.18,],
[0.03,0,0.13,0.03,0.24,],
[1.66,1.8,1.81,1.81,2.12,],
[3.37,3.48,4.39,3.02,3.2,],
[4.77,4.91,8.62,5.35,4.68,],
[7.58,7.9,3.02,7.57,8.15,],
[7.59,7.79,9.92,8.17,7.61,],
[3.59,3.46,2.54,2.99,2.68,],
[2.51,3.82,4.57,2.56,3.19,],
[1.74,2.38,5.4,2.05,2.24,],
[4.71,3.05,5.12,2.73,4.18,],
[0.85,0.93,2.47,0.83,1.12,],
[11.62,12.01,10.43,12.49,12.42,],
[13.4,9.06,12.24,13.26,8.71,],
])
from matplotlib import pyplot as PLT
from matplotlib import cm as CM
from matplotlib import axes
# Set up a figure with a white background.
fig = PLT.figure(facecolor='w')
# Note the position coordinates; the numbers are fractions of the figure area
ax1 = fig.add_subplot(2,1,1,position=[0.1,0.15,0.9,0.8])
# Note the rotation angle of the tick labels
ax1.set_xticklabels(['','A','B','C','D','E'], range(6), rotation=-45)
# select the color map
# Several colormaps are possible; 'spectral' is used here. The 1000 is how many blocks the color scale is divided into: the larger the number, the finer the color gradations.
#cmap = CM.get_cmap('RdYlBu_r', 1000)
#cmap = CM.get_cmap('rainbow', 1000)
cmap = CM.get_cmap('spectral', 1000)
# map the colors/shades to your data
# vmin and vmax are the minimum and maximum of the data matrix; this range should match the range of the data.
# The aspect parameter affects the position and size of the image within the figure. For the position argument of add_subplot above to take effect, aspect must be set to 'auto' here.
map = ax1.imshow(A, interpolation="nearest", cmap=cmap,aspect='auto', vmin=0,vmax=15)
# shrink is the factor by which the colorbar is scaled down
cb = PLT.colorbar(mappable=map, cax=None, ax=None,shrink=0.5)
cb.set_label('(%)')
# plot it
PLT.show() | mit |
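# A small sketch (not part of the original script) of the same heat-map idea using a
# colormap that still ships with current matplotlib ('spectral' was removed in newer
# releases); the data matrix here is random and purely illustrative.
import numpy as np
import matplotlib.pyplot as plt
data = np.random.rand(25, 5) * 15
fig, ax = plt.subplots(facecolor='w')
im = ax.imshow(data, interpolation='nearest', cmap='nipy_spectral',
               aspect='auto', vmin=0, vmax=15)
ax.set_xticks(range(5))
ax.set_xticklabels(['A', 'B', 'C', 'D', 'E'], rotation=-45)
cb = fig.colorbar(im, shrink=0.5)
cb.set_label('(%)')
plt.show()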
slipguru/ignet | icing/plotting/silhouette.py | 2 | 20601 | #!/usr/bin/env python
"""Plotting functions for silhouette analysis.
Author: Federico Tomasi
Copyright (c) 2016, Federico Tomasi.
Licensed under the FreeBSD license (see LICENSE.txt).
"""
from __future__ import print_function, division
import logging
import matplotlib; matplotlib.use("Agg")
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import os
import pandas as pd
import scipy
import seaborn as sns; sns.set_context('notebook')
import sys; sys.setrecursionlimit(10000)
from scipy.cluster.hierarchy import linkage, fcluster
# from sklearn.cluster import SpectralClustering
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import silhouette_samples # , silhouette_score
from icing.externals import SpectralClustering
from icing.externals import Tango
from icing.utils import extra
def plot_clusters_silhouette(X, cluster_labels, n_clusters, root='',
file_format='pdf'):
"""Plot the silhouette score for each cluster, given the distance matrix X.
Parameters
----------
X : array_like, shape [n_samples_a, n_samples_a]
Distance matrix.
cluster_labels : array_like
        List of integers representing the cluster of the corresponding
        point in X. The size must be the same as a dimension of X.
n_clusters : int
The number of clusters.
root : str, optional
The root path for the output creation
file_format : ('pdf', 'png')
Choose the extension for output images.
"""
# Create a subplot with 1 row and 2 columns
fig, (ax1) = plt.subplots(1, 1)
fig.set_size_inches(20, 15)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
# ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels,
metric="precomputed")
silhouette_avg = np.mean(sample_silhouette_values)
logging.info("Average silhouette_score: %.4f", silhouette_avg)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
# ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("silhouette coefficient values")
ax1.set_ylabel("cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.suptitle(("Silhouette analysis (n_clusters {}, avg score {:.4f}, "
"tot Igs {}".format(n_clusters, silhouette_avg, X.shape[0])),
fontsize=14, fontweight='bold')
filename = os.path.join(root, 'silhouette_analysis_{}.{}'
.format(extra.get_time(), file_format))
fig.savefig(filename)
    logging.info('Figure saved %s', filename)
def best_intersection(id_list, cluster_dict):
"""Compute jaccard index between id_list and each list in dict."""
set1 = set(id_list)
best_score = (0., 0., 0.) # res, numerator, denominator
best_set = ()
best_key = -1
for k in cluster_dict:
set2 = set(cluster_dict[k])
numerator = len(set1 & set2)
denominator = len(set1 | set2)
score = numerator / denominator
if score > best_score[0] or best_key == -1:
best_score = (score, numerator, denominator)
best_set = set2.copy()
best_key = k
# print(set1, "and best", best_set, best_score)
if best_key != -1:
del cluster_dict[best_key]
return best_score, cluster_dict, best_set
def save_results_clusters(filename, sample_names, cluster_labels):
with open(filename, 'w') as f:
for a, b in zip(sample_names, cluster_labels):
f.write("{}, {}\n".format(a, b))
def single_silhouette_dendrogram(dist_matrix, Z, threshold, mode='clusters',
method='single', sample_names=None):
"""Compute the average silhouette at a given threshold.
Parameters
----------
dist_matrix : array-like
Precomputed distance matrix between points.
Z : array-like
Linkage matrix, results of scipy.cluster.hierarchy.linkage.
threshold : float
Specifies where to cut the dendrogram.
mode : ('clusters', 'thresholds'), optional
Choose what to visualise on the x-axis.
Returns
-------
x : float
Based on mode, it can contains the number of clusters or threshold.
silhouette_avg : float
The average silhouette.
"""
cluster_labels = fcluster(Z, threshold, 'distance')
nclusts = np.unique(cluster_labels).shape[0]
save_results_clusters("res_{}_{:03d}_clust.csv".format(method, nclusts),
sample_names, cluster_labels)
try:
silhouette_list = silhouette_samples(dist_matrix, cluster_labels,
metric="precomputed")
silhouette_avg = np.mean(silhouette_list)
x = max(cluster_labels) if mode == 'clusters' else threshold
except ValueError as e:
if max(cluster_labels) == 1:
x = 1 if mode == 'clusters' else threshold
silhouette_avg = 0
else:
raise(e)
return x, silhouette_avg
def multi_cut_dendrogram(dist_matrix, Z, threshold_arr, n, mode='clusters',
method='single', n_jobs=-1, sample_names=None):
"""Cut a dendrogram at some heights.
Parameters
----------
dist_matrix : array-like
Precomputed distance matrix between points.
Z : array-like
Linkage matrix, results of scipy.cluster.hierarchy.linkage.
threshold_arr : array-like
One-dimensional array which contains the thresholds where to cut the
dendrogram.
n : int
Length of threshold_arr
mode : ('clusters', 'thresholds'), optional
Choose what to visualise on the x-axis.
Returns
-------
queue_{x, y} : array-like
The results to be visualised on a plot.
"""
def _internal(dist_matrix, Z, threshold_arr, idx, n_jobs, arr_length,
queue_x, queue_y, mode='clusters', method='single',
sample_names=None):
for i in range(idx, arr_length, n_jobs):
queue_x[i], queue_y[i] = single_silhouette_dendrogram(
dist_matrix, Z, threshold_arr[i], mode, method, sample_names)
if n_jobs == -1:
n_jobs = min(mp.cpu_count(), n)
queue_x, queue_y = mp.Array('d', [0.] * n), mp.Array('d', [0.] * n)
ps = []
try:
for idx in range(n_jobs):
p = mp.Process(target=_internal,
args=(dist_matrix, Z, threshold_arr, idx, n_jobs, n,
queue_x, queue_y, mode, method, sample_names))
p.start()
ps.append(p)
for p in ps:
p.join()
except (KeyboardInterrupt, SystemExit):
extra.term_processes(ps, 'Exit signal received\n')
except BaseException as e:
extra.term_processes(ps, 'ERROR: %s\n' % e)
return queue_x, queue_y
def plot_average_silhouette_dendrogram(
X, method_list=None, mode='clusters', n=20, min_threshold=0.02,
max_threshold=0.8, verbose=True, interactive_mode=False,
file_format='pdf', xticks=None, xlim=None, figsize=None, n_jobs=-1,
sample_names=None):
"""Plot average silhouette for each tree cutting.
A linkage matrix for each method in method_list is used.
Parameters
----------
X : array-like
Symmetric 2-dimensional distance matrix.
method_list : array-like, optional
String array which contains a list of methods for computing the
        linkage matrix. If None, all the available methods will be used.
mode : ('clusters', 'threshold')
Choose what to visualise on x-axis.
n : int, optional
Choose at how many heights dendrogram must be cut.
verbose : boolean, optional
        Whether to print progress messages.
interactive_mode : boolean, optional
True: final plot will be visualised and saved.
False: final plot will be only saved.
file_format : ('pdf', 'png')
Choose the extension for output images.
Returns
-------
filename : str
The output filename.
"""
if method_list is None:
method_list = ('single', 'complete', 'average', 'weighted',
'centroid', 'median', 'ward')
plt.close()
if figsize is not None:
fig = plt.figure(figsize=figsize)
fig, ax = (plt.gcf(), plt.gca()) # if plt.get_fignums() else plt.subplots()
fig.suptitle("Average silhouette for each tree cutting")
# print_utils.ax_set_defaults(ax)
# convert distance matrix into a condensed one
dist_arr = scipy.spatial.distance.squareform(X)
for method in method_list:
if verbose:
print("Compute linkage with method = {}...".format(method))
Z = linkage(dist_arr, method=method, metric='euclidean')
if method == 'ward':
threshold_arr = np.linspace(np.percentile(Z[:, 2], 70), max(Z[:, 2]), n)
else:
threshold_arr = np.linspace(min_threshold, max_threshold, n)
max_i = max(Z[:, 2]) if method != 'ward' else np.percentile(Z[:, 2], 99.5)
threshold_arr *= max_i
x, y = multi_cut_dendrogram(X, Z, threshold_arr, n, mode, method, n_jobs,
sample_names=sample_names)
ax.plot(x, y, Tango.nextDark(), marker='o', ms=3, ls='-', label=method)
# fig.tight_layout()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
leg = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# leg = ax.legend(loc='lower right')
leg.get_frame().set_linewidth(0.0)
ax.set_xlabel(mode[0].upper() + mode[1:])
ax.set_ylabel("Silhouette")
if xticks is not None:
plt.xticks(xticks)
if xlim is not None:
plt.xlim(xlim)
plt.margins(.2)
plt.subplots_adjust(bottom=0.15)
if interactive_mode:
plt.show()
path = "results"
extra.mkpath(path)
filename = os.path.join(path, "result_silhouette_hierarchical_{}.{}"
.format(extra.get_time(), file_format))
fig.savefig(filename, bbox_extra_artists=(leg,), bbox_inches='tight')
# fig.savefig(filename, dpi=300, format='png')
return filename
def multi_cut_spectral(cluster_list, affinity_matrix, dist_matrix, n_jobs=-1,
sample_names=None):
"""Perform a spectral clustering with variable cluster sizes.
Parameters
----------
cluster_list : array-like
Contains the list of the number of clusters to use at each step.
affinity_matrix : array-like
Precomputed affinity matrix.
dist_matrix : array-like
Precomputed distance matrix between points.
Returns
-------
queue_y : array-like
Array to be visualised on the y-axis. Contains the list of average
silhouette for each number of clusters present in cluster_list.
"""
def _internal(cluster_list, affinity_matrix, dist_matrix,
idx, n_jobs, n, queue_y):
for i in range(idx, n, n_jobs):
sp = SpectralClustering(n_clusters=cluster_list[i],
affinity='precomputed',
norm_laplacian=True,
n_init=1000)
sp.fit(affinity_matrix)
save_results_clusters("res_spectral_{:03d}_clust.csv"
.format(cluster_list[i]),
sample_names, sp.labels_)
silhouette_list = silhouette_samples(dist_matrix, sp.labels_,
metric="precomputed")
queue_y[i] = np.mean(silhouette_list)
n = len(cluster_list)
if n_jobs == -1:
n_jobs = min(mp.cpu_count(), n)
queue_y = mp.Array('d', [0.] * n)
ps = []
try:
for idx in range(n_jobs):
p = mp.Process(target=_internal,
args=(cluster_list, affinity_matrix, dist_matrix,
idx, n_jobs, n, queue_y))
p.start()
ps.append(p)
for p in ps:
p.join()
except (KeyboardInterrupt, SystemExit):
extra.term_processes(ps, 'Exit signal received\n')
except BaseException as e:
extra.term_processes(ps, 'ERROR: %s\n' % e)
return queue_y
def plot_average_silhouette_spectral(
X, n=30, min_clust=10, max_clust=None, verbose=True,
interactive_mode=False, file_format='pdf', n_jobs=-1,
sample_names=None, affinity_delta=.2, is_affinity=False):
"""Plot average silhouette for some clusters, using an affinity matrix.
Parameters
----------
X : array-like
Symmetric 2-dimensional distance matrix.
verbose : boolean, optional
        Whether to print progress messages.
interactive_mode : boolean, optional
True: final plot will be visualised and saved.
False: final plot will be only saved.
file_format : ('pdf', 'png')
Choose the extension for output images.
Returns
-------
filename : str
The output filename.
"""
X = extra.ensure_symmetry(X)
A = extra.distance_to_affinity_matrix(X, delta=affinity_delta) if not is_affinity else X
plt.close()
fig, ax = (plt.gcf(), plt.gca())
fig.suptitle("Average silhouette for each number of clusters")
if max_clust is None:
max_clust = X.shape[0]
cluster_list = np.unique(map(int, np.linspace(min_clust, max_clust, n)))
y = multi_cut_spectral(cluster_list, A, X, n_jobs=n_jobs,
sample_names=sample_names)
ax.plot(cluster_list, y, Tango.next(), marker='o', linestyle='-', label='')
# leg = ax.legend(loc='lower right')
# leg.get_frame().set_linewidth(0.0)
ax.set_xlabel("Clusters")
ax.set_ylabel("Silhouette")
fig.tight_layout()
if interactive_mode:
plt.show()
path = "results"
extra.mkpath(path)
filename = os.path.join(path, "result_silhouette_spectral_{}.{}"
.format(extra.get_time(), file_format))
plt.savefig(filename)
plt.close()
# plot eigenvalues
from adenine.core import plotting
plotting.eigs(
'', A,
filename=os.path.join(path, "eigs_spectral_{}.{}"
.format(extra.get_time(), file_format)),
n_components=50,
normalised=True,
rw=True
)
return filename
def multi_cut_ap(preferences, affinity_matrix, dist_matrix, n_jobs=-1,
sample_names=None):
"""Perform a AP clustering with variable cluster sizes.
Parameters
----------
    preferences : array-like
        Contains the list of preference values to use at each step.
affinity_matrix : array-like
Precomputed affinity matrix.
dist_matrix : array-like
Precomputed distance matrix between points.
Returns
-------
queue_y : array-like
Array to be visualised on the y-axis. Contains the list of average
silhouette for each number of clusters present in cluster_list.
"""
def _internal(preferences, affinity_matrix, dist_matrix,
idx, n_jobs, n, queue_y):
for i in range(idx, n, n_jobs):
ap = AffinityPropagation(preference=preferences[i],
affinity='precomputed',
max_iter=500)
ap.fit(affinity_matrix)
cluster_labels = ap.labels_.copy()
nclusts = np.unique(cluster_labels).shape[0]
save_results_clusters("res_ap_{:03d}_clust.csv"
.format(nclusts),
sample_names, ap.labels_)
if nclusts > 1:
try:
silhouette_list = silhouette_samples(dist_matrix, ap.labels_,
metric="precomputed")
queue_y[i] = np.mean(silhouette_list)
except BaseException:
print(dist_matrix.shape, ap.labels_.shape)
n = len(preferences)
if n_jobs == -1:
n_jobs = min(mp.cpu_count(), n)
queue_y = mp.Array('d', [0.] * n)
ps = []
try:
for idx in range(n_jobs):
p = mp.Process(target=_internal,
args=(preferences, affinity_matrix, dist_matrix,
idx, n_jobs, n, queue_y))
p.start()
ps.append(p)
for p in ps:
p.join()
except (KeyboardInterrupt, SystemExit):
extra.term_processes(ps, 'Exit signal received\n')
except BaseException as e:
extra.term_processes(ps, 'ERROR: %s\n' % e)
return queue_y
def plot_average_silhouette_ap(
X, n=30, verbose=True,
interactive_mode=False, file_format='pdf', n_jobs=-1,
sample_names=None, affinity_delta=.2, is_affinity=False):
"""Plot average silhouette for some clusters, using an affinity matrix.
Parameters
----------
X : array-like
Symmetric 2-dimensional distance matrix.
verbose : boolean, optional
        Whether to print progress messages.
interactive_mode : boolean, optional
True: final plot will be visualised and saved.
False: final plot will be only saved.
file_format : ('pdf', 'png')
Choose the extension for output images.
Returns
-------
filename : str
The output filename.
"""
X = extra.ensure_symmetry(X)
A = extra.distance_to_affinity_matrix(X, delta=affinity_delta) if not is_affinity else X
plt.close()
fig, ax = (plt.gcf(), plt.gca())
fig.suptitle("Average silhouette for each number of clusters")
# dampings = np.linspace(.5, 1, n+1)[:-1]
# from sklearn.metrics.pairwise import pairwise_distances
# S = -pairwise_distances(X, metric='precomputed', squared=True)
preferences = np.append(np.linspace(np.min(A), np.median(A), n - 1),
np.median(A))
y = multi_cut_ap(preferences, A, X, n_jobs=n_jobs,
sample_names=sample_names)
ax.plot(preferences, y, Tango.next(), marker='o', linestyle='-', label='')
# leg = ax.legend(loc='lower right')
# leg.get_frame().set_linewidth(0.0)
ax.set_xlabel("Clusters")
ax.set_ylabel("Silhouette")
fig.tight_layout()
if interactive_mode:
plt.show()
plt.close()
return None
if __name__ == '__main__':
"""Example of usage.
silhouette.plot_average_silhouette_dendrogram(
X, min_threshold=.7, max_threshold=1.1, n=200, xticks=range(0, 50, 4),
xlim=[0, 50], figsize=(20, 8),
method_list=('median', 'ward', 'complete'))
silhouette.plot_average_silhouette_spectral(X, min_clust=2, max_clust=10, n=10)
"""
| bsd-2-clause |
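# A standalone sketch (not part of the original module) of the core computation inside
# single_silhouette_dendrogram above: cut a linkage tree and score the resulting labels
# with silhouette_samples on a precomputed distance matrix. The blob data is an assumption.
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, fcluster
from sklearn.metrics import silhouette_samples
rng = np.random.RandomState(0)
points = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5])
dist_arr = pdist(points)                         # condensed form for linkage
dist_matrix = squareform(dist_arr)               # square form for silhouette_samples
Z = linkage(dist_arr, method='average')
labels = fcluster(Z, t=2, criterion='maxclust')  # equivalent to cutting at some height
sil = silhouette_samples(dist_matrix, labels, metric='precomputed')
print(len(np.unique(labels)), sil.mean())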
mac389/lovasi | src/orthogonalize-topics-xkcd.py | 1 | 2933 | import numpy as np
import matplotlib.pyplot as plt
import Graphics as artist
import matplotlib.gridspec as gridspec
from awesome_print import ap
plt.xkcd()
def unique_words(aStr):
return ' '.join([word for word in set(aStr.split())])
def princomp(A,numpc=3):
# computing eigenvalues and eigenvectors of covariance matrix
M = (A-np.mean(A.T,axis=1)).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M))
p = np.size(coeff,axis=1)
idx = np.argsort(latent) # sorting the eigenvalues
idx = idx[::-1] # in ascending order
# sorting eigenvectors according to the sorted eigenvalues
coeff = coeff[:,idx]
latent = latent[idx] # sorting eigenvalues
if numpc < p and numpc >= 0:
coeff = coeff[:,range(numpc)] # cutting some PCs if needed
score = np.dot(coeff.T,M) # projection of the data in the new space
return coeff,score,latent
TEXT = 1
basis_vectors = [unique_words(line.split(':')[TEXT]) for line in open('../data/lda-topics.txt','rb').read().splitlines()]
def jaccard_similarity(a,b):
a = set(a)
b = set(b)
try:
return len(a & b)/float(len(a | b))
except:
return 0
#ap(basis_vectors)
basis_vectors_similarity = np.array([[jaccard_similarity(one,two) for one in basis_vectors] for two in basis_vectors])
eigvecs,proj,eigvals = princomp(basis_vectors_similarity,numpc=basis_vectors_similarity.shape[1])
for name,matrix in [('eigvecs',eigvecs),('projections',proj),('eigvals',eigvals)]:
np.savetxt('../data/%s'%name,matrix,fmt='%.02f')
max_color = max(basis_vectors_similarity.max(),eigvecs.max(),np.corrcoef(eigvecs.T).max())
min_color = min(basis_vectors_similarity.min(),eigvecs.min(),np.corrcoef(eigvecs.T).max())
fig = plt.figure(figsize=(12,5))
gspec = gridspec.GridSpec(1, 3,width_ratios=[1,1,1])
non_orthogonal = plt.subplot(gspec[0])
loading_matrix = plt.subplot(gspec[2])
orthogonal = plt.subplot(gspec[1])
cno = non_orthogonal.imshow(basis_vectors_similarity,interpolation='nearest',aspect='auto',vmax=max_color,vmin=min_color)
artist.adjust_spines(non_orthogonal)
non_orthogonal.set_xlabel('Topic')
non_orthogonal.set_ylabel('Topic')
cbar_no = fig.colorbar(cno,ax=non_orthogonal)
cbar_no.set_label('Jaccard Similarity ')
cax_load = loading_matrix.imshow(eigvecs,interpolation='nearest',aspect='auto',vmax=max_color,vmin=min_color)
artist.adjust_spines(loading_matrix)
loading_matrix.set_xlabel('Eigentopic')
loading_matrix.set_ylabel('Topic')
cbar_load = fig.colorbar(cax_load,ax=loading_matrix,use_gridspec=True)
cbar_load.set_label('Loading Weight')
cax = orthogonal.imshow(np.corrcoef(eigvecs.T),interpolation='nearest',aspect='auto',vmax=max_color,vmin=min_color)
artist.adjust_spines(orthogonal)
orthogonal.set_xlabel('Eigentopic')
orthogonal.set_ylabel('Eigentopic')
cbar = fig.colorbar(cax,ax=orthogonal,use_gridspec=True)
cbar.set_label('Correlation Coefficient')
gspec.tight_layout(fig)
plt.savefig('../images/Jaccard->LDA-xkcd.png',dpi=300) | mit |
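# A minimal sketch (not part of the original script) of the eigendecomposition-based
# PCA that princomp() above performs, applied to a random symmetric "similarity"
# matrix; the matrix size and values are illustrative.
import numpy as np
rng = np.random.RandomState(0)
S = rng.rand(10, 10)
S = (S + S.T) / 2.0                        # symmetric similarity matrix
M = (S - S.mean(axis=0)).T                 # subtract column means, as princomp() does
eigvals, eigvecs = np.linalg.eig(np.cov(M))
order = np.argsort(eigvals)[::-1]          # descending eigenvalues
eigvals, eigvecs = eigvals[order], eigvecs[:, order]
scores = eigvecs.T.dot(M)                  # projection onto the principal axes
print(eigvals[:3])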
MingdaZhou/gnuradio | gr-filter/examples/reconstruction.py | 49 | 5015 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import channels
except ImportError:
print "Error: Program requires gr-channels."
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
fftlen = 8192
def main():
N = 10000
fs = 2000.0
Ts = 1.0/fs
t = scipy.arange(0, N*Ts, Ts)
# When playing with the number of channels, be careful about the filter
# specs and the channel map of the synthesizer set below.
nchans = 10
# Build the filter(s)
bw = 1000
tb = 400
proto_taps = filter.firdes.low_pass_2(1, nchans*fs,
bw, tb, 80,
filter.firdes.WIN_BLACKMAN_hARRIS)
print "Filter length: ", len(proto_taps)
# Create a modulated signal
npwr = 0.01
data = scipy.random.randint(0, 256, N)
rrc_taps = filter.firdes.root_raised_cosine(1, 2, 1, 0.35, 41)
src = blocks.vector_source_b(data.astype(scipy.uint8).tolist(), False)
mod = digital.bpsk_mod(samples_per_symbol=2)
chan = channels.channel_model(npwr)
rrc = filter.fft_filter_ccc(1, rrc_taps)
# Split it up into pieces
channelizer = filter.pfb.channelizer_ccf(nchans, proto_taps, 2)
# Put the pieces back together again
syn_taps = [nchans*t for t in proto_taps]
synthesizer = filter.pfb_synthesizer_ccf(nchans, syn_taps, True)
src_snk = blocks.vector_sink_c()
snk = blocks.vector_sink_c()
# Remap the location of the channels
    # Can be done in synth or channelizer (watch out for rotations in
# the channelizer)
synthesizer.set_channel_map([ 0, 1, 2, 3, 4,
15, 16, 17, 18, 19])
tb = gr.top_block()
tb.connect(src, mod, chan, rrc, channelizer)
tb.connect(rrc, src_snk)
vsnk = []
for i in xrange(nchans):
tb.connect((channelizer,i), (synthesizer, i))
vsnk.append(blocks.vector_sink_c())
tb.connect((channelizer,i), vsnk[i])
tb.connect(synthesizer, snk)
tb.run()
sin = scipy.array(src_snk.data()[1000:])
sout = scipy.array(snk.data()[1000:])
# Plot original signal
fs_in = nchans*fs
f1 = pylab.figure(1, figsize=(16,12), facecolor='w')
s11 = f1.add_subplot(2,2,1)
s11.psd(sin, NFFT=fftlen, Fs=fs_in)
s11.set_title("PSD of Original Signal")
s11.set_ylim([-200, -20])
s12 = f1.add_subplot(2,2,2)
s12.plot(sin.real[1000:1500], "o-b")
s12.plot(sin.imag[1000:1500], "o-r")
s12.set_title("Original Signal in Time")
start = 1
skip = 2
s13 = f1.add_subplot(2,2,3)
s13.plot(sin.real[start::skip], sin.imag[start::skip], "o")
s13.set_title("Constellation")
s13.set_xlim([-2, 2])
s13.set_ylim([-2, 2])
# Plot channels
nrows = int(scipy.sqrt(nchans))
ncols = int(scipy.ceil(float(nchans)/float(nrows)))
f2 = pylab.figure(2, figsize=(16,12), facecolor='w')
for n in xrange(nchans):
s = f2.add_subplot(nrows, ncols, n+1)
s.psd(vsnk[n].data(), NFFT=fftlen, Fs=fs_in)
s.set_title("Channel {0}".format(n))
s.set_ylim([-200, -20])
# Plot reconstructed signal
fs_out = 2*nchans*fs
f3 = pylab.figure(3, figsize=(16,12), facecolor='w')
s31 = f3.add_subplot(2,2,1)
s31.psd(sout, NFFT=fftlen, Fs=fs_out)
s31.set_title("PSD of Reconstructed Signal")
s31.set_ylim([-200, -20])
s32 = f3.add_subplot(2,2,2)
s32.plot(sout.real[1000:1500], "o-b")
s32.plot(sout.imag[1000:1500], "o-r")
s32.set_title("Reconstructed Signal in Time")
start = 0
skip = 4
s33 = f3.add_subplot(2,2,3)
s33.plot(sout.real[start::skip], sout.imag[start::skip], "o")
s33.set_title("Constellation")
s33.set_xlim([-2, 2])
s33.set_ylim([-2, 2])
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
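# A small sketch (not part of the original GNU Radio example) of the analogous
# prototype low-pass design step done with scipy.signal instead of filter.firdes,
# since running the flowgraph itself needs a GNU Radio install. The band edges mirror
# the bw/tb values above; the tap count and window are arbitrary assumptions, and the
# fs keyword requires a reasonably recent SciPy.
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
nchans, fs = 10, 2000.0
bw, tb = 1000.0, 400.0
taps = signal.firwin(325, bw + tb / 2.0, window=('kaiser', 8.0), fs=nchans * fs)
w, h = signal.freqz(taps, worN=4096, fs=nchans * fs)
plt.plot(w, 20 * np.log10(np.maximum(np.abs(h), 1e-12)))
plt.xlabel("Frequency (Hz)")
plt.ylabel("Magnitude (dB)")
plt.show()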
backtou/longlab | gr-filter/examples/fmtest.py | 12 | 7793 | #!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
from gnuradio import filter
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
fmtx = blks2.nbfm_tx (audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = gr.sig_source_c (if_rate, # sample rate
gr.GR_SIN_WAVE, # waveform type
lo_freq, #frequency
1.0, # amplitude
0) # DC Offset
mixer = gr.multiply_cc ()
self.connect (self, fmtx, (mixer, 0))
self.connect (lo, (mixer, 1))
self.connect (mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = gr.add_cc ()
for n in xrange(self._N):
sig = gr.sig_source_f(self._audio_rate, gr.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = gr.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = gr.vector_sink_c()
self.channel = blks2.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
# Design the channlizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
# Create a file sink for each of M output channels of the filter and connect it
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(blks2.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(blks2.standard_squelch(self._audio_rate*10))
self.snks.append(gr.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
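# A NumPy-only sketch (not part of the original example) of the narrow-band FM
# modulation that blks2.nbfm_tx performs, so the transmitted spectrum can be inspected
# without a GNU Radio install; the tone frequency, deviation and rate are illustrative.
import numpy as np
import matplotlib.pyplot as plt
fs = 160000.0                                   # IF rate used above (4 * 5 * 8000)
t = np.arange(0, 0.1, 1.0 / fs)
audio = 0.5 * np.sin(2 * np.pi * 30 * t)        # 30 Hz tone, like freq[0] above
max_dev = 5e3
phase = 2 * np.pi * max_dev * np.cumsum(audio) / fs
baseband = np.exp(1j * phase)                   # complex FM baseband signal
plt.psd(baseband, NFFT=8192, Fs=fs)
plt.show()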
mkrzewic/AliPhysics | PWGPP/FieldParam/fitsol.py | 39 | 8343 | #!/usr/bin/env python
debug = True # enable trace
def trace(x):
global debug
if debug: print(x)
trace("loading...")
from itertools import combinations, combinations_with_replacement
from glob import glob
from math import *
import operator
from os.path import basename
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.feature_selection
import datetime
def prec_from_pathname(path):
if '2k' in path: return 0.002
elif '5k' in path: return 0.005
    else: raise AssertionError('Unknown field strength: %s' % path)
# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]
def combinatrial_vars(vars_str='xyz', length=3):
term_list = []
for l in range(length):
term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])
return term_list
# product :: a#* => [a] -> a
def product(xs):
return reduce(operator.mul, xs, 1) # foldl in Haskell
# (XYZ, "xx") -> XX
def term(dataframe, vars_str):
return product(map(lambda x: dataframe[x], list(vars_str)))
# (f(X), Y) -> (max deviation, max%, avg dev, avg%)
def deviation_stat(fX, Y, prec=0.005):
dev = np.abs(fX - Y)
(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))
(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)
return (max_dev, max_pct, avg_dev, avg_pct)
# IO Df
def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):
sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']
df = pd.read_csv(path, sep=' ', names=sample_cols)
if cylindrical_axis:
df['r'] = np.sqrt(df.x**2 + df.y**2)
df['p'] = np.arctan2(df.y, df.x)
df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)
df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)
df['Br'] = df.Bt * np.cos(df.Bpsi)
df['Bp'] = df.Bt * np.sin(df.Bpsi)
if absolute_axis:
df['X'] = np.abs(df.x)
df['Y'] = np.abs(df.y)
df['Z'] = np.abs(df.z)
for var in genvars:
df[var] = term(df, var)
return df
def choose(vars, df1, df2):
X1 = df1.loc[:, vars].as_matrix()
X2 = df2.loc[:, vars].as_matrix()
return (X1, X2)
# IO ()
def run_analysis_for_all_fields():
sample_set = glob("dat_z22/*2k*.sample.dat")
test_set = glob("dat_z22/*2k*.test.dat")
#print(sample_set, test_set)
assert(len(sample_set) == len(test_set) and len(sample_set) > 0)
result = pd.DataFrame()
for i, sample_file in enumerate(sample_set):
trace("run_analysis('%s', '%s')" % (sample_file, test_set[i]))
df = run_analysis(sample_file, test_set[i])
result = result.append(df, ignore_index=True)
write_header(result)
def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',
test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):
global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result
precision = prec_from_pathname(sample_file)
assert(precision == prec_from_pathname(test_file))
xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims
trace("reading training samples... " + sample_file)
df = load_samples(sample_file, genvars=xvars_full)
trace("reading test samples..." + test_file)
test = load_samples(test_file, genvars=xvars_full)
trace("linear regression fit...")
lr = sklearn.linear_model.LinearRegression()
#ri = sklearn.linear_model.RidgeCV()
#la = sklearn.linear_model.LassoCV()
fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)
#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']
#xvars = ["xx", "yy", "zz", 'x', 'y', 'z', 'xzz', 'yzz']
#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']
#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']
yvars = ['Bx', 'By', 'Bz']
#yvars = ['Bz']
(Y, Ytest) = choose(yvars, df, test)
#(Y, Ytest) = (df['Bz'], test['Bz'])
xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power
(X, Xtest) = choose(xvars, df, test)
for y in yvars:
fs.fit(X, df[y])
res = pd.DataFrame({ "term": xvars, "rank": fs.ranking_ })
trace(y)
trace(res.sort_values(by = "rank"))
#xvars=list(res.sort_values(by="rank")[:26]['term'])
lr.fit(X, Y)
trace(', '.join(yvars) + " = 1 + " + ' + '.join(xvars))
test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#for i in range(len(yvars)):
# arr = [lr.intercept_[i]] + lr.coef_[i]
# arr = [ str(x) for x in arr ]
# print(yvars[i] + " = { " + ', '.join(arr) + " }")
# print("deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))
(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
trace("linear regression R^2 [train data]: %.8f" % sample_score)
trace("linear regression R^2 [test data] : %.8f" % test_score)
return pd.DataFrame(
{ "xvars": [xvars],
"yvars": [yvars],
"max_dev": [test_dev[0]],
"max%": [test_dev[1]],
"avg_dev": [test_dev[2]],
"avg%": [test_dev[3]],
"sample_score": [sample_score],
"score": [test_score],
"coeffs": [lr.coef_],
"intercept": [lr.intercept_],
"sample_file": [sample_file],
"test_file": [test_file],
"precision": [precision],
"volume_id": [volume_id_from_path(sample_file)]
})
def volume_id_from_path(path):
return basename(path)\
.replace('.sample.dat', '')\
.replace('-', '_')
def get_location_by_volume_id(id):
if 'its' in id: r_bin = 0
if 'tpc' in id: r_bin = 1
if 'tof' in id: r_bin = 2
if 'tofext' in id: r_bin = 3
if 'cal' in id: r_bin = 4
z_bin = int(id.split('_')[1][1:]) # "tofext2k_z0_q4" -> 0
if 'q1' in id: quadrant = 0
if 'q2' in id: quadrant = 1
if 'q3' in id: quadrant = 2
if 'q4' in id: quadrant = 3
return r_bin, z_bin, quadrant
def write_header(result):
#result.to_csv("magfield_params.csv")
#result.to_html("magfield_params.html")
print("# This file was generated from sysid.py at " + str(datetime.datetime.today()))
print("# " + ', '.join(result.iloc[0].yvars) + " = 1 + " + ' + '.join(result.iloc[0].xvars))
print("# barrel r: 0 < its < 80 < tpc < 250 < tof < 400 < tofext < 423 < cal < 500")
print("# barrel z: -550 < z < 550")
print("# phi: 0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi")
print("# header: Rbin Zbin Quadrant Nval_per_compoment(=20)")
print("# data: Nval_per_compoment x floats")
#print("# R^2: coefficient of determination in multiple linear regression. [0,1]")
print("")
for index, row in result.iterrows():
#print("// ** %s - R^2 %s" % (row.volume_id, row.score))
print("#" + row.volume_id)
r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)
print("%s %s %s 20" % (r_bin, z_bin, quadrant))
for i, yvar in enumerate(row.yvars):
name = row.volume_id #+ '_' + yvar.lower()
print("# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i]))
coef = [row['intercept'][i]] + list(row['coeffs'][i])
arr = [ "%.5e" % x for x in coef ]
body = ' '.join(arr)
#decl = "const double[] %s = { %s };\n" % (name, body)
#print(decl)
print(body)
print("")
#write_header(run_analysis())
run_analysis_for_all_fields()
#for i in range(10):
# for xvars in combinations(xvars_full, i+1):
#(X, Xtest) = choose(xvars, df, test)
#lr.fit(X, Y)
#ri.fit(X, Y)
#la.fit(X, Y)
#fs.fit(X, Y)
#print xvars
#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
#print("linear R^2[sample] %.8f" % sample_score)
#print("linear R^2[test] %.8f" % test_score)
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))
#print("lasso R^2[sample] %.8f" % sample_score2)
#print("lasso R^2[test] %.8f" % test_score2)
#print(la.coef_)
#for i in range(len(yvars)):
# print(yvars[i])
# print(pd.DataFrame({"Name": xvars, "Params": lr.coef_[i]}).sort_values(by='Params'))
# print("+ %e" % lr.intercept_[i])
#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)
#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)
#print("[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % sample_dev)
#print("[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev )
#print("lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev2 )
| bsd-3-clause |
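# A self-contained sketch (not part of the original script) of the fit-and-check
# pattern used above: build the combinatorial x/y/z terms, fit a linear model, and
# report the deviation statistics that deviation_stat() computes. The field component
# and sample points are synthetic; the real script reads field-map files from disk.
import numpy as np
from itertools import combinations_with_replacement
from sklearn.linear_model import LinearRegression
rng = np.random.RandomState(0)
xyz = rng.uniform(-1, 1, size=(500, 3))
terms = [''.join(c) for l in range(3)
         for c in combinations_with_replacement('xyz', l + 1)]
cols = {v: xyz[:, i] for i, v in enumerate('xyz')}
X = np.column_stack([np.prod([cols[ch] for ch in t], axis=0) for t in terms])
B = 0.2 * xyz[:, 0] - 0.1 * xyz[:, 2] ** 2 + 0.01 * rng.randn(500)  # toy field value
lr = LinearRegression().fit(X, B)
dev = np.abs(lr.predict(X) - B)
prec = 0.005
print("max %.2e (%.1f%%)  avg %.2e (%.1f%%)" %
      (dev.max(), dev.max() / prec * 100, dev.mean(), dev.mean() / prec * 100))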
tholoien/empiriciSN | Notebooks/demo_funcs.py | 1 | 9565 | import numpy as np
from matplotlib import pyplot as plt
# from astroML.plotting.tools import draw_ellipse
from astroML.plotting import setup_text_plots
# from sklearn.mixture import GMM as skl_GMM
def plot_bic(param_range,bics,lowest_comp):
plt.clf()
setup_text_plots(fontsize=16, usetex=True)
fig = plt.figure(figsize=(12, 6))
plt.plot(param_range,bics,color='blue',lw=2, marker='o')
plt.text(lowest_comp, bics.min() * 0.97 + .03 * bics.max(), '*',
fontsize=14, ha='center')
plt.xticks(param_range)
plt.ylim(bics.min() - 0.05 * (bics.max() - bics.min()),
bics.max() + 0.05 * (bics.max() - bics.min()))
plt.xlim(param_range.min() - 1, param_range.max() + 1)
plt.xticks(param_range,fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel('Number of components',fontsize=18)
plt.ylabel('BIC score',fontsize=18)
plt.show()
def get_demo_data(flist):
x0 = np.array([])
x0_err = np.array([])
x1 = np.array([])
x1_err = np.array([])
c = np.array([])
c_err = np.array([])
z = np.array([])
z_err = np.array([])
logr = np.array([])
logr_err = np.array([])
umag = np.array([])
umag_err = np.array([])
gmag = np.array([])
gmag_err = np.array([])
rmag = np.array([])
rmag_err = np.array([])
imag = np.array([])
imag_err = np.array([])
zmag = np.array([])
zmag_err = np.array([])
SB_u = np.array([])
SB_u_err = np.array([])
SB_g = np.array([])
SB_g_err = np.array([])
SB_r = np.array([])
SB_r_err = np.array([])
SB_i = np.array([])
SB_i_err = np.array([])
SB_z = np.array([])
SB_z_err = np.array([])
profile = np.array([]) #29
urad = np.array([])
urad_err = np.array([])
grad = np.array([])
grad_err = np.array([])
rrad = np.array([])
rrad_err = np.array([])
irad = np.array([])
irad_err = np.array([])
zrad = np.array([])
zrad_err = np.array([])
for filename in flist:
infile = open(filename,'r')
inlines = infile.readlines()
infile.close()
for line1 in inlines:
if line1[0] == '#': continue
line = line1.split(',')
if line[33] == 'nan' \
or line[39] == 'nan' \
or line[45] == 'nan' \
or line[51] == 'nan' \
or line[57] == 'nan': continue
# SN params
x0 = np.append(x0,float(line[7])) #x0
x0_err = np.append(x0_err, float(line[8]))
x1 = np.append(x1, float(line[9])) # x1
x1_err = np.append(x1_err, float(line[10]))
c = np.append(c, float(line[11])) # c
c_err = np.append(c_err, float(line[12]))
# Host params
z = np.append(z, float(line[4]))
z_err = np.append(z_err, 0.0)
logr = np.append(logr,np.log10(float(line[15])/float(line[42]))) # r
logr_err = np.append(logr_err, float(line[43]) / (float(line[42]) * np.log(10)))
umag = np.append(umag, float(line[18])) # u_mag
umag_err = np.append(umag_err, float(line[19]))
gmag = np.append(gmag, float(line[20])) # g_mag
gmag_err = np.append(gmag_err, float(line[21]))
rmag = np.append(rmag, float(line[22])) # r_mag
rmag_err = np.append(rmag_err, float(line[23]))
imag = np.append(imag, float(line[24])) # i_mag
imag_err = np.append(imag_err, float(line[25]))
zmag = np.append(zmag, float(line[26])) # z_mag
zmag_err = np.append(zmag_err, float(line[27]))
SB_u = np.append(SB_u, float(line[32])) # SB_u
SB_u_err = np.append(SB_u_err, float(line[33]))
SB_g = np.append(SB_g, float(line[38])) # SB_g
SB_g_err = np.append(SB_g_err, float(line[39]))
SB_r = np.append(SB_r, float(line[44])) # SB_r
SB_r_err = np.append(SB_r_err, float(line[45]))
SB_i = np.append(SB_i, float(line[50])) # SB_i
SB_i_err = np.append(SB_i_err, float(line[52]))
SB_z = np.append(SB_z, float(line[56])) # SB_z
SB_z_err = np.append(SB_z_err, float(line[57]))
# Radius params
if line[29]=='Exp': profile=np.append(profile,1)
elif line[29]=='deV': profile=np.append(profile,4)
urad = np.append(urad, float(line[30]))
urad_err = np.append(urad_err, float(line[31]))
grad = np.append(grad, float(line[36]))
grad_err = np.append(grad_err, float(line[37]))
rrad = np.append(rrad, float(line[42]))
rrad_err = np.append(rrad_err, float(line[43]))
irad = np.append(irad, float(line[48]))
irad_err = np.append(irad_err, float(line[49]))
zrad = np.append(zrad, float(line[54]))
zrad_err = np.append(zrad_err, float(line[55]))
ug = umag-gmag
ug_err = np.sqrt(umag_err**2+gmag_err**2)
ur = umag-rmag
ur_err = np.sqrt(umag_err**2+rmag_err**2)
ui = umag-imag
ui_err = np.sqrt(umag_err**2+imag_err**2)
uz = umag-zmag
uz_err = np.sqrt(umag_err**2+zmag_err**2)
gr = gmag-rmag
gr_err = np.sqrt(gmag_err**2+rmag_err**2)
gi = gmag-imag
gi_err = np.sqrt(gmag_err**2+imag_err**2)
gz = gmag-zmag
gz_err = np.sqrt(gmag_err**2+zmag_err**2)
ri = rmag-imag
ri_err = np.sqrt(rmag_err**2+imag_err**2)
rz = rmag-zmag
rz_err = np.sqrt(rmag_err**2+zmag_err**2)
iz = imag-zmag
iz_err = np.sqrt(imag_err**2+zmag_err**2)
X = np.vstack([x0, x1, c, z, logr, ug, ur, ui, uz, gr, gi, gz, ri,
rz, iz, SB_u, SB_g, SB_r, SB_i, SB_z]).T
#X = np.vstack([x0, x1, c, z, logr, ug, ur, ui, uz, gr, gi, gz, ri, rz, iz]).T
Xerr = np.zeros(X.shape + X.shape[-1:])
diag = np.arange(X.shape[-1])
Xerr[:, diag, diag] = np.vstack([x0_err**2, x1_err**2, c_err**2,
z_err**2, logr_err**2, ug_err**2,
ur_err**2, ui_err**2, uz_err**2,
gr_err**2, gi_err**2, gz_err**2,
ri_err**2, rz_err**2, iz_err**2,
SB_u_err**2, SB_g_err**2,
SB_r_err**2, SB_i_err**2,
SB_z_err**2]).T
'''
Xerr[:, diag, diag] = np.vstack([x0_err**2, x1_err**2, c_err**2,
z_err**2, logr_err**2, ug_err**2,
ur_err**2, ui_err**2, uz_err**2,
gr_err**2, gi_err**2, gz_err**2,
ri_err**2, rz_err**2, iz_err**2]).T
'''
rad_params = np.vstack([profile, umag, umag_err, urad, urad_err, gmag,
gmag_err, grad, grad_err, rmag, rmag_err, rrad,
rrad_err, imag, imag_err, irad, irad_err, zmag,
zmag_err, zrad, zrad_err]).T
return X, Xerr, rad_params
def plot_separation(test_R, sample_R):
plt.clf()
setup_text_plots(fontsize = 16, usetex = True)
fig = plt.figure(figsize = (12,8))
plt.hist(test_R, 50, histtype = 'step', color = 'red', lw = 2,
label = 'Test', range = (-2.5,2.5))
plt.hist(sample_R, 50, histtype = 'step', color = 'blue',lw = 2,
label = 'Sample', range = (-2.5,2.5))
plt.legend(loc="best")
plt.xlabel("$\log(R/R_e)$", fontsize=18)
plt.ylabel("Number", fontsize=18)
plt.xlim(-2.5, 2.5)
plt.show()
def get_local_SB(R_params, sample_logR):
SB = []
SB_err = []
for i in range(len(sample_logR)):
sep = (10**(sample_logR[i])) * float(R_params[i][11]) #separation
SBs = np.array([])
SB_errs = np.array([])
for j in range(5):
halfmag = float(R_params[i][j*4+1]) + 0.75257
magerr = float(R_params[i][j*4+2])
Re = float(R_params[i][j*4+3])
Re_err = float(R_params[i][j*4+4])
r = sep/Re
Ie = halfmag + 2.5 * np.log10(np.pi * Re**2)
Re2_unc = 2 * Re * Re_err * np.pi
            log_unc = 2.5 * Re2_unc / (np.pi * Re**2 * np.log(10))  # sigma(2.5*log10(x)) = 2.5*sigma_x/(x*ln10)
Ie_unc = np.sqrt(magerr**2 + log_unc**2)
            if str(R_params[i][0]) in ('Exp', '1', '1.0'):  # exponential profile (stored as label or code 1)
Io = Ie-1.824
Io_unc = Ie_unc
sb = Io*np.exp(-1.68*(r))
exp_unc = np.exp(-1.68*(r))*1.68*sep*Re_err/(Re**2)
sb_unc = sb * np.sqrt((Io_unc/Io)**2 +
(exp_unc/np.exp(-1.68*(r)))**2)
if np.isnan(sb_unc): sb_unc = 0.0
if sb_unc < 0: sb_unc = sb_unc*-1.0
SBs = np.append(SBs,sb)
SB_errs = np.append(SB_errs,sb_unc)
            if str(R_params[i][0]) in ('deV', '4', '4.0'):  # de Vaucouleurs profile (stored as label or code 4)
Io = Ie-8.328
Io_unc = Ie_unc
sb = Io*np.exp(-7.67*((r)**0.25))
exp_unc = np.exp(-7.67*((r)**0.25))*7.67*sep*Re_err/(4*Re**(1.25))
                sb_unc = sb*np.sqrt((Io_unc/Io)**2 + (exp_unc/np.exp(-7.67*((r)**0.25)))**2)
if np.isnan(sb_unc): sb_unc = 0.0
if sb_unc < 0: sb_unc = sb_unc*-1.0
SBs = np.append(SBs,sb)
SB_errs = np.append(SB_errs,sb_unc)
SB.append(SBs)
SB_err.append(SB_errs)
SB = np.array(SB)
SB_err = np.array(SB_err)
return SB, SB_err
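# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): a hypothetical
# driver showing how the arrays built above are meant to fit together.
# `read_func` stands for the reader defined earlier in this module that
# returns (X, Xerr, rad_params); the file name is a placeholder.
def _demo_local_sb(read_func, filename='host_table.txt'):
    X, Xerr, rad_params = read_func(filename)
    sample_logR = X[:, 4]  # column 4 of X is logr (see the np.vstack above)
    SB, SB_err = get_local_SB(rad_params, sample_logR)
    # the diagonal of Xerr holds the per-feature variances
    diag = np.arange(X.shape[-1])
    variances = Xerr[:, diag, diag]
    return SB, SB_err, variances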
| mit |
mne-tools/mne-tools.github.io | 0.21/_downloads/4334dcda1734aaa30f19ae31c8ed60ca/plot_shift_evoked.py | 29 | 1245 | """
==================================
Shifting time-scale in evoked data
==================================
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import tight_layout
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading evoked data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0),
proj=True)
ch_names = evoked.info['ch_names']
picks = mne.pick_channels(ch_names=ch_names, include=["MEG 2332"])
# Create subplots
f, (ax1, ax2, ax3) = plt.subplots(3)
evoked.plot(exclude=[], picks=picks, axes=ax1,
titles=dict(grad='Before time shifting'), time_unit='s')
# Apply relative time-shift of 500 ms
evoked.shift_time(0.5, relative=True)
evoked.plot(exclude=[], picks=picks, axes=ax2,
titles=dict(grad='Relative shift: 500 ms'), time_unit='s')
# Apply absolute time-shift of 500 ms
evoked.shift_time(0.5, relative=False)
evoked.plot(exclude=[], picks=picks, axes=ax3,
titles=dict(grad='Absolute shift: 500 ms'), time_unit='s')
tight_layout()
| bsd-3-clause |
ak681443/mana-deep | evaluation/allmods/new_train/ROCeval_Trad.py | 1 | 3603 |
# coding: utf-8
# In[1]:
import os
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import cv2
import scipy.misc
from scipy import spatial
from PIL import Image
import heapq
import sys
# In[2]:
th = int(sys.argv[1])
v = int(sys.argv[2])
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
masks = np.zeros((224,224))
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<th] = v
img1[img1>=th] = 0
masks = masks + img1
masks = masks / v
#masks = np.zeros(masks.shape)
#img1[masks>20] = 0
#print np.average(masks)
#plt.imshow(img1)
#masks = cv2.cvtColor(cv2.imread('/home/arvind/Desktop/mask.jpg'), cv2.COLOR_BGR2GRAY)
#masks = cv2.resize(masks, (224,224))
#masks[masks<100] = 71
#masks[masks!=71] = 0
# In[3]:
# In[5]:
def push_pqueue(queue, priority, value):
if len(queue)>20:
heapq.heappushpop(queue, (priority, value))
else:
heapq.heappush(queue, (priority, value))
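# Illustrative check (not part of the original script): push_pqueue keeps a
# small bounded heap of (priority, value) pairs, evicting the lowest priority
# once the heap is full, so the best-scoring matches survive.
def _demo_push_pqueue(n=1000):
    import random
    queue = []
    for k in range(n):
        push_pqueue(queue, random.random(), 'file_%d.jpg' % k)
    return sorted(queue, reverse=True)  # best matches first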
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_roc/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<=th] = v
img1[masks>60] = 0
img1[img1>th] = 0
X_test.append(np.array([img1]))
X_test = np.array(X_test).astype('float32')#/ float(np.max(X))
X_test_pred = np.reshape(X_test, (len(X_test), 224, 224, 1))
#X_test_pred = model.predict(X_test, verbose=0)
# In[8]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_roc/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_train = []
files1 = files1[:100 * int(sys.argv[4])]
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<=th] = v
img1[masks>60] = 0
img1[img1>th] = 0
X_train.append(np.array([img1]))
X_train = np.array(X_train).astype('float32')#/ float(np.max(X))
X_train_pred = np.reshape(X_train, (len(X_train), 224, 224, 1))
#X_train_pred = model.predict(X_train, verbose=0)
# In[9]:
import time
mypath = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_roc/'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
files = files[:100 * int(sys.argv[4])]
start_time = time.time()
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_roc/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
cutoff = float(sys.argv[5])
true_positive = 0
false_positive = 0
false_negative = 0
true_negative = 0
for i in np.arange(0, len(files1)):
filen1 = files1[i]
pred = X_test_pred[i]
max_confidence = 0.0
max_file = None
pqueue = []
for j in np.arange(0, len(files)):
filen = files[j]
tpred = X_train_pred[j]
score = 1- spatial.distance.cosine(tpred.flatten(), pred.flatten())
if score > cutoff:
if filen.split('_')[1].split('.')[0] == filen1.split('_')[1].split('.')[0]:
true_positive+=1
else:
false_positive+=1
else:
if filen.split('_')[1].split('.')[0] == filen1.split('_')[1].split('.')[0]:
false_negative+=1
else:
true_negative+=1
print "\n!@#$", float(len(files1)) , float(len(files)), true_positive, false_negative, false_positive, true_negative,"\n"
| apache-2.0 |
fyffyt/pylearn2 | pylearn2/scripts/papers/jia_huang_wkshp_11/evaluate.py | 44 | 3208 | from __future__ import print_function
from optparse import OptionParser
import warnings
try:
from sklearn.metrics import classification_report
except ImportError:
classification_report = None
warnings.warn("couldn't find sklearn.metrics.classification_report")
try:
from sklearn.metrics import confusion_matrix
except ImportError:
confusion_matrix = None
warnings.warn("couldn't find sklearn.metrics.metrics.confusion_matrix")
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
import numpy as np
def test(model, X, y):
print("Evaluating svm")
y_pred = model.predict(X)
#try:
if True:
acc = (y == y_pred).mean()
print("Accuracy ",acc)
"""except:
print("something went wrong")
print('y:')
print(y)
print('y_pred:')
print(y_pred)
print('extra info')
print(type(y))
print(type(y_pred))
print(y.dtype)
print(y_pred.dtype)
print(y.shape)
print(y_pred.shape)
raise
"""
#
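# Hypothetical helper (not in the original script): classification_report and
# confusion_matrix are imported above but never used; this sketch shows how
# they could supplement test() when sklearn is available.
def detailed_report(model, X, y):
    y_pred = model.predict(X)
    if classification_report is not None:
        print(classification_report(y, y_pred))
    if confusion_matrix is not None:
        print(confusion_matrix(y, y_pred))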
def get_test_labels(cifar10, cifar100, stl10):
assert cifar10 + cifar100 + stl10 == 1
if stl10:
print('loading entire stl-10 test set just to get the labels')
stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/test.pkl")
return stl10.y
if cifar10:
print('loading entire cifar10 test set just to get the labels')
cifar10 = CIFAR10(which_set = 'test')
return np.asarray(cifar10.y)
if cifar100:
print('loading entire cifar100 test set just to get the fine labels')
cifar100 = CIFAR100(which_set = 'test')
return np.asarray(cifar100.y_fine)
assert False
def main(model_path,
test_path,
dataset,
**kwargs):
model = serial.load(model_path)
cifar100 = dataset == 'cifar100'
cifar10 = dataset == 'cifar10'
stl10 = dataset == 'stl10'
assert cifar10 + cifar100 + stl10 == 1
y = get_test_labels(cifar10, cifar100, stl10)
X = get_features(test_path, False, False)
if stl10:
num_examples = 8000
if cifar10 or cifar100:
num_examples = 10000
if not X.shape[0] == num_examples:
raise AssertionError('Expected %d examples but got %d' % (num_examples, X.shape[0]))
assert y.shape[0] == num_examples
test(model,X,y)
if __name__ == '__main__':
"""
Useful for quick tests.
Usage: python train_bilinear.py
"""
parser = OptionParser()
parser.add_option("-m", "--model",
action="store", type="string", dest="model_path")
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-o", action="store", dest="output", default = None, help="path to write the report to")
parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)
#(options, args) = parser.parse_args()
#assert options.output
main(model_path='final_model.pkl',
test_path='test_features.npy',
dataset = 'cifar100',
)
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/tests/test_common.py | 20 | 6311 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import re
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
set_checking_parameters,
check_parameters_default_constructible,
check_no_fit_attributes_set_in_init,
check_class_weight_balanced_linear_classifier)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, cloneable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield (_named_check(check_parameters_default_constructible, name),
name, Estimator)
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
estimator = Estimator()
# check this on class
yield _named_check(
check_no_fit_attributes_set_in_init, name), name, Estimator
for check in _yield_all_checks(name, estimator):
set_checking_parameters(estimator)
yield _named_check(check, name), name, estimator
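# Illustrative note (not part of the test suite): a third-party estimator can
# run essentially the same battery of checks through the public helper below;
# `EstimatorClass` is a placeholder for a user-defined estimator class.
def _check_third_party_estimator(EstimatorClass):
    from sklearn.utils.estimator_checks import check_estimator
    check_estimator(EstimatorClass)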
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if ('class_weight' in clazz().get_params().keys() and
issubclass(clazz, LinearClassifierMixin))]
for name, Classifier in linear_classifiers:
yield _named_check(check_class_weight_balanced_linear_classifier,
name), name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_all_tests_are_importable():
# Ensure that for each contentful subpackage, there is a test directory
# within it that is also a subpackage (i.e. a directory with __init__.py)
HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
\.externals(\.|$)|
\.tests(\.|$)|
\._
''')
lookup = dict((name, ispkg)
for _, name, ispkg
in pkgutil.walk_packages(sklearn.__path__,
prefix='sklearn.'))
missing_tests = [name for name, ispkg in lookup.items()
if ispkg
and not HAS_TESTS_EXCEPTIONS.search(name)
and name + '.tests' not in lookup]
assert_equal(missing_tests, [],
'{0} do not have `tests` subpackages. Perhaps they require '
'__init__.py or an add_subpackage directive in the parent '
'setup.py'.format(missing_tests))
| bsd-3-clause |
kagayakidan/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
monkeybutter/AeroMetTree | datasets/dataset_generator.py | 1 | 2702 | import numpy as np
import pandas as pd
from random import random, randint
from datetime import datetime
def randomizer(i0, i1, prob):
if random() < prob and i0 < i1-1:
new_i0 = randint(i0, i1-1)
new_i1 = randint(new_i0+1, i1)
return (new_i0, new_i1)
else:
return None
def splitter(i, j):
if j-i > 2:
return randint(i+1, j-1)
else:
return None
def create_simple_data(size):
input_a = np.sort(np.random.uniform(low=0.0, high=1000.0, size=size))
i = 0
class_var = np.random.normal(i, .1, size=size)
a, b = 0, size-1
while True:
i += 1
bounds = randomizer(a, b, .8)
if bounds is None:
break
class_var[a:b] = np.random.normal(i, .1, size=b-a)
a, b = bounds
return pd.DataFrame(np.vstack((input_a, class_var)).T, columns=['input1', 'class_var'])
def create_simple_data2(size, min_size):
input_a = np.sort(np.random.uniform(low=0.0, high=1000.0, size=size))
class_var = np.zeros(size)
def write_chunk(i, j, min_size, level):
class_var[i:j] = np.random.normal(level, .1, size=j-i)
        k = splitter(i, j)
        if k is None:  # interval too small to split further
            return
        if k - i > min_size:
write_chunk(i, k, min_size, level+1)
if j-k > min_size:
write_chunk(k, j, min_size, level+1)
write_chunk(0, size-1, min_size, 0)
return pd.DataFrame(np.vstack((input_a, class_var)).T, columns=['input1', 'class_var'])
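# Illustrative usage (not part of the original module): generate a small
# piecewise-constant series and inspect it; matplotlib is only needed for
# this optional demo.
def _demo_simple_data(size=500, min_size=20):
    df = create_simple_data2(size, min_size)
    print(df.describe())
    try:
        from matplotlib import pyplot as plt
        plt.plot(df['input1'], df['class_var'], '.')
        plt.show()
    except ImportError:
        pass
    return df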
def transform(name):
df = pd.read_csv("./{}.csv".format(name))
df['time'] = df['time'].map(lambda x: datetime.strptime(x, '%H:%M'))
df['time'] = df['time'].map(lambda x: ((x.minute / 60.0 + x.hour) / 24.0) * 360)
df['date'] = df['date'].map(lambda x: datetime.strptime(x, '%Y-%m-%d'))
df['date'] = df['date'].map(lambda x: (x.timetuple().tm_yday / 365.0)*360.0)
df['date'] = df['date'].map(lambda x: int(10 * round(float(x)/10)))
df['gfs_press'] = df['gfs_press'].map(lambda x: int(round(float(x))))
df['gfs_rh'] = df['gfs_rh'].map(lambda x: int(round(float(x))))
df['gfs_temp'] = df['gfs_temp'].map(lambda x: int(round(float(x))))
df['gfs_wind_dir'] = df['gfs_wind_dir'].map(lambda x: int(10 * round(float(x)/10)))
df['gfs_wind_spd'] = df['gfs_wind_spd'].map(lambda x: int(.5 * round(float(x)/.5)))
#df = df.drop(['metar_press', 'metar_rh', 'metar_temp', 'metar_wind_dir'], 1)
df = df.drop(['metar_wind_dir'], 1)
df.index = range(0, len(df.index))
df.to_csv("./{}_clean.csv".format(name))
if __name__ == "__main__":
airports = ["eddt", "egll", "lebl", "lfpg", "limc", "yssy", "zbaa"]
for airport in airports:
transform(airport) | mit |
arjoly/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
ilo10/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighboring often share the same leaf of a tree and therefore
share large parts of their hashed representation. This allows to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
pandyag/trading-with-python | lib/classes.py | 76 | 7847 | """
worker classes
@author: Jev Kuznetsov
Licence: GPL v2
"""
__docformat__ = 'restructuredtext'
import os
import logger as logger
import yahooFinance as yahoo
from functions import returns, rank
from datetime import date
from pandas import DataFrame, Series
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Symbol(object):
'''
Symbol class, the foundation of Trading With Python library,
This class acts as an interface to Yahoo data, Interactive Brokers etc
'''
def __init__(self,name):
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('class created.')
self.dataDir = os.getenv("USERPROFILE")+'\\twpData\\symbols\\'+self.name
self.log.debug('Data dir:'+self.dataDir)
self.ohlc = None # historic OHLC data
def downloadHistData(self, startDate=(2010,1,1),endDate=date.today().timetuple()[:3],\
source = 'yahoo'):
'''
get historical OHLC data from a data source (yahoo is default)
startDate and endDate are tuples in form (d,m,y)
'''
self.log.debug('Getting OHLC data')
self.ohlc = yahoo.getHistoricData(self.name,startDate,endDate)
def histData(self,column='adj_close'):
'''
Return a column of historic data.
Returns
-------------
df : DataFrame
'''
s = self.ohlc[column]
return DataFrame(s.values,s.index,[self.name])
@property
def dayReturns(self):
''' close-close returns '''
return (self.ohlc['adj_close']/self.ohlc['adj_close'].shift(1)-1)
#return DataFrame(s.values,s.index,[self.name])
class Portfolio(object):
def __init__(self,histPrice,name=''):
"""
Constructor
Parameters
----------
histPrice : historic price
"""
self.histPrice = histPrice
self.params = DataFrame(index=self.symbols)
self.params['capital'] = 100*np.ones(self.histPrice.shape[1],dtype=np.float)
self.params['last'] = self.histPrice.tail(1).T.ix[:,0]
self.params['shares'] = self.params['capital']/self.params['last']
self.name= name
def setHistPrice(self,histPrice):
self.histPrice = histPrice
def setShares(self,shares):
""" set number of shares, adjust capital
shares: list, np array or Series
"""
if len(shares) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['shares'] = shares
self.params['capital'] = self.params['shares']*self.params['last']
def setCapital(self,capital):
""" Set target captial, adjust number of shares """
if len(capital) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['capital'] = capital
self.params['shares'] = self.params['capital']/self.params['last']
def calculateStatistics(self,other=None):
''' calculate spread statistics, save internally '''
res = {}
res['micro'] = rank(self.returns[-1],self.returns)
res['macro'] = rank(self.value[-1], self.value)
res['last'] = self.value[-1]
if other is not None:
res['corr'] = self.returns.corr(returns(other))
return Series(res,name=self.name)
@property
def symbols(self):
return self.histPrice.columns.tolist()
@property
def returns(self):
return (returns(self.histPrice)*self.params['capital']).sum(axis=1)
@property
def value(self):
return (self.histPrice*self.params['shares']).sum(axis=1)
def __repr__(self):
return ("Portfolio %s \n" % self.name ) + str(self.params)
#return ('Spread %s :' % self.name ) + str.join(',',
# ['%s*%.2f' % t for t in zip(self.symbols,self.capital)])
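# Illustrative usage (not part of the original library): build a Portfolio
# from a short synthetic price history; the column names and capital figures
# are arbitrary placeholders.
def _demo_portfolio():
    idx = pd.date_range('2014-01-01', periods=100)
    prices = DataFrame({'AAA': 50 + np.random.randn(100).cumsum(),
                        'BBB': 30 + np.random.randn(100).cumsum()}, index=idx)
    p = Portfolio(prices, name='demo')
    p.setCapital([1000., 1000.])  # rescales share counts to the last quote
    return p.calculateStatistics()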
class Spread(object):
'''
Spread class, used to build a spread out of two symbols.
'''
def __init__(self,stock,hedge,beta=None):
''' init with symbols or price series '''
if isinstance(stock,str) and isinstance(hedge,str):
self.symbols = [stock,hedge]
self._getYahooData()
elif isinstance(stock,pd.Series) and isinstance(hedge,pd.Series):
self.symbols = [stock.name,hedge.name]
self.price = pd.DataFrame(dict(zip(self.symbols,[stock,hedge]))).dropna()
else:
raise ValueError('Both stock and hedge should be of the same type, symbol string or Series')
# calculate returns
self.returns = self.price.pct_change()
if beta is not None:
self.beta = beta
else:
self.estimateBeta()
# set data
self.data = pd.DataFrame(index = self.symbols)
self.data['beta'] = pd.Series({self.symbols[0]:1., self.symbols[1]:-self.beta})
def calculateShares(self,bet):
''' set number of shares based on last quote '''
if 'price' not in self.data.columns:
print 'Getting quote...'
self.getQuote()
self.data['shares'] = bet*self.data['beta']/self.data['price']
def estimateBeta(self,plotOn=False):
""" linear estimation of beta """
x = self.returns[self.symbols[1]] # hedge
y = self.returns[self.symbols[0]] # stock
#avoid extremes
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
if plotOn:
plt.plot(x,y,'o')
plt.grid(True)
iteration = 1
nrOutliers = 1
while iteration < 3 and nrOutliers > 0 :
(a,b) = np.polyfit(x,y,1)
yf = np.polyval([a,b],x)
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
if plotOn:
yf = x*beta
plt.plot(x,yf,'-',color='red')
plt.xlabel(self.symbols[1])
plt.ylabel(self.symbols[0])
self.beta = beta
return beta
@property
def spread(self):
''' return daily returns of the pair '''
return (self.returns*self.data['beta']).sum(1)
def getQuote(self):
''' get current quote from yahoo '''
q = yahoo.getQuote(self.symbols)
self.data['price'] = q['last']
def _getYahooData(self, startDate=(2007,1,1)):
""" fetch historic data """
data = {}
for symbol in self.symbols:
print 'Downloading %s' % symbol
data[symbol]=(yahoo.getHistoricData(symbol,sDate=startDate)['adj_close'] )
self.price = pd.DataFrame(data).dropna()
def __repr__(self):
return 'Spread 1*%s & %.2f*%s ' % (self.symbols[0],-self.beta,self.symbols[1])
@property
def name(self):
return str.join('_',self.symbols)
if __name__=='__main__':
    s = Spread('SPY', 'IWM')  # Spread expects two symbol strings (stock, hedge)
| bsd-3-clause |
gem/oq-engine | openquake/sep/utils.py | 1 | 9541 | from typing import Callable, Union
import numpy as np
import pandas as pd
import pyproj as pj
try:
from osgeo import gdal
except ImportError:
gdal = None
from numpy.lib.stride_tricks import as_strided
def sample_raster_at_points(raster_file: str, lon_pts: np.ndarray, lat_pts:
np.ndarray, out_of_bounds_val: float=0.0):
"""
Gets the values of a raster at each (lon,lat) point specified.
This is done using a nearest-neighbor approach; no interpolation is
performed.
:param raster_file:
A filename (and path) of a raster value to be opened by GDAL.
:param lon_pts:
Longitudes of points
:param lat_pts:
Latitudes of points
:param out_of_bounds_val:
Value to be returned if the points fall outside of the raster's extent.
Defaults to 0.0
:returns:
Numpy array of raster values sampled at points.
"""
if isinstance(lon_pts, pd.Series):
lon_pts = lon_pts.values
if isinstance(lat_pts, pd.Series):
lat_pts = lat_pts.values
raster_ds = gdal.Open(raster_file)
gt = raster_ds.GetGeoTransform()
raster_proj_wkt = raster_ds.GetProjection()
raster_data = raster_ds.GetRasterBand(1).ReadAsArray()
raster_proj = pj.CRS(raster_proj_wkt)
if raster_proj.to_epsg() != 4326:
trans = pj.transformer.Transformer.from_crs("epsg:4326", raster_proj)
lat_pts, lon_pts = trans.transform(lat_pts, lon_pts)
x_rast = np.int_(np.round((lon_pts - gt[0]) / gt[1]))
y_rast = np.int_(np.round((lat_pts - gt[3]) / gt[5]))
def sample_raster(raster, row, col):
if (0 <= row < raster.shape[0]) and (0 <= col < raster.shape[1]):
return raster[row, col]
else:
return out_of_bounds_val
interp_vals = np.array(
[
sample_raster(raster_data, y_rast[i], xr)
for i, xr in enumerate(x_rast)
]
)
return interp_vals
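# Illustrative usage (not part of the original module): 'dem.tif' and the
# coordinate arrays below are placeholders, not files shipped with the engine.
def _example_sample_raster(raster_file='dem.tif'):
    lons = np.array([14.2, 14.5, 14.9])
    lats = np.array([40.8, 40.9, 41.1])
    return sample_raster_at_points(raster_file, lons, lats,
                                   out_of_bounds_val=np.nan)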
def make_2d_array_strides(arr, window_radius, linear=True):
"""
Creates an array of strides representing indices for windows/sub-arrays
over a larger array, for rapid calculations of rolling window functions.
:param arr:
Array of values to be used in the calculations.
:param window_radius:
Radius of the window (not counting the origin) in array count units.
:param linear:
Flag specifying the shape of the stride collection.
:returns:
Array of strides
Slightly modified from https://gist.github.com/thengineer/10024511
"""
if np.isscalar(window_radius):
window_radius = (window_radius, window_radius)
ax = np.zeros(
shape=(
arr.shape[0] + 2 * window_radius[0],
arr.shape[1] + 2 * window_radius[1],
)
)
ax[:] = np.nan
ax[
window_radius[0] : ax.shape[0] - window_radius[0],
window_radius[1] : ax.shape[1] - window_radius[1],
] = arr
shape = arr.shape + (1 + 2 * window_radius[0], 1 + 2 * window_radius[1])
strides = ax.strides + ax.strides
s = as_strided(ax, shape=shape, strides=strides)
return s.reshape(arr.shape + (shape[2] * shape[3],)) if linear else s
def rolling_array_operation(
array: np.ndarray, func: Callable, window_size: int, trim: bool = False
) -> np.ndarray:
"""
Rolls a function that operates on a square subarray over the array. The
values in the resulting array will be at the center of each subarray.
:param array:
Array of values that the window function will be passed over.
:param func:
Function to be applied to each subarray. Should operate on an array and
return a scalar.
:param window_size:
Dimension of the (square) sub-array or window in array counts, not in
spatial (or other dimensional) units. Should be an odd
integer, so that the result of the function can be unambiguously applied
to the center of the window.
:param trim:
Boolean flag to trim off the borders of the resulting array, from where
the window overhangs the array.
"""
if window_size % 2 != 1:
raise ValueError(
"window_size should be an odd integer; {} passed".format(
window_size
)
)
window_rad = window_size // 2
strides = make_2d_array_strides(array, window_rad)
strides = np.ma.masked_array(strides, mask=np.isnan(strides))
result = func(strides, axis=-1).data
if trim:
result = result[window_rad:-window_rad, window_rad:-window_rad]
return result
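# Illustrative usage (not part of the original module): a 3x3 moving average
# over a small synthetic grid; np.mean reduces each masked window to a scalar.
def _example_rolling_mean():
    grid = np.arange(36, dtype=float).reshape(6, 6)
    return rolling_array_operation(grid, np.mean, window_size=3)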
def rolling_raster_operation(
in_raster,
func: Callable,
window_size: int,
outfile=None,
raster_band: int = 1,
trim: bool = False,
write: bool = False,
):
    if trim:
        raise NotImplementedError("Trimming not supported at this time.")
if outfile is None:
if write == True:
raise ValueError("Must specify raster outfile")
else:
outfile = "./tmp.tiff"
ds = gdal.Open(in_raster)
rast = ds.GetRasterBand(raster_band).ReadAsArray()
rast = np.asarray(rast)
new_arr = rolling_array_operation(rast, func, window_size, trim=trim)
drv = gdal.GetDriverByName("GTiff")
new_ds = drv.Create(
outfile,
xsize=new_arr.shape[1],
ysize=new_arr.shape[0],
bands=1,
eType=gdal.GDT_Float32,
)
new_ds.SetGeoTransform(ds.GetGeoTransform())
new_ds.SetProjection(ds.GetProjection())
new_ds.GetRasterBand(1).WriteArray(new_arr)
if write:
new_ds.FlushCache()
new_ds = None
return new_ds
def relief(arr, axis=-1):
return np.amax(arr, axis=axis) - np.amin(arr, axis=axis)
def make_local_relief_raster(
input_dem, window_size, outfile=None, write=False, trim=False
):
relief_arr = rolling_raster_operation(
input_dem, relief, window_size, outfile, write=write, trim=trim
)
return relief_arr
def slope_angle_to_gradient(slope, unit: str = "deg"):
if unit in ["radians", "radian", "rad"]:
slope_r = slope
elif unit in ["degrees", "deg", "degree"]:
slope_r = np.radians(slope)
else:
raise ValueError('Slope units need to be "radians" or "degrees"')
return np.tan(slope_r)
def vs30_from_slope(
slope: Union[float, np.array],
slope_unit: str = "deg",
tectonic_region_type: str = "active",
method: str = "wald_allen_2007",
) -> Union[float, np.array]:
"""
"""
if slope_unit in ["grad", "gradient"]:
grad = slope
else:
grad = slope_angle_to_gradient(slope, unit=slope_unit)
if method == "wald_allen_2007":
vs30 = vs30_from_slope_wald_allen_2007(
grad, tectonic_region_type=tectonic_region_type
)
else:
raise NotImplementedError(f"{method} not yet implemented.")
return vs30
def vs30_from_slope_wald_allen_2007(
gradient: Union[float, np.array], tectonic_region_type: str = "active"
) -> Union[float, np.array]:
"""
Calculates Vs30 from the topographic slope (given as a gradient) using
the methods of Wald and Allen, 2007 BSSA.
Either 'active' or 'stable' are valid for the `tectonic_region_type`
argument.
"""
if np.isscalar(gradient):
grad = np.array([gradient])
if tectonic_region_type == "active":
vs30 = vs30_from_slope_wald_allen_2007_active(grad)
elif tectonic_region_type == "stable":
vs30 = vs30_from_slope_wald_allen_2007_stable(grad)
else:
raise ValueError(
f"'{tectonic_region_type}' must be 'active' or 'stable'"
)
vs30 = vs30[0]
else:
if tectonic_region_type == "active":
vs30 = vs30_from_slope_wald_allen_2007_active(gradient)
elif tectonic_region_type == "stable":
vs30 = vs30_from_slope_wald_allen_2007_stable(gradient)
else:
raise ValueError(
f"'{tectonic_region_type}' must be 'active' or 'stable'"
)
return vs30
def vs30_from_slope_wald_allen_2007_active(gradient: np.array) -> np.ndarray:
vs30 = np.zeros(gradient.shape)
vs30[(gradient < 1.0e-4)] = np.mean([0.0, 180.0])
vs30[(1.0e-4 <= gradient) & (gradient < 2.2e-3)] = np.mean([180.0, 240.0])
vs30[(2.2e-3 <= gradient) & (gradient < 6.3e-3)] = np.mean([240.0, 300.0])
vs30[(6.3e-3 <= gradient) & (gradient < 0.0180)] = np.mean([300.0, 360.0])
vs30[(0.0180 <= gradient) & (gradient < 0.0500)] = np.mean([360.0, 490.0])
vs30[(0.0500 <= gradient) & (gradient < 0.1000)] = np.mean([490.0, 620.0])
vs30[(0.1000 <= gradient) & (gradient < 0.1380)] = np.mean([620.0, 760.0])
vs30[(0.1380 <= gradient)] = 760.0
return vs30
def vs30_from_slope_wald_allen_2007_stable(gradient: np.array) -> np.ndarray:
vs30 = np.zeros(gradient.shape)
vs30[(gradient < 2.0e-5)] = np.mean([0.0, 180.0])
vs30[(2.0e-5 <= gradient) & (gradient < 2.0e-3)] = np.mean([180.0, 240.0])
vs30[(2.0e-3 <= gradient) & (gradient < 4.0e-3)] = np.mean([240.0, 300.0])
vs30[(4.0e-3 <= gradient) & (gradient < 7.2e-3)] = np.mean([300.0, 360.0])
vs30[(7.2e-3 <= gradient) & (gradient < 0.0130)] = np.mean([360.0, 490.0])
vs30[(0.0130 <= gradient) & (gradient < 0.0180)] = np.mean([490.0, 620.0])
vs30[(0.0180 <= gradient) & (gradient < 0.0250)] = np.mean([620.0, 760.0])
vs30[(0.0250 <= gradient)] = 760.0
return vs30
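# Illustrative usage (not part of the original module): map a few slope values
# given in degrees to Vs30 with the Wald & Allen (2007) relation for an
# active tectonic region.
def _example_vs30():
    slopes_deg = np.array([0.01, 0.5, 2.0, 10.0])
    return vs30_from_slope(slopes_deg, slope_unit='deg',
                           tectonic_region_type='active',
                           method='wald_allen_2007')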
| agpl-3.0 |
ioos/system-test | Theme_2_Extreme_Events/Scenario_2A/Extremes_Wind/utilities.py | 2 | 9758 | """
Utility functions for Scenario_A_Extreme_Winds.ipynb
"""
from IPython.display import HTML, Javascript, display
import uuid
import fnmatch
import lxml.html
from io import BytesIO
from lxml import etree
from urllib import urlopen
from warnings import warn
from windrose import WindroseAxes
import numpy as np
# Scientific stack.
import matplotlib.pyplot as plt
from owslib import fes
from owslib.ows import ExceptionReport
from pandas import DataFrame, read_csv
from netCDF4 import MFDataset, date2index, num2date
def insert_progress_bar(title='Please wait...', color='blue'):
"""Inserts a simple progress bar into the IPython notebook."""
print(title)
divid = str(uuid.uuid4())
pb = HTML(
"""
<div style="border: 1px solid black; width:500px">
<div id="%s" style="background-color:red; width:0%%"> </div>
</div>
""" % divid)
display(pb)
return divid
def update_progress_bar(divid, percent_compelte):
"""Updates the simple progress bar into the IPython notebook."""
display(Javascript("$('div#%s').width('%i%%')" % (divid, int(percent_compelte))))
def fes_date_filter(start_date='1900-01-01', stop_date='2100-01-01',
constraint='overlaps'):
"""Hopefully something like this will be implemented in fes soon."""
if constraint == 'overlaps':
propertyname = 'apiso:TempExtent_begin'
start = fes.PropertyIsLessThanOrEqualTo(propertyname=propertyname,
literal=stop_date)
propertyname = 'apiso:TempExtent_end'
stop = fes.PropertyIsGreaterThanOrEqualTo(propertyname=propertyname,
literal=start_date)
elif constraint == 'within':
propertyname = 'apiso:TempExtent_begin'
start = fes.PropertyIsGreaterThanOrEqualTo(propertyname=propertyname,
literal=start_date)
propertyname = 'apiso:TempExtent_end'
stop = fes.PropertyIsLessThanOrEqualTo(propertyname=propertyname,
literal=stop_date)
return start, stop
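# Illustrative usage (an assumption, not part of the original helpers): the
# temporal filters are typically combined with a bounding-box filter before a
# CSW query; fes.BBox and fes.And are assumed available from owslib.fes.
def _example_csw_filter(bounding_box):
    start, stop = fes_date_filter('2014-06-01', '2014-07-01')
    bbox = fes.BBox(bounding_box)  # [minx, miny, maxx, maxy]
    return fes.And([start, stop, bbox])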
def service_urls(records, service='odp:url'):
"""Extract service_urls of a specific type (DAP, SOS) from records."""
service_string = 'urn:x-esri:specification:ServiceType:' + service
urls = []
for key, rec in records.items():
# Create a generator object, and iterate through it until the match is
# found if not found, gets the default value (here "none").
url = next((d['url'] for d in rec.references if
d['scheme'] == service_string), None)
if url is not None:
urls.append(url)
return urls
def get_station_longName(station, provider):
"""Get longName for specific station using DescribeSensor
request."""
if provider.upper() == 'NDBC':
url = ('http://sdf.ndbc.noaa.gov/sos/server.php?service=SOS&'
'request=DescribeSensor&version=1.0.0&outputFormat=text/xml;subtype="sensorML/1.0.1"&'
'procedure=urn:ioos:station:wmo:%s') % station
elif provider.upper() == 'COOPS':
url = ('http://opendap.co-ops.nos.noaa.gov/ioos-dif-sos/SOS?service=SOS&'
'request=DescribeSensor&version=1.0.0&'
'outputFormat=text/xml;subtype="sensorML/1.0.1"&'
'procedure=urn:ioos:station:NOAA.NOS.CO-OPS:%s') % station
try:
tree = etree.parse(urlopen(url))
root = tree.getroot()
namespaces = {'sml': "http://www.opengis.net/sensorML/1.0.1"}
longName = root.xpath("//sml:identifier[@name='longName']/sml:Term/sml:value/text()", namespaces=namespaces)
if len(longName) == 0:
# Just return the station id
return station
else:
return longName[0]
except Exception as e:
warn(e)
# Just return the station id
return station
def collector2df(collector, station, sos_name, provider='COOPS'):
"""Request CSV response from SOS and convert to Pandas DataFrames."""
collector.features = [station]
collector.variables = [sos_name]
long_name = get_station_longName(station, provider)
try:
response = collector.raw(responseFormat="text/csv")
data_df = read_csv(BytesIO(response.encode('utf-8')),
parse_dates=True,
index_col='date_time')
col = 'wind_speed (m/s)'
data_df['Observed Data'] = data_df[col]
except ExceptionReport as e:
# warn("Station %s is not NAVD datum. %s" % (long_name, e))
print(str(e))
data_df = DataFrame() # Assigning an empty DataFrame for now.
data_df.name = long_name
data_df.provider = provider
return data_df
def get_coordinates(bounding_box, bounding_box_type=''):
"""Create bounding box coordinates for the map."""
coordinates = []
if bounding_box_type == "box":
coordinates.append([bounding_box[1], bounding_box[0]])
coordinates.append([bounding_box[1], bounding_box[2]])
coordinates.append([bounding_box[3], bounding_box[2]])
coordinates.append([bounding_box[3], bounding_box[0]])
coordinates.append([bounding_box[1], bounding_box[0]])
return coordinates
def inline_map(m):
"""From http://nbviewer.ipython.org/gist/rsignell-usgs/
bea6c0fe00a7d6e3249c."""
m._build_map()
srcdoc = m.HTML.replace('"', '"')
embed = HTML('<iframe srcdoc="{srcdoc}" '
'style="width: 100%; height: 500px; '
'border: none"></iframe>'.format(srcdoc=srcdoc))
return embed
def css_styles():
return HTML("""
<style>
.info {
background-color: #fcf8e3; border-color: #faebcc;
border-left: 5px solid #8a6d3b; padding: 0.5em; color: #8a6d3b;
}
.success {
background-color: #d9edf7; border-color: #bce8f1;
border-left: 5px solid #31708f; padding: 0.5em; color: #31708f;
}
.error {
background-color: #f2dede; border-color: #ebccd1;
border-left: 5px solid #a94442; padding: 0.5em; color: #a94442;
}
.warning {
background-color: #fcf8e3; border-color: #faebcc;
border-left: 5px solid #8a6d3b; padding: 0.5em; color: #8a6d3b;
}
</style>
""")
def gather_station_info(obs_loc_df, st_list, source):
st_data = obs_loc_df['station_id']
lat_data = obs_loc_df['latitude (degree)']
lon_data = obs_loc_df['longitude (degree)']
for k in range(0, len(st_data)):
station_name = st_data[k]
if station_name in st_list:
continue
else:
st_list[station_name] = {}
st_list[station_name]["lat"] = lat_data[k]
st_list[station_name]["source"] = source
st_list[station_name]["lon"] = lon_data[k]
return st_list
def get_ncfiles_catalog(station_id, jd_start, jd_stop):
station_name = station_id.split(":")[-1]
uri = 'http://dods.ndbc.noaa.gov/thredds/dodsC/data/stdmet'
url = ('%s/%s/' % (uri, station_name))
urls = _url_lister(url)
filetype = "*.nc"
file_list = [filename for filename in fnmatch.filter(urls, filetype)]
files = [fname.split('/')[-1] for fname in file_list]
urls = ['%s/%s/%s' % (uri, station_name, fname) for fname in files]
try:
nc = MFDataset(urls)
time_dim = nc.variables['time']
calendar = 'gregorian'
idx_start = date2index(jd_start, time_dim, calendar=calendar,
select='nearest')
idx_stop = date2index(jd_stop, time_dim, calendar=calendar,
select='nearest')
dir_dim = np.array(nc.variables['wind_dir'][idx_start:idx_stop, 0, 0], dtype=float)
speed_dim = np.array(nc.variables['wind_spd'][idx_start:idx_stop, 0, 0])
# Replace fill values with NaN
speed_dim[speed_dim == nc.variables['wind_spd']._FillValue] = np.nan
if dir_dim.ndim != 1:
dir_dim = dir_dim[:, 0]
speed_dim = speed_dim[:, 0]
time_dim = nc.variables['time']
dates = num2date(time_dim[idx_start:idx_stop],
units=time_dim.units,
calendar='gregorian').squeeze()
mask = np.isfinite(speed_dim)
data = dict()
data['wind_speed (m/s)'] = speed_dim[mask]
data['wind_from_direction (degree)'] = dir_dim[mask]
time = dates[mask]
# columns = ['wind_speed (m/s)',
# 'wind_from_direction (degree)']
df = DataFrame(data=data, index=time)
return df
except Exception as e:
print str(e)
df = DataFrame()
return df
def new_axes():
fig = plt.figure(figsize=(8, 8), dpi=80, facecolor='w', edgecolor='w')
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect, axisbg='w')
fig.add_axes(ax)
return ax
def set_legend(ax, label=''):
"""Adjust the legend box."""
l = ax.legend(title=label)
plt.setp(l.get_texts(), fontsize=8)
def _url_lister(url):
urls = []
connection = urlopen(url)
dom = lxml.html.fromstring(connection.read())
for link in dom.xpath('//a/@href'):
urls.append(link)
return urls
def nearxy(x, y, xi, yi):
"""Find the indices x[i] of arrays (x,y) closest to the points (xi, yi)."""
ind = np.ones(len(xi), dtype=int)
dd = np.ones(len(xi), dtype='float')
for i in np.arange(len(xi)):
dist = np.sqrt((x-xi[i])**2 + (y-yi[i])**2)
ind[i] = dist.argmin()
dd[i] = dist[ind[i]]
return ind, dd
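# Illustrative usage (not part of the original helpers): index of the model
# grid point closest to each observation location, plus the distances.
def _example_nearxy():
    x = np.linspace(-75.0, -70.0, 50)
    y = np.linspace(35.0, 40.0, 50)
    xi = np.array([-72.3, -71.1])
    yi = np.array([36.5, 38.2])
    return nearxy(x, y, xi, yi)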
| unlicense |
ddeunagomez/kea-landscape-tool | src/python_plot_generator/json_to_plots.py | 1 | 4234 | import json
import math
import matplotlib as m
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import sys
import ast
def makeColormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return m.colors.LinearSegmentedColormap('CustomMap', cdict)
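# Illustrative usage (not part of the original script): build a custom
# colormap with the same sequence as the commented example further below and
# apply it to random data.
def _example_colormap():
    cmap = makeColormap([(0, 1, 0), (1, 1, 0), 0.1, (1, 1, 0), (1, 0.5, 0),
                         0.66, (1, 0.5, 0), (1, 0, 0)])
    data = np.random.rand(20, 20)
    plt.imshow(data, cmap=cmap)
    plt.colorbar()
    plt.show()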
def plotMap(matrix, fig, ax):
""" Plot a matrix as a HeatMap
matrix: the Numpy array to be plotted
fig: The figure object to use for plotting
ax: the axis object for plotting
"""
# Colormap for the legend
cm = None
#c = m.colors.ColorConverter().to_rgb
#cm = makeColormap([(0,1,0), (1,1,0), 0.1, (1,1,0), (1,0.5,0), 0.66, (1,0.5,0),(1,0,0)])
#cm = makeColormap([(0,0.011,0.176), (1,1,0), 0.1, (1,1,0), (1,0.5,0), 0.66, (1,0.5,0),(1,1,1)])
# Build the heatmap
heatmap = ax.pcolor(matrix, cmap=cm)
# Format
ax.set_aspect('equal')
ax.set_frame_on(False) # turn off the frame
# put the major ticks at the middle of each cell
#ax.set_yticks(np.arange(matrix.shape[0]) + 0.5, minor=False)
#ax.set_xticks(np.arange(matrix.shape[1]) + 0.5, minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
#ax.xaxis.tick_top()
#ax.set_xticklabels(range(0,matrix.shape[0]), minor=False)
#ax.set_yticklabels(range(0,matrix.shape[1]), minor=False)
ax.grid(False)
for t in ax.xaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
for t in ax.yaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
#ax.get_xaxis().set_ticks(range(0,matrix.shape[0]+1,10))
#ax.get_yaxis().set_ticks(range(0,matrix.shape[0]+1,10))
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(heatmap,cax=cax,ticks=[])
return (fig,ax)
def plotSolution(solution_data, figure, axis):
side = int(math.sqrt(int(solution_data["nodes"])))
currents = ast.literal_eval(str(solution_data["node_currents"]))
currents = np.asarray(currents)
currents = currents.reshape((side, side))
plotMap(currents,figure,axis)
def readSolutions(json_filename, plot_intermediate = False):
data = json.load(open(json_filename))
base_solution = data["base_solution"]
initial_solution = data["initial_solution"]
best_solution = data["best_solution"]
if plot_intermediate:
intermediate_count = int(data["solutions_count"])
else:
intermediate_count = 0
number_plots = intermediate_count + 3 #base, initial and best
    columns = int(min(number_plots, 10))
    rows = int(math.ceil(number_plots / float(columns)))
fig = plt.figure(figsize=(20,20))
axis_base = fig.add_subplot(rows,columns,1)
plt.title("Base Solution ("+str(base_solution["resistance"])+")")
axis_initial = fig.add_subplot(rows,columns,2)
plt.title("Initial Solution ("+str(initial_solution["resistance"])+")")
axis_intermediates = {}
for i in range(0, intermediate_count):
axis_intermediates[i] = fig.add_subplot(rows,columns,3 + i)
plt.title("Solution"+str(i)+" ("+str(initial_solution["resistance"])+")")
axis_best = fig.add_subplot(rows,columns,rows*columns)
plt.title("Best Solution ("+str(best_solution["resistance"])+")")
fig.tight_layout()
plt.subplots_adjust(wspace = 0.1 )
plotSolution(base_solution,fig, axis_base)
plotSolution(initial_solution,fig, axis_initial)
for i in range(0, intermediate_count):
plotSolution(data["solution_"+str(i)], fig, axis_intermediates[i])
plotSolution(best_solution,fig, axis_best)
plt.show()
if __name__ == "__main__":
readSolutions(sys.argv[1])
| lgpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/__init__.py | 5 | 3084 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.18.1'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| mit |
KEHANG/RMG-Py | rmgpy/cantherm/thermo.py | 2 | 11460 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains the :class:`ThermoJob` class, used to compute and save the
thermodynamics information for a single species.
"""
import os.path
import math
import numpy.linalg
import logging
import rmgpy.constants as constants
from rmgpy.cantherm.output import prettify
from rmgpy.statmech import *
from rmgpy.thermo import *
################################################################################
class ThermoJob:
"""
A representation of a CanTherm thermodynamics job. This job is used to
compute and save the thermodynamics information for a single species.
"""
def __init__(self, species, thermoClass):
self.species = species
self.thermoClass = thermoClass
def execute(self, outputFile=None, plot=False):
"""
Execute the thermodynamics job, saving the results to the
given `outputFile` on disk.
"""
self.generateThermo()
if outputFile is not None:
self.save(outputFile)
if plot:
self.plot(os.path.dirname(outputFile))
def generateThermo(self):
"""
Generate the thermodynamic data for the species and fit it to the
desired heat capacity model (as specified in the `thermoClass`
attribute).
"""
if self.thermoClass.lower() not in ['wilhoit', 'nasa']:
raise Exception('Unknown thermodynamic model "{0}".'.format(self.thermoClass))
species = self.species
logging.info('Generating {0} thermo model for {1}...'.format(self.thermoClass, species))
Tlist = numpy.arange(10.0, 3001.0, 10.0, numpy.float64)
Cplist = numpy.zeros_like(Tlist)
H298 = 0.0
S298 = 0.0
conformer = self.species.conformer
for i in range(Tlist.shape[0]):
Cplist[i] += conformer.getHeatCapacity(Tlist[i])
H298 += conformer.getEnthalpy(298.) + conformer.E0.value_si
S298 += conformer.getEntropy(298.)
if not any([isinstance(mode, (LinearRotor, NonlinearRotor)) for mode in conformer.modes]):
# Monatomic species
linear = False
Nfreq = 0
Nrotors = 0
Cp0 = 2.5 * constants.R
CpInf = 2.5 * constants.R
else:
# Polyatomic species
            linear = isinstance(conformer.modes[1], LinearRotor)
Nfreq = len(conformer.modes[2].frequencies.value)
Nrotors = len(conformer.modes[3:])
Cp0 = (3.5 if linear else 4.0) * constants.R
CpInf = Cp0 + (Nfreq + 0.5 * Nrotors) * constants.R
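        # The limits above are the classical heat-capacity bounds used by the
        # Wilhoit fit below: Cp0 is the low-temperature limit (2.5*R for atoms,
        # 3.5*R for linear and 4.0*R for nonlinear molecules), and CpInf adds the
        # high-temperature contribution of R per vibration and R/2 per hindered
        # internal rotor.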
wilhoit = Wilhoit()
if Nfreq == 0 and Nrotors == 0:
wilhoit.Cp0 = (Cplist[0],"J/(mol*K)")
wilhoit.CpInf = (Cplist[0],"J/(mol*K)")
wilhoit.B = (500.,"K")
wilhoit.H0 = (0.0,"J/mol")
wilhoit.S0 = (0.0,"J/(mol*K)")
wilhoit.H0 = (H298 -wilhoit.getEnthalpy(298.15), "J/mol")
wilhoit.S0 = (S298 - wilhoit.getEntropy(298.15),"J/(mol*K)")
else:
wilhoit.fitToData(Tlist, Cplist, Cp0, CpInf, H298, S298, B0=500.0)
if self.thermoClass.lower() == 'nasa':
species.thermo = wilhoit.toNASA(Tmin=10.0, Tmax=3000.0, Tint=500.0)
else:
species.thermo = wilhoit
def save(self, outputFile):
"""
Save the results of the thermodynamics job to the file located
at `path` on disk.
"""
species = self.species
logging.info('Saving thermo for {0}...'.format(species.label))
f = open(outputFile, 'a')
f.write('# Thermodynamics for {0}:\n'.format(species.label))
H298 = species.thermo.getEnthalpy(298) / 4184.
S298 = species.thermo.getEntropy(298) / 4.184
f.write('# Enthalpy of formation (298 K) = {0:9.3f} kcal/mol\n'.format(H298))
f.write('# Entropy of formation (298 K) = {0:9.3f} cal/(mol*K)\n'.format(S298))
f.write('# =========== =========== =========== =========== ===========\n')
f.write('# Temperature Heat cap. Enthalpy Entropy Free energy\n')
f.write('# (K) (cal/mol*K) (kcal/mol) (cal/mol*K) (kcal/mol)\n')
f.write('# =========== =========== =========== =========== ===========\n')
for T in [300,400,500,600,800,1000,1500,2000,2400]:
Cp = species.thermo.getHeatCapacity(T) / 4.184
H = species.thermo.getEnthalpy(T) / 4184.
S = species.thermo.getEntropy(T) / 4.184
G = species.thermo.getFreeEnergy(T) / 4184.
f.write('# {0:11g} {1:11.3f} {2:11.3f} {3:11.3f} {4:11.3f}\n'.format(T, Cp, H, S, G))
f.write('# =========== =========== =========== =========== ===========\n')
string = 'thermo(label={0!r}, thermo={1!r})'.format(species.label, species.thermo)
f.write('{0}\n\n'.format(prettify(string)))
f.close()
f = open(os.path.join(os.path.dirname(outputFile), 'chem.inp'), 'a')
thermo = species.thermo
if isinstance(thermo, NASA):
poly_low = thermo.polynomials[0]
poly_high = thermo.polynomials[1]
# Determine the number of each type of element in the molecule
elements = ['C','H','N','O']; elementCounts = [0,0,0,0]
# Remove elements with zero count
index = 2
while index < len(elementCounts):
if elementCounts[index] == 0:
del elements[index]
del elementCounts[index]
else:
index += 1
# Line 1
string = '{0:<16} '.format(species.label)
if len(elements) <= 4:
# Use the original Chemkin syntax for the element counts
for symbol, count in zip(elements, elementCounts):
string += '{0!s:<2}{1:<3d}'.format(symbol, count)
string += ' ' * (4 - len(elements))
else:
string += ' ' * 4
string += 'G{0:<10.3f}{1:<10.3f}{2:<8.2f} 1'.format(poly_low.Tmin.value_si, poly_high.Tmax.value_si, poly_low.Tmax.value_si)
if len(elements) > 4:
string += '&\n'
# Use the new-style Chemkin syntax for the element counts
# This will only be recognized by Chemkin 4 or later
for symbol, count in zip(elements, elementCounts):
string += '{0!s:<2}{1:<3d}'.format(symbol, count)
string += '\n'
# Line 2
string += '{0:< 15.8E}{1:< 15.8E}{2:< 15.8E}{3:< 15.8E}{4:< 15.8E} 2\n'.format(poly_high.c0, poly_high.c1, poly_high.c2, poly_high.c3, poly_high.c4)
# Line 3
string += '{0:< 15.8E}{1:< 15.8E}{2:< 15.8E}{3:< 15.8E}{4:< 15.8E} 3\n'.format(poly_high.c5, poly_high.c6, poly_low.c0, poly_low.c1, poly_low.c2)
# Line 4
string += '{0:< 15.8E}{1:< 15.8E}{2:< 15.8E}{3:< 15.8E} 4\n'.format(poly_low.c3, poly_low.c4, poly_low.c5, poly_low.c6)
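            # The four lines assembled above follow the fixed-column CHEMKIN
            # NASA-7 format: line 1 carries the species name, element counts,
            # phase and the three temperature bounds; lines 2-4 carry the seven
            # high-T polynomial coefficients followed by the seven low-T ones.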
f.write(string)
f.close()
def plot(self, outputDirectory):
"""
        Plot the heat capacity, enthalpy, entropy, and Gibbs free energy of the
fitted thermodynamics model, along with the same values from the
statistical mechanics model that the thermodynamics model was fitted
to. The plot is saved to the file ``thermo.pdf`` in the output
directory. The plot is not generated if ``matplotlib`` is not installed.
"""
# Skip this step if matplotlib is not installed
try:
import pylab
except ImportError:
return
Tlist = numpy.arange(10.0, 2501.0, 10.0)
Cplist = numpy.zeros_like(Tlist)
Cplist1 = numpy.zeros_like(Tlist)
Hlist = numpy.zeros_like(Tlist)
Hlist1 = numpy.zeros_like(Tlist)
Slist = numpy.zeros_like(Tlist)
Slist1 = numpy.zeros_like(Tlist)
Glist = numpy.zeros_like(Tlist)
Glist1 = numpy.zeros_like(Tlist)
conformer = self.species.conformer
thermo = self.species.thermo
for i in range(Tlist.shape[0]):
Cplist[i] = conformer.getHeatCapacity(Tlist[i])
Slist[i] = conformer.getEntropy(Tlist[i])
Hlist[i] = (conformer.getEnthalpy(Tlist[i]) + conformer.E0.value_si) * 0.001
Glist[i] = Hlist[i] - Tlist[i] * Slist[i] * 0.001
Cplist1[i] = thermo.getHeatCapacity(Tlist[i])
Slist1[i] = thermo.getEntropy(Tlist[i])
Hlist1[i] = thermo.getEnthalpy(Tlist[i]) * 0.001
Glist1[i] = thermo.getFreeEnergy(Tlist[i]) * 0.001
fig = pylab.figure(figsize=(10,8))
pylab.subplot(2,2,1)
pylab.plot(Tlist, Cplist / 4.184, '-r', Tlist, Cplist1 / 4.184, '-b')
pylab.xlabel('Temperature (K)')
pylab.ylabel('Heat capacity (cal/mol*K)')
pylab.legend(['statmech', 'thermo'], loc=4)
pylab.subplot(2,2,2)
pylab.plot(Tlist, Slist / 4.184, '-r', Tlist, Slist1 / 4.184, '-b')
pylab.xlabel('Temperature (K)')
pylab.ylabel('Entropy (cal/mol*K)')
pylab.subplot(2,2,3)
pylab.plot(Tlist, Hlist / 4.184, '-r', Tlist, Hlist1 / 4.184, '-b')
pylab.xlabel('Temperature (K)')
pylab.ylabel('Enthalpy (kcal/mol)')
pylab.subplot(2,2,4)
pylab.plot(Tlist, Glist / 4.184, '-r', Tlist, Glist1 / 4.184, '-b')
pylab.xlabel('Temperature (K)')
pylab.ylabel('Gibbs free energy (kcal/mol)')
fig.subplots_adjust(left=0.10, bottom=0.08, right=0.95, top=0.95, wspace=0.35, hspace=0.20)
pylab.savefig(os.path.join(outputDirectory, 'thermo.pdf'))
pylab.close()
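# Minimal usage sketch (hypothetical): in practice CanTherm constructs the
# Species, including its statistical-mechanics Conformer, from an input file
# before this job runs, so only the job invocation itself is shown here.
#
#     job = ThermoJob(species, thermoClass='NASA')
#     job.execute(outputFile='output.py', plot=True)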
| mit |
mojoboss/scikit-learn | sklearn/preprocessing/tests/test_data.py | 14 | 37957 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
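    # Centering is rejected for sparse input: subtracting the column means would
    # densify the matrix, so only scaling (with_mean=False) is supported there.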
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
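# How the expected 5-column layout above arises: with n_values discovered
# automatically, feature_indices_ = [0, 4, 7, 9] allots the one-hot columns per
# input column (values 0-3, 0-2, 0-1), and active_features_ keeps only the
# columns actually seen in X, leaving the five output columns checked above.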
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise error if handle_unknown is neither ignore nor error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
BhallaLab/moose | moose-examples/traub_2005/py/testutils.py | 2 | 12024 | # test_utils.py ---
#
# Filename: test_utils.py
# Description:
# Author:
# Maintainer:
# Created: Sat May 26 10:41:37 2012 (+0530)
# Version:
# Last-Updated: Sat Aug 6 15:45:51 2016 (-0400)
# By: subha
# Update #: 414
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# Code:
import os
os.environ['NUMPTHREADS'] = '1'
import sys
sys.path.append('../../../python')
import uuid
import numpy as np
from matplotlib import pyplot as plt
import unittest
import moose
from moose import utils as mutils
import config
import channelbase
INITCLOCK = 0
ELECCLOCK = 1
CHANCLOCK = 2
POOLCLOCK = 3
LOOKUPCLOCK = 6
STIMCLOCK = 7
PLOTCLOCK = 8
SIMDT = 5e-6
PLOTDT = 0.25e-3
lib = moose.Neutral(config.modelSettings.libpath)
def setup_clocks(simdt, plotdt):
print( 'Setting up clocks: simdt', simdt, 'plotdt', plotdt)
moose.setClock(INITCLOCK, simdt)
moose.setClock(ELECCLOCK, simdt)
moose.setClock(CHANCLOCK, simdt)
moose.setClock(POOLCLOCK, simdt)
moose.setClock(LOOKUPCLOCK, simdt)
moose.setClock(STIMCLOCK, simdt)
moose.setClock(PLOTCLOCK, plotdt)
moose.le('/clock')
def assign_clocks(model_container, data_container, solver='euler'):
"""Assign clockticks to elements.
Parameters
----------
model_container: element
All model components are under this element. The model elements
are assigned clocks as required to maintain the right update
sequence.
INITCLOCK = 0 calls `init` method in Compartments
ELECCLOCK = 1 calls `process` method of Compartments
CHANCLOCK = 2 calls `process` method for HHChannels
POOLCLOCK = 3 is not used in electrical simulation
LOOKUPCLOCK = 6 is not used in these simulations.
STIMCLOCK = 7 calls `process` method in stimulus objects like
PulseGen.
data_container: element
All data recording tables are under this element. They are
assigned PLOTCLOCK, the clock whose update interval is plotdt.
PLOTCLOCK = 8 calls `process` method of Table elements under
data_container
"""
moose.useClock(STIMCLOCK,
model_container.path+'/##[TYPE=PulseGen]',
'process')
moose.useClock(PLOTCLOCK,
data_container.path+'/##[TYPE=Table]',
'process')
if solver == 'hsolve':
        for neuron in moose.wildcardFind(model_container.path + '/##[TYPE=Neuron]'):
            hsolve = moose.HSolve(neuron.path + '/solve')
            hsolve.dt = moose.element('/clock/tick[0]').dt
            hsolve.target = neuron.path
moose.useClock(INITCLOCK,
model_container.path+'/##[TYPE=HSolve]',
'process')
else:
moose.useClock(INITCLOCK,
model_container.path+'/##[TYPE=Compartment]',
'init')
moose.useClock(ELECCLOCK,
model_container.path+'/##[TYPE=Compartment]',
'process')
moose.useClock(CHANCLOCK,
model_container.path+'/##[TYPE=HHChannel]',
'process')
moose.useClock(POOLCLOCK,
model_container.path+'/##[TYPE=CaConc]',
'process')
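# A typical call sequence with these helpers (sketch; `channel_proto` stands for
# a channel prototype created elsewhere, e.g. via the channelbase module, and the
# Gbar and time values are illustrative only):
#
#     model = moose.Neutral('/model')
#     data = moose.Neutral('/data')
#     tables = setup_single_compartment(model, data, channel_proto, Gbar=1e-9)
#     setup_clocks(SIMDT, PLOTDT)
#     assign_clocks(model, data)
#     step_run(0.35, 0.05)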
def step_run(simtime, steptime, verbose=True):
"""Run the simulation in steps of `steptime` for `simtime`."""
clock = moose.Clock('/clock')
if verbose:
print( 'Starting simulation for', simtime)
while clock.currentTime < simtime - steptime:
moose.start(steptime)
if verbose:
print( 'Simulated till', clock.currentTime, 's')
remaining = simtime - clock.currentTime
if remaining > 0:
if verbose:
print( 'Running the remaining', remaining, 's')
moose.start(remaining)
if verbose:
print( 'Finished simulation')
def make_testcomp(containerpath):
comp = moose.Compartment('%s/testcomp' % (containerpath))
comp.Em = -65e-3
comp.initVm = -65e-3
comp.Cm = 1e-12
comp.Rm = 1e9
comp.Ra = 1e5
return comp
def make_pulsegen(containerpath):
pulsegen = moose.PulseGen('%s/testpulse' % (containerpath))
pulsegen.firstLevel = 1e-12
pulsegen.firstDelay = 50e-3
pulsegen.firstWidth = 100e-3
pulsegen.secondLevel = -1e-12
pulsegen.secondDelay = 150e-3
pulsegen.secondWidth = 100e-3
pulsegen.count = 3
pulsegen.delay[2] = 1e9
return pulsegen
def setup_single_compartment(model_container, data_container, channel_proto, Gbar):
"""Setup a single compartment with a channel
Parameters
----------
model_container: element
The model compartment is created under this element
data_container: element
The tables to record data are created under this
channel_proto: element
Channel prototype in library
Gbar: float
Maximum conductance density of the channel
"""
comp = make_testcomp(model_container.path)
channel = moose.copy(channel_proto, comp, channel_proto.name)[0]
moose.connect(channel, 'channel', comp, 'channel')
channel.Gbar = Gbar
pulsegen = make_pulsegen(model_container.path)
moose.connect(pulsegen, 'output', comp, 'injectMsg')
vm_table = moose.Table('%s/Vm' % (data_container.path))
moose.connect(vm_table, 'requestOut', comp, 'getVm')
gk_table = moose.Table('%s/Gk' % (data_container.path))
moose.connect(gk_table, 'requestOut', channel, 'getGk')
ik_table = moose.Table('%s/Ik' % (data_container.path))
moose.connect(ik_table, 'requestOut', channel, 'getIk')
return {'compartment': comp,
'stimulus': pulsegen,
'channel': channel,
'Vm': vm_table,
'Gk': gk_table,
'Ik': ik_table}
def insert_hhchannel(compartment, channelclass, gbar):
channel = moose.copy(channelclass.prototype, compartment)
channel[0].Gbar = gbar
moose.connect(channel, 'channel', compartment, 'channel')
return channel[0]
def compare_data_arrays(left, right, relative='maxw', plot=False, x_range=None):
"""Compare two data arrays and return some measure of the
error.
The arrays must have the same number of dimensions (1 or 2) and
represent the same range of x values. In case they are 1
dimensional, we take x values as relative position of that data
point in the total x-range.
We interpolate the y values for the x-values of the series with
    lower resolution using the higher resolution series as the
interpolation table.
The error is calculated as the maximum difference between the
interpolated values and the actual values in the lower resolution
array divided by the difference between the maximum and minimum y
values of both the series.
If plot is True, left, right and their difference at common points
are plotted.
relative: `rms` - return root mean square of the error values
`taxicab` - mean of the absolute error values
`maxw` - max(abs(error))/(max(y) - min(y))
`meany` - rms(error)/mean(y)
x_range : (minx, maxx) range of X values to consider for comparison
"""
if len(left.shape) != len(right.shape):
print( left.shape, right.shape)
raise ValueError('Arrays to be compared must have same dimensions.')
    # y is the interpolation result for x array using xp and fp when xp and x do not match.
# xp and fp are interpolation table's independent and dependent variables
# yp is a view of the original y values
x = None
y = None
xp = None
fp = None
yp = None
    # arbitrarily keep the series with more datapoints as left
if left.shape[0] < right.shape[0]:
tmp = left
left = right
right = tmp
if len(right.shape) == 1:
x = np.arange(right.shape[0]) * 1.0 / right.shape[0]
yp = right
xp = np.arange(left.shape[0]) * 1.0 / left.shape[0]
fp = left
elif len(right.shape) == 2:
x = right[:,0]
yp = right[:,1]
xp = left[:,0]
fp = left[:,1]
else:
raise ValueError('Cannot handle more than 2 dimensional arrays.')
if left.shape[0] != right.shape[0]:
print( 'Array sizes not matching: (%d <> %d) - interpolating' % (left.shape[0], right.shape[0]))
y = np.interp(x, xp, fp)
else: # assume we have the same X values when sizes are the same
y = np.array(fp)
if x_range:
indices = np.nonzero((x > x_range[0]) & (x <= x_range[1]))[0]
y = np.array(y[indices])
yp = np.array(yp[indices])
x = np.array(x[indices])
# We update xp and fp to have the same plotting x-range
indices = np.nonzero((xp > x_range[0]) & (xp <= x_range[1]))[0]
xp = xp[indices]
fp = fp[indices]
err = y - yp
print( min(err), max(err), min(y), max(y), min(yp), max(yp))
    # A conservative relative error measure: the maximum absolute error between
    # pairs of points, normalized by the overall y-range (the 'maxw' option below).
all_y = np.r_[y, yp]
if plot:
plt.plot(x, yp, 'b-.', label='right')
plt.plot(xp, fp, 'g--', label='left')
plt.plot(x, err, 'r:', label='error')
plt.legend()
plt.show()
if relative == 'rms':
return np.sqrt(np.mean(err**2))
elif relative == 'taxicab':
return np.mean(np.abs(err))
elif relative == 'maxw':
return max(np.abs(err))/(max(all_y) - min(all_y))
elif relative == 'meany':
return np.sqrt(np.mean(err**2)) / np.mean(all_y)
else:
return err
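# Example (sketch): comparing a finely and a coarsely sampled version of the same
# curve should give a small 'maxw' error, since the two series only differ by the
# interpolation error relative to the overall y-range.
#
#     t_fine = np.linspace(0.0, 1.0, 1000)
#     t_coarse = np.linspace(0.0, 1.0, 100)
#     left = np.c_[t_fine, np.sin(2 * np.pi * t_fine)]
#     right = np.c_[t_coarse, np.sin(2 * np.pi * t_coarse)]
#     compare_data_arrays(left, right, relative='maxw')  # close to zero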
import csv
def compare_cell_dump(left, right, rtol=1e-3, atol=1e-8, row_header=True, col_header=True):
"""This is a utility function to compare various compartment
parameters for a single cell model dumped in csv format using
NEURON and MOOSE."""
print( 'Comparing:', left, 'with', right)
ret = True
left_file = open(left, 'rb')
right_file = open(right, 'rb')
left_reader = csv.DictReader(left_file, delimiter=',')
right_reader = csv.DictReader(right_file, delimiter=',')
lheader = list(left_reader.fieldnames)
lheader.remove('comp')
lheader = sorted(lheader)
rheader = list(right_reader.fieldnames)
rheader.remove('comp')
rheader = sorted(rheader)
if len(lheader) != len(rheader):
print( 'Column number mismatch: left %d <-> right %d' % (len(lheader), len(rheader)))
return False
for ii in range(len(lheader)):
if lheader[ii] != rheader[ii]:
print( ii, '-th column name mismatch:', lheader[ii], '<->', rheader[ii])
return False
index = 2
left_end = False
right_end = False
while True:
try:
left_row = next(left_reader)
except StopIteration:
left_end = True
try:
right_row = next(right_reader)
except StopIteration:
right_end = True
if left_end and not right_end:
            print( left, 'ran out of lines after', index, 'rows')
return False
if right_end and not left_end:
            print( right, 'ran out of lines after', index, 'rows')
return False
if left_end and right_end:
return ret
if len(left_row) != len(right_row):
print( 'No. of columns differ: left - ', len(left_row), 'right -', len(right_row))
ret = False
break
for key in lheader:
try:
left = float(left_row[key])
right = float(right_row[key])
if not np.allclose(float(left), float(right), rtol=rtol, atol=atol):
print(('Mismatch in row:%s, column:%s. Values: %g <> %g' % (index, key, left, right)))
ret = False
except ValueError as e:
print(e)
print(('Row:', index, 'Key:', key, left_row[key], right_row[key]))
index = index + 1
return ret
#
# test_utils.py ends here
| gpl-3.0 |
stephenhky/PyShortTextCategorization | shorttext/utils/dtm.py | 1 | 8040 |
import numpy as np
from gensim.corpora import Dictionary
from gensim.models import TfidfModel
from scipy.sparse import dok_matrix
import pickle
from .compactmodel_io import CompactIOMachine
from .classification_exceptions import NotImplementedException
dtm_suffices = ['_docids.pkl', '_dictionary.dict', '_dtm.pkl']
class DocumentTermMatrix(CompactIOMachine):
""" Document-term matrix for corpus.
This is a class that handles the document-term matrix (DTM). With a given corpus, users can
retrieve term frequency, document frequency, and total term frequency. Weighing using tf-idf
can be applied.
"""
def __init__(self, corpus, docids=None, tfidf=False):
""" Initialize the document-term matrix (DTM) class with a given corpus.
        If document IDs (docids) are given, they will be stored and output as appropriate.
If not, the documents are indexed by numbers.
Users can choose to weigh by tf-idf. The default is not to weigh.
        The corpus has to be a list of lists, with each inner list containing all the tokens
in each document.
:param corpus: corpus.
:param docids: list of designated document IDs. (Default: None)
:param tfidf: whether to weigh using tf-idf. (Default: False)
:type corpus: list
:type docids: list
:type tfidf: bool
"""
CompactIOMachine.__init__(self, {'classifier': 'dtm'}, 'dtm', dtm_suffices)
        if docids is None:
self.docid_dict = {i: i for i in range(len(corpus))}
self.docids = range(len(corpus))
else:
if len(docids) == len(corpus):
self.docid_dict = {docid: i for i, docid in enumerate(docids)}
self.docids = docids
elif len(docids) > len(corpus):
self.docid_dict = {docid: i for i, docid in zip(range(len(corpus)), docids[:len(corpus)])}
self.docids = docids[:len(corpus)]
else:
self.docid_dict = {docid: i for i, docid in enumerate(docids)}
                self.docid_dict.update({i: i for i in range(len(docids), len(corpus))})
                self.docids = list(docids) + list(range(len(docids), len(corpus)))
# generate DTM
self.generate_dtm(corpus, tfidf=tfidf)
def generate_dtm(self, corpus, tfidf=False):
""" Generate the inside document-term matrix and other peripherical information
objects. This is run when the class is instantiated.
:param corpus: corpus.
:param tfidf: whether to weigh using tf-idf. (Default: False)
:return: None
:type corpus: list
:type tfidf: bool
"""
self.dictionary = Dictionary(corpus)
self.dtm = dok_matrix((len(corpus), len(self.dictionary)), dtype=np.float)
bow_corpus = [self.dictionary.doc2bow(doctokens) for doctokens in corpus]
if tfidf:
weighted_model = TfidfModel(bow_corpus)
bow_corpus = weighted_model[bow_corpus]
for docid in self.docids:
for tokenid, count in bow_corpus[self.docid_dict[docid]]:
self.dtm[self.docid_dict[docid], tokenid] = count
def get_termfreq(self, docid, token):
""" Retrieve the term frequency of a given token in a particular document.
Given a token and a particular document ID, compute the term frequency for this
token. If `tfidf` is set to `True` while instantiating the class, it returns the weighted
term frequency.
:param docid: document ID
:param token: term or token
:return: term frequency or weighted term frequency of the given token in this document (designated by docid)
:type docid: any
:type token: str
:rtype: numpy.float
"""
return self.dtm[self.docid_dict[docid], self.dictionary.token2id[token]]
def get_total_termfreq(self, token):
""" Retrieve the total occurrences of the given token.
Compute the total occurrences of the term in all documents. If `tfidf` is set to `True`
while instantiating the class, it returns the sum of weighted term frequency.
:param token: term or token
:return: total occurrences of the given token
:type token: str
:rtype: numpy.float
"""
return sum(self.dtm[:, self.dictionary.token2id[token]].values())
def get_doc_frequency(self, token):
""" Retrieve the document frequency of the given token.
Compute the document frequency of the given token, i.e., the number of documents
        in which this token can be found.
:param token: term or token
:return: document frequency of the given token
:type token: str
:rtype: int
"""
return len(self.dtm[:, self.dictionary.token2id[token]].values())
def get_token_occurences(self, token):
""" Retrieve the term frequencies of a given token in all documents.
Compute the term frequencies of the given token for all the documents. If `tfidf` is
set to be `True` while instantiating the class, it returns the weighted term frequencies.
This method returns a dictionary of term frequencies with the corresponding document IDs
as the keys.
:param token: term or token
:return: a dictionary of term frequencies with the corresponding document IDs as the keys
:type token: str
:rtype: dict
"""
return {self.docids[docidx]: count for (docidx, _), count in self.dtm[:, self.dictionary.token2id[token]].items()}
def get_doc_tokens(self, docid):
""" Retrieve the term frequencies of all tokens in the given document.
Compute the term frequencies of all tokens for the given document. If `tfidf` is
set to be `True` while instantiating the class, it returns the weighted term frequencies.
This method returns a dictionary of term frequencies with the tokens as the keys.
:param docid: document ID
:return: a dictionary of term frequencies with the tokens as the keys
:type docid: any
:rtype: dict
"""
return {self.dictionary[tokenid]: count for (_, tokenid), count in self.dtm[self.docid_dict[docid], :].items()}
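# Query sketch, continuing the toy corpus above (values are what raw counts would give):
#   dtm.get_doc_frequency('sat')         # -> 2, since 'sat' occurs in both documents
#   dtm.get_token_occurences('sat')      # -> {'doc1': 1.0, 'doc2': 2.0}
#   dtm.get_doc_tokens('doc1')           # -> {'the': 1.0, 'cat': 1.0, 'sat': 1.0}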
def generate_dtm_dataframe(self):
""" Generate the data frame of the document-term matrix. (shorttext <= 1.0.3)
It now raises an exception.
:return: data frame of the document-term matrix
:rtype: pandas.DataFrame
:raise: NotImplementedException
"""
raise NotImplementedException()
def savemodel(self, prefix):
""" Save the model.
:param prefix: prefix of the files
:return: None
:type prefix: str
"""
pickle.dump(self.docids, open(prefix+'_docids.pkl', 'wb'))
self.dictionary.save(prefix+'_dictionary.dict')
pickle.dump(self.dtm, open(prefix+'_dtm.pkl', 'wb'))
def loadmodel(self, prefix):
""" Load the model.
:param prefix: prefix of the files
:return: None
:type prefix: str
"""
self.docids = pickle.load(open(prefix+'_docids.pkl', 'rb'))
self.docid_dict = {docid: i for i, docid in enumerate(self.docids)}
self.dictionary = Dictionary.load(prefix+'_dictionary.dict')
self.dtm = pickle.load(open(prefix+'_dtm.pkl', 'rb'))
def load_DocumentTermMatrix(filename, compact=True):
""" Load presaved Document-Term Matrix (DTM).
Given the file name (if `compact` is `True`) or the prefix (if `compact` is `False`),
return the document-term matrix.
:param filename: file name or prefix
:param compact: whether it is a compact model. (Default: `True`)
:return: document-term matrix
:type filename: str
:type compact: bool
:rtype: DocumentTermMatrix
"""
dtm = DocumentTermMatrix([[]])
if compact:
dtm.load_compact_model(filename)
else:
dtm.loadmodel(filename)
return dtm | mit |
tyarkoni/pliers | pliers/tests/extractors/test_text_extractors.py | 1 | 21529 | from os.path import join
from pathlib import Path
from os import environ
import shutil
import numpy as np
import pytest
import spacy
from transformers import BertTokenizer
from pliers import config
from pliers.extractors import (DictionaryExtractor,
PartOfSpeechExtractor,
LengthExtractor,
NumUniqueWordsExtractor,
PredefinedDictionaryExtractor,
TextVectorizerExtractor,
WordEmbeddingExtractor,
VADERSentimentExtractor,
SpaCyExtractor,
BertExtractor,
BertSequenceEncodingExtractor,
BertLMExtractor,
BertSentimentExtractor,
WordCounterExtractor)
from pliers.extractors.base import merge_results
from pliers.stimuli import TextStim, ComplexTextStim
from pliers.tests.utils import get_test_data_path
TEXT_DIR = join(get_test_data_path(), 'text')
def test_text_extractor():
stim = ComplexTextStim(join(TEXT_DIR, 'sample_text.txt'),
columns='to', default_duration=1)
td = DictionaryExtractor(join(TEXT_DIR, 'test_lexical_dictionary.txt'),
variables=['length', 'frequency'])
assert td.data.shape == (7, 2)
result = td.transform(stim)[2].to_df()
assert result['duration'][0] == 1
assert result.shape == (1, 6)
assert np.isclose(result['frequency'][0], 11.729, 1e-5)
def test_text_length_extractor():
stim = TextStim(text='hello world', onset=4.2, duration=1)
ext = LengthExtractor()
result = ext.transform(stim).to_df()
assert 'text_length' in result.columns
assert result['text_length'][0] == 11
assert result['onset'][0] == 4.2
assert result['duration'][0] == 1
def test_unique_words_extractor():
stim = TextStim(text='hello hello world')
ext = NumUniqueWordsExtractor()
result = ext.transform(stim).to_df()
assert 'num_unique_words' in result.columns
assert result['num_unique_words'][0] == 2
def test_dictionary_extractor():
td = DictionaryExtractor(join(TEXT_DIR, 'test_lexical_dictionary.txt'),
variables=['length', 'frequency'])
assert td.data.shape == (7, 2)
stim = TextStim(text='annotation')
result = td.transform(stim).to_df()
assert np.isnan(result['onset'][0])
assert 'length' in result.columns
assert result['length'][0] == 10
stim2 = TextStim(text='some')
result = td.transform(stim2).to_df()
assert np.isnan(result['onset'][0])
assert 'frequency' in result.columns
assert np.isnan(result['frequency'][0])
def test_predefined_dictionary_extractor():
stim = TextStim(text='enormous')
td = PredefinedDictionaryExtractor(['aoa/Freq_pm'])
result = td.transform(stim).to_df()
assert result.shape == (1, 5)
assert 'aoa_Freq_pm' in result.columns
assert np.isclose(result['aoa_Freq_pm'][0], 10.313725, 1e-5)
def test_predefined_dictionary_retrieval():
variables = [
'affect/D.Mean.H',
'concreteness/SUBTLEX',
'subtlexusfrequency/Zipf-value',
'calgarysemanticdecision/RTclean_mean',
'massiveauditorylexicaldecision/PhonLev'
]
stim = TextStim(text='perhaps')
td = PredefinedDictionaryExtractor(variables)
result = td.transform(stim).to_df().iloc[0]
assert np.isnan(result['affect_D.Mean.H'])
assert result['concreteness_SUBTLEX'] == 6939
assert result['calgarysemanticdecision_RTclean_mean'] == 954.48
assert np.isclose(result['subtlexusfrequency_Zipf-value'], 5.1331936)
assert np.isclose(result['massiveauditorylexicaldecision_PhonLev'],
6.65101626)
def test_part_of_speech_extractor():
import nltk
nltk.download('tagsets')
stim = ComplexTextStim(join(TEXT_DIR, 'complex_stim_with_header.txt'))
result = merge_results(PartOfSpeechExtractor().transform(stim),
format='wide', extractor_names=False)
assert result.shape == (4, 54)
assert result['NN'].sum() == 1
result = result.sort_values('onset')
assert result['VBD'].iloc[3] == 1
def test_word_embedding_extractor():
pytest.importorskip('gensim')
stims = [TextStim(text='this'), TextStim(text='sentence')]
ext = WordEmbeddingExtractor(join(TEXT_DIR, 'simple_vectors.bin'),
binary=True)
result = merge_results(ext.transform(stims), extractor_names='multi',
format='wide')
assert ('WordEmbeddingExtractor', 'embedding_dim99') in result.columns
assert np.allclose(0.0010911,
result[('WordEmbeddingExtractor', 'embedding_dim0')][0])
unk = TextStim(text='nowaythisinvocab')
result = ext.transform(unk).to_df()
assert result['embedding_dim10'][0] == 0.0
ones = np.ones(100)
ext = WordEmbeddingExtractor(join(TEXT_DIR, 'simple_vectors.bin'),
binary=True, unk_vector=ones)
result = ext.transform(unk).to_df()
assert result['embedding_dim10'][0] == 1.0
ext = WordEmbeddingExtractor(join(TEXT_DIR, 'simple_vectors.bin'),
binary=True, unk_vector='random')
result = ext.transform(unk).to_df()
assert result['embedding_dim10'][0] <= 1.0
assert result['embedding_dim10'][0] >= -1.0
ext = WordEmbeddingExtractor(join(TEXT_DIR, 'simple_vectors.bin'),
binary=True, unk_vector='nothing')
result = ext.transform(unk).to_df()
assert result['embedding_dim10'][0] == 0.0
def test_vectorizer_extractor():
pytest.importorskip('sklearn')
stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
result = TextVectorizerExtractor().transform(stim).to_df()
assert 'woman' in result.columns
assert result['woman'][0] == 3
from sklearn.feature_extraction.text import TfidfVectorizer
custom_vectorizer = TfidfVectorizer()
ext = TextVectorizerExtractor(vectorizer=custom_vectorizer)
stim2 = TextStim(join(TEXT_DIR, 'simple_text.txt'))
result = merge_results(ext.transform([stim, stim2]), format='wide',
extractor_names='multi')
assert ('TextVectorizerExtractor', 'woman') in result.columns
assert np.allclose(0.129568189476,
result[('TextVectorizerExtractor', 'woman')][0])
ext = TextVectorizerExtractor(vectorizer='CountVectorizer',
analyzer='char_wb',
ngram_range=(2, 2))
result = ext.transform(stim).to_df()
assert 'wo' in result.columns
assert result['wo'][0] == 6
def test_vader_sentiment_extractor():
stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
ext = VADERSentimentExtractor()
result = ext.transform(stim).to_df()
assert result['sentiment_neu'][0] == 0.752
stim2 = TextStim(text='VADER is smart, handsome, and funny!')
result2 = ext.transform(stim2).to_df()
assert result2['sentiment_pos'][0] == 0.752
assert result2['sentiment_neg'][0] == 0.0
assert result2['sentiment_neu'][0] == 0.248
assert result2['sentiment_compound'][0] == 0.8439
def test_spacy_token_extractor():
pytest.importorskip('spacy')
stim = TextStim(text='This is a test.')
ext = SpaCyExtractor(extractor_type='token')
assert ext.model is not None
ext2 = SpaCyExtractor(model='en_core_web_sm')
assert isinstance(ext2.model, spacy.lang.en.English)
result = ext.transform(stim).to_df()
assert result['text'][0] == 'This'
assert result['lemma_'][0].lower() == 'this'
assert result['pos_'][0] == 'DET'
assert result['tag_'][0] == 'DT'
assert result['dep_'][0] == 'nsubj'
assert result['shape_'][0] == 'Xxxx'
assert result['is_alpha'][0] == 'True'
assert result['is_stop'][0] == 'True'
assert result['is_punct'][0] == 'False'
assert result['is_ascii'][0] == 'True'
assert result['is_digit'][0] == 'False'
assert result['sentiment'][0] == '0.0'
assert result['text'][1] == 'is'
assert result['lemma_'][1].lower() == 'be'
assert result['pos_'][1] == 'AUX'
assert result['tag_'][1] == 'VBZ'
assert result['dep_'][1] == 'ROOT'
assert result['shape_'][1] == 'xx'
assert result['is_alpha'][1] == 'True'
assert result['is_stop'][1] == 'True'
assert result['is_punct'][1] == 'False'
assert result['is_ascii'][1] == 'True'
assert result['is_digit'][1] == 'False'
assert result['sentiment'][1] == '0.0'
assert result['text'][2] == 'a'
assert result['lemma_'][2].lower() == 'a'
assert result['pos_'][2] == 'DET'
assert result['tag_'][2] == 'DT'
assert result['dep_'][2] == 'det'
assert result['shape_'][2] == 'x'
assert result['is_alpha'][2] == 'True'
assert result['is_stop'][2] == 'True'
assert result['is_punct'][2] == 'False'
assert result['is_ascii'][2] == 'True'
assert result['is_digit'][2] == 'False'
assert result['sentiment'][2] == '0.0'
assert result['text'][3] == 'test'
assert result['lemma_'][3].lower() == 'test'
assert result['pos_'][3] == 'NOUN'
assert result['tag_'][3] == 'NN'
assert result['dep_'][3] == 'attr'
assert result['shape_'][3] == 'xxxx'
assert result['is_alpha'][3] == 'True'
assert result['is_stop'][3] == 'False'
assert result['is_punct'][3] == 'False'
assert result['is_ascii'][3] == 'True'
assert result['is_digit'][3] == 'False'
assert result['sentiment'][3] == '0.0'
def test_spacy_doc_extractor():
pytest.importorskip('spacy')
stim2 = TextStim(text='This is a test. And we are testing again. This '
'should be quite interesting. Tests are totally fun.')
ext = SpaCyExtractor(extractor_type='doc')
assert ext.model is not None
result = ext.transform(stim2).to_df()
assert result['text'][0]=='This is a test. '
assert result['is_parsed'][0]
assert result['is_tagged'][0]
assert result['is_sentenced'][0]
assert result['text'][3]=='Tests are totally fun.'
assert result['is_parsed'][3]
assert result['is_tagged'][3]
assert result['is_sentenced'][3]
def test_bert_extractor():
stim = ComplexTextStim(text='This is not a tokenized sentence.')
stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
ext_base = BertExtractor(pretrained_model='bert-base-uncased')
ext_base_token = BertExtractor(pretrained_model='bert-base-uncased',
return_input=True)
ext_tf = BertExtractor(pretrained_model='bert-base-uncased', framework='tf')
base_result = ext_base.transform(stim)
res = base_result.to_df()
res_model_attr = base_result.to_df(include_attributes=True)
res_token = ext_base_token.transform(stim).to_df()
res_file = ext_base.transform(stim_file).to_df()
res_tf = ext_tf.transform(stim).to_df()
# Test encoding shape
assert len(res['encoding'][0]) == 768
assert len(res_file['encoding'][0]) == 768
# test base extractor
assert res.shape[0] == 8
assert res_token.shape[0] == 8
assert res_token['token'][5] == '##ized'
assert res_token['word'][5] == 'tokenized'
assert res_token['object_id'][5] == 5
# test base extractor on file
assert res_file.shape[0] == 8
assert res_file['onset'][3] == 1.3
assert res_file['duration'][5] == 0.5
assert res_file['object_id'][5] == 5
# test tf vs torch
cors = [np.corrcoef(res['encoding'][i], res_tf['encoding'][i])[0,1]
for i in range(res.shape[0])]
assert all(np.isclose(cors, 1))
# catch error if framework is invalid
with pytest.raises(ValueError) as err:
BertExtractor(framework='keras')
assert 'Invalid framework' in str(err.value)
# Delete the models
del res, res_token, res_file, ext_base, ext_base_token
@pytest.mark.skipif(environ.get('TRAVIS', False) == 'true',
reason='high memory')
@pytest.mark.parametrize('model', ['bert-large-uncased',
'distilbert-base-uncased',
'roberta-base','camembert-base'])
def test_bert_other_models(model):
if model == 'camembert-base':
stim = ComplexTextStim(text='ceci n\'est pas un pipe')
else:
stim = ComplexTextStim(text='This is not a tokenized sentence.')
res = BertExtractor(pretrained_model=model, return_input=True).transform(stim).to_df()
if model == 'bert-large-uncased':
shape = 1024
else:
shape = 768
assert len(res['encoding'][0]) == shape
if model == 'camembert-base':
assert res['token'][4] == 'est'
# remove variables
del res, stim
@pytest.mark.skipif(environ.get('TRAVIS', False) == 'true',
reason='high memory')
def test_bert_sequence_extractor():
stim = ComplexTextStim(text='This is not a tokenized sentence.')
stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
ext_pooler = BertSequenceEncodingExtractor(return_special='pooler_output')
# Test correct behavior when setting return_special
assert ext_pooler.pooling is None
assert ext_pooler.return_special == 'pooler_output'
res_sequence = BertSequenceEncodingExtractor(return_input=True).transform(stim).to_df()
res_file = BertSequenceEncodingExtractor(return_input=True).transform(stim_file).to_df()
res_cls = BertSequenceEncodingExtractor(return_special='[CLS]').transform(stim).to_df()
res_pooler = ext_pooler.transform(stim).to_df()
res_max = BertSequenceEncodingExtractor(pooling='max').transform(stim).to_df()
# Check shape
assert len(res_sequence['encoding'][0]) == 768
assert len(res_cls['encoding'][0]) == 768
assert len(res_pooler['encoding'][0]) == 768
assert len(res_max['encoding'][0]) == 768
assert res_sequence.shape[0] == 1
assert res_cls.shape[0] == 1
assert res_pooler.shape[0] == 1
assert res_max.shape[0] == 1
# Make sure pooler/cls/no arguments return different encodings
assert res_sequence['encoding'][0] != res_cls['encoding'][0]
assert res_sequence['encoding'][0] != res_pooler['encoding'][0]
assert res_sequence['encoding'][0] != res_max['encoding'][0]
assert all([res_max['encoding'][0][i] >= res_sequence['encoding'][0][i]
for i in range(768)])
# test return sequence
assert res_sequence['sequence'][0] == 'This is not a tokenized sentence .'
# test file stim
assert res_file['duration'][0] == 2.9
assert res_file['onset'][0] == 0.2
# catch error with wrong numpy function and wrong special token arg
with pytest.raises(ValueError) as err:
BertSequenceEncodingExtractor(pooling='avg')
assert 'valid numpy function' in str(err.value)
with pytest.raises(ValueError) as err:
BertSequenceEncodingExtractor(return_special='[MASK]')
assert 'must be one of' in str(err.value)
# remove variables
del ext_pooler, res_cls, res_max, res_pooler, res_sequence, res_file, stim
@pytest.mark.skipif(environ.get('TRAVIS', False) == 'true',
reason='high memory')
def test_bert_LM_extractor():
stim = ComplexTextStim(text='This is not a tokenized sentence.')
stim_masked = ComplexTextStim(text='This is MASK tokenized sentence.')
stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
# Test mutual exclusivity and mask values
with pytest.raises(ValueError) as err:
BertLMExtractor(top_n=100, target='test')
assert 'mutually exclusive' in str(err.value)
with pytest.raises(ValueError) as err:
BertLMExtractor(top_n=100, threshold=.5)
assert 'mutually exclusive' in str(err.value)
with pytest.raises(ValueError) as err:
BertLMExtractor(target='test', threshold=.5)
assert 'mutually exclusive' in str(err.value)
with pytest.raises(ValueError) as err:
BertLMExtractor(mask=['test', 'mask'])
assert 'must be a string' in str(err.value)
with pytest.raises(ValueError) as err:
BertLMExtractor(target='nonwd')
assert 'No valid target token' in str(err.value)
target_wds = ['target','word']
ext_target = BertLMExtractor(mask=1, target=target_wds)
res = BertLMExtractor(mask=2).transform(stim).to_df()
res_file = BertLMExtractor(mask=2).transform(stim_file).to_df()
res_target = ext_target.transform(stim).to_df()
res_topn = BertLMExtractor(mask=3, top_n=100).transform(stim).to_df()
res_threshold = BertLMExtractor(mask=4, threshold=.1, return_softmax=True).transform(stim).to_df()
res_default = BertLMExtractor().transform(stim_masked).to_df()
res_return_mask = BertLMExtractor(mask=1, top_n=10, return_masked_word=True, return_input=True).transform(stim).to_df()
assert res.shape[0] == 1
# test onset/duration
assert res_file['onset'][0] == 1.0
assert res_file['duration'][0] == 0.2
# Check target words
assert all([w.capitalize() in res_target.columns for w in target_wds])
assert res_target.shape[1] == 6
# Check top_n
assert res_topn.shape[1] == 104
assert all([res_topn.iloc[:,3][0] > res_topn.iloc[:,i][0] for i in range(4,103)])
# Check threshold and range
tknz = BertTokenizer.from_pretrained('bert-base-uncased')
vocab = tknz.vocab.keys()
for v in vocab:
if v.capitalize() in res_threshold.columns:
assert res_threshold[v.capitalize()][0] >= .1
assert res_threshold[v.capitalize()][0] <= 1
# Test update mask method
assert ext_target.mask == 1
ext_target.update_mask(new_mask='sentence')
assert ext_target.mask == 'sentence'
res_target_new = ext_target.transform(stim).to_df()
assert all([res_target[c][0] != res_target_new[c][0]
for c in ['Target', 'Word']])
with pytest.raises(ValueError) as err:
ext_target.update_mask(new_mask=['some', 'mask'])
assert 'must be a string' in str(err.value)
# Test default mask
assert res_default.shape[0] == 1
# Test return mask and input
assert res_return_mask['true_word'][0] == 'is'
assert 'true_word_score' in res_return_mask.columns
assert res_return_mask['sequence'][0] == 'This is not a tokenized sentence .'
# Make sure no non-ascii tokens are dropped
assert res.shape[1] == len(vocab) + 4
# remove variables
del ext_target, res, res_file, res_target, res_topn, \
res_threshold, res_default, res_return_mask
@pytest.mark.skipif(environ.get('TRAVIS', False) == 'true',
reason='high memory')
def test_bert_sentiment_extractor():
stim = ComplexTextStim(text='This is the best day of my life.')
stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
res = BertSentimentExtractor().transform(stim).to_df()
res_file = BertSentimentExtractor().transform(stim_file).to_df()
res_seq = BertSentimentExtractor(return_input=True).transform(stim).to_df()
res_softmax = BertSentimentExtractor(return_softmax=True).transform(stim).to_df()
assert res.shape[0] == 1
assert res_file['onset'][0] == 0.2
assert res_file['duration'][0] == 2.9
assert all([s in res.columns for s in ['sent_pos', 'sent_neg']])
assert res_seq['sequence'][0] == 'This is the best day of my life .'
assert all([res_softmax[s][0] >= 0 for s in ['sent_pos','sent_neg'] ])
assert all([res_softmax[s][0] <= 1 for s in ['sent_pos','sent_neg'] ])
# remove variables
del res, res_file, res_seq, res_softmax
def test_word_counter_extractor():
stim_txt = ComplexTextStim(text='This is a text where certain words occur'
' again and again Sometimes they are '
'lowercase sometimes they are uppercase '
'There are also words that may look '
'different but they come from the same '
'lemma Take a word like text and its '
'plural texts Oh words')
stim_with_onsets = ComplexTextStim(filename=join(TEXT_DIR,
'complex_stim_with_repetitions.txt'))
ext = WordCounterExtractor()
result_stim_txt = ext.transform(stim_txt).to_df()
result_stim_with_onsets = ext.transform(stim_with_onsets).to_df()
assert result_stim_txt.shape[0] == 45
assert all(result_stim_txt['word_count'] >= 1)
assert result_stim_txt['word_count'][15] == 2
assert result_stim_txt['word_count'][44] == 3
assert result_stim_with_onsets.shape[0] == 8
assert result_stim_with_onsets['onset'][2] == 0.8
assert result_stim_with_onsets['duration'][2] == 0.1
assert result_stim_with_onsets['word_count'][2] == 2
assert result_stim_with_onsets['word_count'][5] == 2
assert result_stim_with_onsets['word_count'][7] == 1
ext2 = WordCounterExtractor(log_scale=True)
result_stim_txt = ext2.transform(stim_txt).to_df()
assert all(result_stim_txt['log_word_count'] >= 0)
assert result_stim_txt['log_word_count'][15] == np.log(2)
assert result_stim_txt['log_word_count'][44] == np.log(3)
| bsd-3-clause |
jackwluo/py-quantmod | doc/source/conf.py | 1 | 5430 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# quantmod documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 23 22:09:25 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
# System path insert
sys.path.insert(0, os.path.abspath('../..'))
# Need to mock all the requirements
MOCK_MODULES = ['numpy',
'pandas',
'plotly',
'pandas_datareader',
'talib',
'pandas_datareader.data',
'plotly.plotly',
'plotly.offline']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
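# With these stubs registered, autodoc can import quantmod on a machine that lacks the
# heavy dependencies: e.g. 'import talib' resolves to a Mock instance whose attribute
# lookups return MagicMock objects, so module-level code in quantmod does not fail.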
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.githubpages']
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Quantmod'
copyright = '2017, Jack Luo'
author = 'Jack Luo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
def setup(app):
app.add_stylesheet('custom.css')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'quantmoddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'quantmod.tex', 'quantmod Documentation',
'Jack Luo', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'quantmod', 'quantmod Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'quantmod', 'quantmod Documentation',
author, 'quantmod', 'One line description of project.',
'Miscellaneous'),
]
| mit |
jorge2703/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
belltailjp/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
khkaminska/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
nsoranzo/tools-iuc | tools/heinz/heinz_scoring.py | 21 | 3661 | #!/usr/bin/env python
"""Calculate scores for Heinz.
This script transforms a P-value into a score:
1. Use alpha and lambda to calculate a threshold P-value.
2. Calculate a score for each P-value from alpha and the threshold
(the formulas are sketched in the comments below).
For more details, please refer to the paper doi:10.1093/bioinformatics/btn161
Input:
P-values from DESeq2 result: first column: names, second column P-values
Output:
Scores, which will be used as the input of Heinz.
First column: names, second column: scores.
Python 3 is required.
"""
# Implemented by: Chao (Cico) Zhang
# Homepage: https://Hi-IT.org
# Date: 14 Mar 2017
# Last modified: 23 May 2018
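# Formula sketch (mirrors the computation below; see the BUM model in the cited paper):
#   pie = lam + (1 - lam) * alpha
#   p_threshold = ((pie - lam * FDR) / (FDR - lam * FDR)) ** (1 / (alpha - 1))
#   score(p) = (alpha - 1) * (log(p) - log(p_threshold))
# With alpha < 1 (as in the BUM model), P-values below the threshold receive positive
# scores and larger P-values receive negative scores.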
import argparse
import sys
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='Transform a P-value into a '
'score which can be used as the input of '
'Heinz')
parser.add_argument('-n', '--node', required=True, dest='nodes',
metavar='nodes_pvalue.txt', type=str,
help='Input file of nodes with P-values')
parser.add_argument('-f', '--fdr', required=True, dest='fdr',
metavar='0.007', type=float, help='Choose a value of FDR')
parser.add_argument('-m', '--model', required=False, dest='param_file',
metavar='param.txt', type=str,
help='A txt file contains model params as input')
parser.add_argument('-a', '--alpha', required=False, dest='alpha',
metavar='0.234', type=float, default=0.5,
help='Single parameter alpha as input if txt input is '
'not provided')
parser.add_argument('-l', '--lambda', required=False, dest='lam',
metavar='0.345', type=float, default=0.5,
help='Single parameter lambda as input if txt input is '
'not provided')
parser.add_argument('-o', '--output', required=True, dest='output',
metavar='scores.txt', type=str,
help='The output file to store the calculated scores')
args = parser.parse_args()
# Check if the parameters are complete
if args.output is None:
sys.exit('Output file is not designated.')
if args.nodes is None:
sys.exit('Nodes with p-values must be provided.')
if args.fdr is None:
sys.exit('FDR must be provided')
if args.fdr >= 1 or args.fdr <= 0:
sys.exit('FDR must be greater than 0 and smaller than 1')
# run heinz-print according to the input type
if args.param_file is not None: # if BUM output is provided
with open(args.param_file) as p:
params = p.readlines()
lam = float(params[0]) # Maybe this is a bug
alpha = float(params[1]) # Maybe this is a bug
# if BUM output is not provided
elif args.alpha is not None and args.lam is not None:
lam = args.lam
alpha = args.alpha
else: # The input is not complete
sys.exit('The parameters of the model are incomplete.')
# Calculate the threshold P-value
pie = lam + (1 - lam) * alpha
p_threshold = np.power((pie - lam * args.fdr) / (args.fdr - lam * args.fdr),
1 / (alpha - 1))
print(p_threshold)
# Calculate the scores
input_pvalues = pd.read_csv(args.nodes, sep='\t', names=['node', 'pvalue'])
input_pvalues.loc[:, 'score'] = input_pvalues.pvalue.apply(lambda x:
(alpha - 1) * (np.log(x) - np.log(p_threshold)))
# print(input_pvalues.loc[:, ['node', 'score']])
input_pvalues.loc[:, ['node', 'score']].to_csv(args.output, sep='\t',
index=False, header=False)
| mit |
liyu1990/sklearn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
bsipocz/statsmodels | statsmodels/tsa/tests/test_tsa_tools.py | 19 | 9109 | '''tests for some time series analysis functions
'''
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
import statsmodels.api as sm
import statsmodels.tsa.stattools as tsa
import statsmodels.tsa.tsatools as tools
from statsmodels.tsa.tsatools import vec, vech
from .results import savedrvs
from .results.datamlw_tls import mlacf, mlccf, mlpacf, mlywar
xo = savedrvs.rvsdata.xar2
x100 = xo[-100:]/1000.
x1000 = xo/1000.
def test_acf():
acf_x = tsa.acf(x100, unbiased=False)[:21]
assert_array_almost_equal(mlacf.acf100.ravel(), acf_x, 8) #why only dec=8
acf_x = tsa.acf(x1000, unbiased=False)[:21]
assert_array_almost_equal(mlacf.acf1000.ravel(), acf_x, 8) #why only dec=9
def test_ccf():
ccf_x = tsa.ccf(x100[4:], x100[:-4], unbiased=False)[:21]
assert_array_almost_equal(mlccf.ccf100.ravel()[:21][::-1], ccf_x, 8)
ccf_x = tsa.ccf(x1000[4:], x1000[:-4], unbiased=False)[:21]
assert_array_almost_equal(mlccf.ccf1000.ravel()[:21][::-1], ccf_x, 8)
def test_pacf_yw():
pacfyw = tsa.pacf_yw(x100, 20, method='mle')
assert_array_almost_equal(mlpacf.pacf100.ravel(), pacfyw, 1)
pacfyw = tsa.pacf_yw(x1000, 20, method='mle')
assert_array_almost_equal(mlpacf.pacf1000.ravel(), pacfyw, 2)
#assert False
def test_pacf_ols():
pacfols = tsa.pacf_ols(x100, 20)
assert_array_almost_equal(mlpacf.pacf100.ravel(), pacfols, 8)
pacfols = tsa.pacf_ols(x1000, 20)
assert_array_almost_equal(mlpacf.pacf1000.ravel(), pacfols, 8)
#assert False
def test_ywcoef():
assert_array_almost_equal(mlywar.arcoef100[1:],
-sm.regression.yule_walker(x100, 10, method='mle')[0], 8)
assert_array_almost_equal(mlywar.arcoef1000[1:],
-sm.regression.yule_walker(x1000, 20, method='mle')[0], 8)
def test_yule_walker_inter():
# see 1869
x = np.array([1, -1, 2, 2, 0, -2, 1, 0, -3, 0, 0])
# it works
result = sm.regression.yule_walker(x, 3)
def test_duplication_matrix():
for k in range(2, 10):
m = tools.unvech(np.random.randn(k * (k + 1) // 2))
Dk = tools.duplication_matrix(k)
assert(np.array_equal(vec(m), np.dot(Dk, vech(m))))
def test_elimination_matrix():
for k in range(2, 10):
m = np.random.randn(k, k)
Lk = tools.elimination_matrix(k)
assert(np.array_equal(vech(m), np.dot(Lk, vec(m))))
def test_commutation_matrix():
m = np.random.randn(4, 3)
K = tools.commutation_matrix(4, 3)
assert(np.array_equal(vec(m.T), np.dot(K, vec(m))))
def test_vec():
arr = np.array([[1, 2],
[3, 4]])
assert(np.array_equal(vec(arr), [1, 3, 2, 4]))
def test_vech():
arr = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert(np.array_equal(vech(arr), [1, 4, 7, 5, 8, 9]))
def test_add_lag_insert():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:3],lagmat,nddata[3:,-1]))
lag_data = sm.tsa.add_lag(data, 'realgdp', 3)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
def test_add_lag_noinsert():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:],lagmat))
lag_data = sm.tsa.add_lag(data, 'realgdp', 3, insert=False)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
def test_add_lag_noinsert_atend():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,-1],3,trim='Both')
results = np.column_stack((nddata[3:,:],lagmat))
lag_data = sm.tsa.add_lag(data, 'cpi', 3, insert=False)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
# should be the same as insert
lag_data2 = sm.tsa.add_lag(data, 'cpi', 3, insert=True)
assert_equal(lag_data2.view((float,len(lag_data2.dtype.names))), results)
def test_add_lag_ndarray():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:3],lagmat,nddata[3:,-1]))
lag_data = sm.tsa.add_lag(nddata, 2, 3)
assert_equal(lag_data, results)
def test_add_lag_noinsert_ndarray():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:],lagmat))
lag_data = sm.tsa.add_lag(nddata, 2, 3, insert=False)
assert_equal(lag_data, results)
def test_add_lag_noinsertatend_ndarray():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,-1],3,trim='Both')
results = np.column_stack((nddata[3:,:],lagmat))
lag_data = sm.tsa.add_lag(nddata, 3, 3, insert=False)
assert_equal(lag_data, results)
# should be the same as insert also check negative col number
lag_data2 = sm.tsa.add_lag(nddata, -1, 3, insert=True)
assert_equal(lag_data2, results)
def test_add_lag1d():
data = np.random.randn(100)
lagmat = sm.tsa.lagmat(data,3,trim='Both')
results = np.column_stack((data[3:],lagmat))
lag_data = sm.tsa.add_lag(data, lags=3, insert=True)
assert_equal(results, lag_data)
# add index
data = data[:,None]
lagmat = sm.tsa.lagmat(data,3,trim='Both') # test for lagmat too
results = np.column_stack((data[3:],lagmat))
lag_data = sm.tsa.add_lag(data,lags=3, insert=True)
assert_equal(results, lag_data)
def test_add_lag1d_drop():
data = np.random.randn(100)
lagmat = sm.tsa.lagmat(data,3,trim='Both')
lag_data = sm.tsa.add_lag(data, lags=3, drop=True, insert=True)
assert_equal(lagmat, lag_data)
# no insert, should be the same
lag_data = sm.tsa.add_lag(data, lags=3, drop=True, insert=False)
assert_equal(lagmat, lag_data)
def test_add_lag1d_struct():
data = np.zeros(100, dtype=[('variable',float)])
nddata = np.random.randn(100)
data['variable'] = nddata
lagmat = sm.tsa.lagmat(nddata,3,trim='Both', original='in')
lag_data = sm.tsa.add_lag(data, 'variable', lags=3, insert=True)
assert_equal(lagmat, lag_data.view((float,4)))
lag_data = sm.tsa.add_lag(data, 'variable', lags=3, insert=False)
assert_equal(lagmat, lag_data.view((float,4)))
lag_data = sm.tsa.add_lag(data, lags=3, insert=True)
assert_equal(lagmat, lag_data.view((float,4)))
def test_add_lag_1d_drop_struct():
data = np.zeros(100, dtype=[('variable',float)])
nddata = np.random.randn(100)
data['variable'] = nddata
lagmat = sm.tsa.lagmat(nddata,3,trim='Both')
lag_data = sm.tsa.add_lag(data, lags=3, drop=True)
assert_equal(lagmat, lag_data.view((float,3)))
def test_add_lag_drop_insert():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,:2],lagmat,nddata[3:,-1]))
lag_data = sm.tsa.add_lag(data, 'realgdp', 3, drop=True)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
def test_add_lag_drop_noinsert():
data = sm.datasets.macrodata.load().data[['year','quarter','realgdp','cpi']]
nddata = data.view((float,4))
lagmat = sm.tsa.lagmat(nddata[:,2],3,trim='Both')
results = np.column_stack((nddata[3:,np.array([0,1,3])],lagmat))
lag_data = sm.tsa.add_lag(data, 'realgdp', 3, insert=False, drop=True)
assert_equal(lag_data.view((float,len(lag_data.dtype.names))), results)
def test_freq_to_period():
from pandas.tseries.frequencies import to_offset
freqs = ['A', 'AS-MAR', 'Q', 'QS', 'QS-APR', 'W', 'W-MON', 'B']
expected = [1, 1, 4, 4, 4, 52, 52, 52]
for i, j in zip(freqs, expected):
assert_equal(tools.freq_to_period(i), j)
assert_equal(tools.freq_to_period(to_offset(i)), j)
def test_detrend():
data = np.arange(5)
assert_array_almost_equal(sm.tsa.detrend(data,order=1),np.zeros_like(data))
assert_array_almost_equal(sm.tsa.detrend(data,order=0),[-2,-1,0,1,2])
data = np.arange(10).reshape(5,2)
assert_array_almost_equal(sm.tsa.detrend(data,order=1,axis=0),np.zeros_like(data))
assert_array_almost_equal(sm.tsa.detrend(data,order=0,axis=0),[[-4, -4], [-2, -2], [0, 0], [2, 2], [4, 4]])
assert_array_almost_equal(sm.tsa.detrend(data,order=0,axis=1),[[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])
if __name__ == '__main__':
#running them directly
# test_acf()
# test_ccf()
# test_pacf_yw()
# test_pacf_ols()
# test_ywcoef()
import nose
nose.runmodule()
| bsd-3-clause |
krez13/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
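# NOTE: the unconditional return below short-circuits this method, so signatures
# are never rendered; the if/else branch that follows is effectively unreachable.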
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
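# --- Illustrative usage (added sketch, not part of the original module) ------
# A minimal, hedged example of how get_doc_object is typically driven; the
# helper name below is hypothetical and nothing in this module calls it.
def _example_render_docstring(obj, use_plots=False):
    """Return the Sphinx/reST rendering of ``obj``'s numpydoc docstring."""
    doc = get_doc_object(obj, config={'use_plots': use_plots})
    return str(doc)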
| bsd-3-clause |
kaffeebrauer/Lean | Algorithm.Framework/Alphas/PearsonCorrelationPairsTradingAlphaModel.py | 8 | 4930 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm.Framework")
AddReference("QuantConnect.Indicators")
from QuantConnect import *
from QuantConnect.Indicators import *
from QuantConnect.Algorithm.Framework.Alphas import *
from Alphas.BasePairsTradingAlphaModel import BasePairsTradingAlphaModel
from datetime import timedelta
from scipy.stats import pearsonr
import numpy as np
import pandas as pd
class PearsonCorrelationPairsTradingAlphaModel(BasePairsTradingAlphaModel):
    ''' This alpha model is designed to rank every pair combination by its Pearson correlation
    and trade the pair with the highest correlation.
    This model generates alternating long ratio/short ratio insights emitted as a group'''
def __init__(self, lookback = 15,
resolution = Resolution.Minute,
threshold = 1,
minimumCorrelation = .5):
'''Initializes a new instance of the PearsonCorrelationPairsTradingAlphaModel class
Args:
lookback: lookback period of the analysis
resolution: analysis resolution
threshold: The percent [0, 100] deviation of the ratio from the mean before emitting an insight
minimumCorrelation: The minimum correlation to consider a tradable pair'''
super().__init__(lookback, resolution, threshold)
self.lookback = lookback
self.resolution = resolution
self.minimumCorrelation = minimumCorrelation
self.best_pair = ()
def OnSecuritiesChanged(self, algorithm, changes):
        '''Event fired each time we add/remove securities from the data feed.
Args:
algorithm: The algorithm instance that experienced the change in securities
changes: The security additions and removals from the algorithm'''
for security in changes.AddedSecurities:
self.Securities.append(security)
for security in changes.RemovedSecurities:
if security in self.Securities:
self.Securities.remove(security)
symbols = [ x.Symbol for x in self.Securities ]
history = algorithm.History(symbols, self.lookback, self.resolution).close.unstack(level=0)
if not history.empty:
df = self.get_price_dataframe(history)
stop = len(df.columns)
corr = dict()
for i in range(0, stop):
for j in range(i+1, stop):
if (j, i) not in corr:
corr[(i, j)] = pearsonr(df.iloc[:,i], df.iloc[:,j])[0]
corr = sorted(corr.items(), key = lambda kv: kv[1])
if corr[-1][1] >= self.minimumCorrelation:
self.best_pair = (symbols[corr[-1][0][0]], symbols[corr[-1][0][1]])
super().OnSecuritiesChanged(algorithm, changes)
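    # Note (added, illustrative only): the nested loop above ranks each unique
    # column pair by its Pearson correlation. Assuming the same log-return
    # DataFrame ``df``, an equivalent vectorized sketch with pandas would be:
    #
    #   corr = df.corr(method="pearson")
    #   pairs = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
    #   best = pairs.stack().idxmax()   # (symbol_i, symbol_j) with highest corr
    #
    # The explicit scipy.stats.pearsonr loop is kept here to mirror the original.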
def HasPassedTest(self, algorithm, asset1, asset2):
'''Check whether the assets pass a pairs trading test
Args:
algorithm: The algorithm instance that experienced the change in securities
asset1: The first asset's symbol in the pair
asset2: The second asset's symbol in the pair
Returns:
True if the statistical test for the pair is successful'''
return self.best_pair is not None and self.best_pair == (asset1, asset2)
def get_price_dataframe(self, df):
timezones = { x.Symbol.Value: x.Exchange.TimeZone for x in self.Securities }
# Use log prices
df = np.log(df)
is_single_timeZone = len(set(timezones.values())) == 1
if not is_single_timeZone:
series_dict = dict()
for column in df:
                # Convert the dataframe index from the exchange's local time zone to UTC time
to_utc = lambda x: Extensions.ConvertToUtc(x, timezones[column])
if self.resolution == Resolution.Daily:
to_utc = lambda x: Extensions.ConvertToUtc(x, timezones[column]).date()
data = df[[column]]
data.index = data.index.map(to_utc)
series_dict[column] = data[column]
df = pd.DataFrame(series_dict).dropna()
return (df - df.shift(1)).dropna() | apache-2.0 |
yyjiang/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
    frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: because they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
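# Added illustrative sketch (not part of the original example, never called):
# the module docstring above notes that HashingVectorizer is stateless and
# needs a TfidfTransformer pipelined after it to recover IDF weighting.
# On a hypothetical toy corpus this looks like:
def _toy_hashing_tfidf_sketch():
    toy_docs = ["the cat sat on the mat", "dogs and cats", "the dog sat"]
    hasher = HashingVectorizer(n_features=2 ** 10, stop_words='english',
                               non_negative=True, norm=None, binary=False)
    pipeline = make_pipeline(hasher, TfidfTransformer())
    # Returns an IDF-weighted sparse matrix of shape (3, 1024)
    return pipeline.fit_transform(toy_docs)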
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
gVallverdu/pymatgen | pymatgen/analysis/pourbaix_diagram.py | 1 | 41344 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module is intended to be used to compute Pourbaix diagrams
of arbitrary compositions and formation energies. If you use
this module in your work, please consider citing the following:
General formalism for solid-aqueous equilibria from DFT:
Persson et al., DOI: 10.1103/PhysRevB.85.235438
Decomposition maps, or Pourbaix hull diagrams
Singh et al., DOI: 10.1021/acs.chemmater.7b03980
Fast computation of many-element Pourbaix diagrams:
Patel et al., https://arxiv.org/abs/1909.00035 (submitted)
"""
import logging
import numpy as np
import itertools
import re
from copy import deepcopy
from functools import cmp_to_key, partial, lru_cache
from monty.json import MSONable, MontyDecoder
from multiprocessing import Pool
import warnings
from scipy.spatial import ConvexHull, HalfspaceIntersection
try:
from scipy.special import comb
except ImportError:
from scipy.misc import comb
from pymatgen.util.coord import Simplex
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.core.periodic_table import Element
from pymatgen.core.composition import Composition
from pymatgen.core.ion import Ion
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
from pymatgen.analysis.phase_diagram import PhaseDiagram, PDEntry
from tqdm import tqdm
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.4"
__maintainer__ = "Joseph Montoya"
__credits__ = "Arunima Singh, Joseph Montoya, Anjli Patel"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Nov 1, 2012"
logger = logging.getLogger(__name__)
MU_H2O = -2.4583
PREFAC = 0.0591
# TODO: Revise to more closely reflect PDEntry, invoke from energy/composition
# TODO: PourbaixEntries depend implicitly on having entry energies be
# formation energies, should be a better way to get from raw energies
# TODO: uncorrected_energy is a bit of a misnomer, but not sure what to rename
class PourbaixEntry(MSONable):
"""
An object encompassing all data relevant to a solid or ion
in a pourbaix diagram. Each bulk solid/ion has an energy
g of the form: e = e0 + 0.0591 log10(conc) - nO mu_H2O
+ (nH - 2nO) pH + phi (-nH + 2nO + q)
Note that the energies corresponding to the input entries
should be formation energies with respect to hydrogen and
oxygen gas in order for the pourbaix diagram formalism to
work. This may be changed to be more flexible in the future.
"""
def __init__(self, entry, entry_id=None, concentration=1e-6):
"""
Args:
entry (ComputedEntry/ComputedStructureEntry/PDEntry/IonEntry): An
entry object
            entry_id (): Optional identifier for the entry; if not supplied it
                is taken from entry.entry_id when available
            concentration (): Concentration of the ion species (only used for
                IonEntry inputs), defaults to 1e-6
"""
self.entry = entry
if isinstance(entry, IonEntry):
self.concentration = concentration
self.phase_type = "Ion"
self.charge = entry.ion.charge
else:
self.concentration = 1.0
self.phase_type = "Solid"
self.charge = 0.0
self.uncorrected_energy = entry.energy
if entry_id is not None:
self.entry_id = entry_id
elif hasattr(entry, "entry_id") and entry.entry_id:
self.entry_id = entry.entry_id
else:
self.entry_id = None
@property
def npH(self):
"""
        Returns: the coefficient of the pH term, nH - 2 * nO, computed from the composition.
"""
return self.entry.composition.get("H", 0.) - 2 * self.entry.composition.get("O", 0.)
@property
def nH2O(self):
"""
Returns: Number of H2O.
"""
return self.entry.composition.get("O", 0.)
@property
def nPhi(self):
"""
        Returns: the coefficient of the applied potential, npH - charge.
"""
return self.npH - self.charge
@property
def name(self):
"""
Returns: Name for entry
"""
if self.phase_type == "Solid":
return self.entry.composition.reduced_formula + "(s)"
elif self.phase_type == "Ion":
return self.entry.name
@property
def energy(self):
"""
        Returns (float): total energy of the Pourbaix
        entry (at pH = 0 and V = 0 vs. SHE)
"""
# Note: this implicitly depends on formation energies as input
return self.uncorrected_energy + self.conc_term - (MU_H2O * self.nH2O)
@property
def energy_per_atom(self):
"""
energy per atom of the pourbaix entry
Returns (float): energy per atom
"""
return self.energy / self.composition.num_atoms
def energy_at_conditions(self, pH, V):
"""
Get free energy for a given pH and V
Args:
pH (float): pH at which to evaluate free energy
V (float): voltage at which to evaluate free energy
Returns:
free energy at conditions
"""
return self.energy + self.npH * PREFAC * pH + self.nPhi * V
def get_element_fraction(self, element):
"""
Gets the elemental fraction of a given non-OH element
Args:
element (Element or str): string or element corresponding
to element to get from composition
Returns:
fraction of element / sum(all non-OH elements)
"""
return self.composition.get(element) * self.normalization_factor
@property
def normalized_energy(self):
"""
Returns:
energy normalized by number of non H or O atoms, e. g.
for Zn2O6, energy / 2 or for AgTe3(OH)3, energy / 4
"""
return self.energy * self.normalization_factor
def normalized_energy_at_conditions(self, pH, V):
"""
Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition
"""
return self.energy_at_conditions(pH, V) * self.normalization_factor
@property
def conc_term(self):
"""
Returns the concentration contribution to the free energy,
and should only be present when there are ions in the entry
"""
return PREFAC * np.log10(self.concentration)
# TODO: not sure if these are strictly necessary with refactor
def as_dict(self):
"""
Returns dict which contains Pourbaix Entry data.
Note that the pH, voltage, H2O factors are always calculated when
constructing a PourbaixEntry object.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
if isinstance(self.entry, IonEntry):
d["entry_type"] = "Ion"
else:
d["entry_type"] = "Solid"
d["entry"] = self.entry.as_dict()
d["concentration"] = self.concentration
d["entry_id"] = self.entry_id
return d
@classmethod
def from_dict(cls, d):
"""
        Invokes a PourbaixEntry from a dict representation.
"""
entry_type = d["entry_type"]
if entry_type == "Ion":
entry = IonEntry.from_dict(d["entry"])
else:
entry = PDEntry.from_dict(d["entry"])
entry_id = d["entry_id"]
concentration = d["concentration"]
return PourbaixEntry(entry, entry_id, concentration)
@property
def normalization_factor(self):
"""
Sum of number of atoms minus the number of H and O in composition
"""
return 1.0 / (self.num_atoms - self.composition.get('H', 0)
- self.composition.get('O', 0))
@property
def composition(self):
"""
Returns composition
"""
return self.entry.composition
@property
def num_atoms(self):
"""
Return number of atoms in current formula. Useful for normalization
"""
return self.composition.num_atoms
def __repr__(self):
return "Pourbaix Entry : {} with energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {} ".format(
self.entry.composition, self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id)
def __str__(self):
return self.__repr__()
class MultiEntry(PourbaixEntry):
"""
PourbaixEntry-like object for constructing multi-elemental Pourbaix
diagrams.
"""
def __init__(self, entry_list, weights=None):
"""
Initializes a MultiEntry.
Args:
entry_list ([PourbaixEntry]): List of component PourbaixEntries
weights ([float]): Weights associated with each entry. Default is None
"""
if weights is None:
self.weights = [1.0] * len(entry_list)
else:
self.weights = weights
self.entry_list = entry_list
@lru_cache()
def __getattr__(self, item):
"""
Because most of the attributes here are just weighted
averages of the entry_list, we save some space by
having a set of conditionals to define the attributes
"""
# Attributes that are weighted averages of entry attributes
if item in ["energy", "npH", "nH2O", "nPhi", "conc_term",
"composition", "uncorrected_energy"]:
# TODO: Composition could be changed for compat with sum
if item == "composition":
start = Composition({})
else:
start = 0
return sum([getattr(e, item) * w
for e, w in zip(self.entry_list, self.weights)], start)
# Attributes that are just lists of entry attributes
elif item in ["entry_id", "phase_type"]:
return [getattr(e, item) for e in self.entry_list]
# normalization_factor, num_atoms should work from superclass
return self.__getattribute__(item)
@property
def name(self):
"""
MultiEntry name, i. e. the name of each entry joined by ' + '
"""
return " + ".join([e.name for e in self.entry_list])
def __repr__(self):
return "Multiple Pourbaix Entry: energy = {:.4f}, npH = {}, nPhi = {}, nH2O = {}, entry_id = {}, species: {}" \
.format(self.energy, self.npH, self.nPhi, self.nH2O, self.entry_id, self.name)
def __str__(self):
return self.__repr__()
def as_dict(self):
"""
Returns: MSONable dict
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry_list": [e.as_dict() for e in self.entry_list],
"weights": self.weights}
@classmethod
def from_dict(cls, d):
"""
Args:
d (): Dict representation
Returns:
MultiEntry
"""
entry_list = [PourbaixEntry.from_dict(e) for e in d.get("entry_list")]
return cls(entry_list, d.get("weights"))
# TODO: this class isn't particularly useful in its current form, could be
# refactored to include information about the reference solid
class IonEntry(PDEntry):
"""
Object similar to PDEntry, but contains an Ion object instead of a
Composition object.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
"""
def __init__(self, ion, energy, name=None, attribute=None):
"""
Args:
ion: Ion object
energy: Energy for composition.
name: Optional parameter to name the entry. Defaults to the
chemical formula.
"""
self.ion = ion
# Auto-assign name
name = name if name else self.ion.reduced_formula
super(IonEntry, self).__init__(
composition=ion.composition, energy=energy, name=name,
attribute=attribute)
@classmethod
def from_dict(cls, d):
"""
Returns an IonEntry object from a dict.
"""
return IonEntry(Ion.from_dict(d["ion"]), d["energy"], d.get("name"),
d.get("attribute"))
def as_dict(self):
"""
Creates a dict of composition, energy, and ion name
"""
d = {"ion": self.ion.as_dict(), "energy": self.energy,
"name": self.name}
return d
def __repr__(self):
return "IonEntry : {} with energy = {:.4f}".format(self.composition,
self.energy)
def __str__(self):
return self.__repr__()
def ion_or_solid_comp_object(formula):
"""
Returns either an ion object or composition object given
a formula.
Args:
formula: String formula. Eg. of ion: NaOH(aq), Na[+];
Eg. of solid: Fe2O3(s), Fe(s), Na2O
Returns:
Composition/Ion object
"""
m = re.search(r"\[([^\[\]]+)\]|\(aq\)", formula)
if m:
comp_obj = Ion.from_formula(formula)
elif re.search(r"\(s\)", formula):
comp_obj = Composition(formula[:-3])
else:
comp_obj = Composition(formula)
return comp_obj
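# Added usage note (illustrative): "Fe2O3(s)" and "Fe2O3" both yield a
# Composition, while bracketed or "(aq)" formulas such as "Fe[3+]" or
# "NaOH(aq)" are parsed into Ion objects via Ion.from_formula.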
ELEMENTS_HO = {Element('H'), Element('O')}
# TODO: the solids filter breaks some of the functionality of the
# heatmap plotter, because the reference states for decomposition
# don't include oxygen/hydrogen in the OER/HER regions
# TODO: create a from_phase_diagram class method for non-formation energy
# invocation
# TODO: invocation from a MultiEntry entry list could be a bit more robust
# TODO: serialization is still a bit rough around the edges
class PourbaixDiagram(MSONable):
"""
Class to create a Pourbaix diagram from entries
"""
def __init__(self, entries, comp_dict=None, conc_dict=None,
filter_solids=False, nproc=None):
"""
Args:
entries ([PourbaixEntry] or [MultiEntry]): Entries list
containing Solids and Ions or a list of MultiEntries
comp_dict ({str: float}): Dictionary of compositions,
defaults to equal parts of each elements
conc_dict ({str: float}): Dictionary of ion concentrations,
defaults to 1e-6 for each element
filter_solids (bool): applying this filter to a pourbaix
diagram ensures all included phases are filtered by
stability on the compositional phase diagram. This
breaks some of the functionality of the analysis,
though, so use with caution.
nproc (int): number of processes to generate multientries with
in parallel. Defaults to None (serial processing)
"""
entries = deepcopy(entries)
# Get non-OH elements
self.pbx_elts = set(itertools.chain.from_iterable(
[entry.composition.elements for entry in entries]))
self.pbx_elts = list(self.pbx_elts - ELEMENTS_HO)
self.dim = len(self.pbx_elts) - 1
# Process multientry inputs
if isinstance(entries[0], MultiEntry):
self._processed_entries = entries
# Extract individual entries
single_entries = list(set(itertools.chain.from_iterable(
[e.entry_list for e in entries])))
self._unprocessed_entries = single_entries
self._filtered_entries = single_entries
self._conc_dict = None
self._elt_comp = {k: v for k, v in entries[0].composition.items()
if k not in ELEMENTS_HO}
self._multielement = True
# Process single entry inputs
else:
# Set default conc/comp dicts
if not comp_dict:
comp_dict = {elt.symbol: 1. / len(self.pbx_elts) for elt in self.pbx_elts}
if not conc_dict:
conc_dict = {elt.symbol: 1e-6 for elt in self.pbx_elts}
self._conc_dict = conc_dict
self._elt_comp = comp_dict
self.pourbaix_elements = self.pbx_elts
solid_entries = [entry for entry in entries
if entry.phase_type == "Solid"]
ion_entries = [entry for entry in entries
if entry.phase_type == "Ion"]
# If a conc_dict is specified, override individual entry concentrations
for entry in ion_entries:
ion_elts = list(set(entry.composition.elements) - ELEMENTS_HO)
# TODO: the logic here for ion concentration setting is in two
# places, in PourbaixEntry and here, should be consolidated
if len(ion_elts) == 1:
entry.concentration = conc_dict[ion_elts[0].symbol] \
* entry.normalization_factor
elif len(ion_elts) > 1 and not entry.concentration:
raise ValueError("Elemental concentration not compatible "
"with multi-element ions")
self._unprocessed_entries = solid_entries + ion_entries
if not len(solid_entries + ion_entries) == len(entries):
raise ValueError("All supplied entries must have a phase type of "
"either \"Solid\" or \"Ion\"")
if filter_solids:
# O is 2.46 b/c pbx entry finds energies referenced to H2O
entries_HO = [ComputedEntry('H', 0), ComputedEntry('O', 2.46)]
solid_pd = PhaseDiagram(solid_entries + entries_HO)
solid_entries = list(set(solid_pd.stable_entries) - set(entries_HO))
self._filtered_entries = solid_entries + ion_entries
if len(comp_dict) > 1:
self._multielement = True
self._processed_entries = self._preprocess_pourbaix_entries(
self._filtered_entries, nproc=nproc)
else:
self._processed_entries = self._filtered_entries
self._multielement = False
self._stable_domains, self._stable_domain_vertices = \
self.get_pourbaix_domains(self._processed_entries)
def _convert_entries_to_points(self, pourbaix_entries):
"""
Args:
pourbaix_entries ([PourbaixEntry]): list of pourbaix entries
to process into vectors in nph-nphi-composition space
Returns:
list of vectors, [[nph, nphi, e0, x1, x2, ..., xn-1]]
corresponding to each entry in nph-nphi-composition space
"""
vecs = [[entry.npH, entry.nPhi, entry.energy] +
[entry.composition.get(elt) for elt in self.pbx_elts[:-1]]
for entry in pourbaix_entries]
vecs = np.array(vecs)
norms = np.transpose([[entry.normalization_factor
for entry in pourbaix_entries]])
vecs *= norms
return vecs
def _get_hull_in_nph_nphi_space(self, entries):
"""
Generates convex hull of pourbaix diagram entries in composition,
npH, and nphi space. This enables filtering of multi-entries
such that only compositionally stable combinations of entries
are included.
Args:
entries ([PourbaixEntry]): list of PourbaixEntries to construct
the convex hull
Returns: list of entries and stable facets corresponding to that
list of entries
"""
ion_entries = [entry for entry in entries
if entry.phase_type == "Ion"]
solid_entries = [entry for entry in entries
if entry.phase_type == "Solid"]
# Pre-filter solids based on min at each composition
logger.debug("Pre-filtering solids by min energy at each composition")
sorted_entries = sorted(
solid_entries, key=lambda x: (x.composition.reduced_composition,
x.entry.energy_per_atom))
grouped_by_composition = itertools.groupby(
sorted_entries, key=lambda x: x.composition.reduced_composition)
min_entries = [list(grouped_entries)[0]
for comp, grouped_entries in grouped_by_composition]
min_entries += ion_entries
logger.debug("Constructing nph-nphi-composition points for qhull")
vecs = self._convert_entries_to_points(min_entries)
maxes = np.max(vecs[:, :3], axis=0)
extra_point = np.concatenate(
[maxes, np.ones(self.dim) / self.dim], axis=0)
# Add padding for extra point
pad = 1000
extra_point[2] += pad
points = np.concatenate([vecs, np.array([extra_point])], axis=0)
logger.debug("Constructing convex hull in nph-nphi-composition space")
hull = ConvexHull(points, qhull_options="QJ i")
# Create facets and remove top
facets = [facet for facet in hull.simplices
if not len(points) - 1 in facet]
if self.dim > 1:
logger.debug("Filtering facets by pourbaix composition")
valid_facets = []
for facet in facets:
comps = vecs[facet][:, 3:]
full_comps = np.concatenate([
comps, 1 - np.sum(comps, axis=1).reshape(len(comps), 1)], axis=1)
                # Ensure a compositional interior point exists in the simplex
if np.linalg.matrix_rank(full_comps) > self.dim:
valid_facets.append(facet)
else:
valid_facets = facets
return min_entries, valid_facets
def _preprocess_pourbaix_entries(self, entries, nproc=None):
"""
Generates multi-entries for pourbaix diagram
Args:
entries ([PourbaixEntry]): list of PourbaixEntries to preprocess
into MultiEntries
nproc (int): number of processes to be used in parallel
treatment of entry combos
Returns:
([MultiEntry]) list of stable MultiEntry candidates
"""
# Get composition
tot_comp = Composition(self._elt_comp)
min_entries, valid_facets = self._get_hull_in_nph_nphi_space(entries)
combos = []
for facet in valid_facets:
for i in range(1, self.dim + 2):
these_combos = list()
for combo in itertools.combinations(facet, i):
these_entries = [min_entries[i] for i in combo]
these_combos.append(frozenset(these_entries))
combos.append(these_combos)
all_combos = set(itertools.chain.from_iterable(combos))
list_combos = []
for i in all_combos:
list_combos.append(list(i))
all_combos = list_combos
multi_entries = []
# Parallel processing of multi-entry generation
if nproc is not None:
f = partial(self.process_multientry, prod_comp=tot_comp)
with Pool(nproc) as p:
multi_entries = list(tqdm(p.imap(f, all_combos),
total=len(all_combos)))
multi_entries = list(filter(bool, multi_entries))
else:
# Serial processing of multi-entry generation
for combo in tqdm(all_combos):
multi_entry = self.process_multientry(combo, prod_comp=tot_comp)
if multi_entry:
multi_entries.append(multi_entry)
return multi_entries
def _generate_multielement_entries(self, entries, nproc=None):
"""
Create entries for multi-element Pourbaix construction.
This works by finding all possible linear combinations
of entries that can result in the specified composition
from the initialized comp_dict.
Args:
entries ([PourbaixEntries]): list of pourbaix entries
to process into MultiEntries
nproc (int): number of processes to be used in parallel
treatment of entry combos
"""
N = len(self._elt_comp) # No. of elements
total_comp = Composition(self._elt_comp)
# generate all combinations of compounds that have all elements
entry_combos = [itertools.combinations(
entries, j + 1) for j in range(N)]
entry_combos = itertools.chain.from_iterable(entry_combos)
entry_combos = filter(lambda x: total_comp < MultiEntry(x).composition,
entry_combos)
# Generate and filter entries
processed_entries = []
total = sum([comb(len(entries), j + 1)
for j in range(N)])
if total > 1e6:
warnings.warn("Your pourbaix diagram includes {} entries and may "
"take a long time to generate.".format(total))
# Parallel processing of multi-entry generation
if nproc is not None:
f = partial(self.process_multientry, prod_comp=total_comp)
with Pool(nproc) as p:
processed_entries = list(tqdm(p.imap(f, entry_combos),
total=total))
processed_entries = list(filter(bool, processed_entries))
# Serial processing of multi-entry generation
else:
for entry_combo in entry_combos:
processed_entry = self.process_multientry(entry_combo, total_comp)
if processed_entry is not None:
processed_entries.append(processed_entry)
return processed_entries
@staticmethod
def process_multientry(entry_list, prod_comp, coeff_threshold=1e-4):
"""
Static method for finding a multientry based on
a list of entries and a product composition.
Essentially checks to see if a valid aqueous
reaction exists between the entries and the
product composition and returns a MultiEntry
with weights according to the coefficients if so.
Args:
entry_list ([Entry]): list of entries from which to
create a MultiEntry
prod_comp (Composition): composition constraint for setting
weights of MultiEntry
coeff_threshold (float): threshold of stoichiometric
coefficients to filter, if weights are lower than
this value, the entry is not returned
"""
dummy_oh = [Composition("H"), Composition("O")]
try:
            # Get balanced reaction coeffs (checked below against the coefficient threshold)
# Note that we get reduced compositions for solids and non-reduced
# compositions for ions because ions aren't normalized due to
# their charge state.
entry_comps = [e.composition for e in entry_list]
rxn = Reaction(entry_comps + dummy_oh, [prod_comp])
react_coeffs = [-rxn.get_coeff(comp) for comp in entry_comps]
all_coeffs = react_coeffs + [rxn.get_coeff(prod_comp)]
# Check if reaction coeff threshold met for pourbaix compounds
# All reactant/product coefficients must be positive nonzero
if all([coeff > coeff_threshold for coeff in all_coeffs]):
return MultiEntry(entry_list, weights=react_coeffs)
else:
return None
except ReactionError:
return None
@staticmethod
def get_pourbaix_domains(pourbaix_entries, limits=None):
"""
Returns a set of pourbaix stable domains (i. e. polygons) in
pH-V space from a list of pourbaix_entries
This function works by using scipy's HalfspaceIntersection
function to construct all of the 2-D polygons that form the
boundaries of the planes corresponding to individual entry
        gibbs free energies as a function of pH and V. Hyperplanes
        of the form a*pH + b*V + g - g(0, 0) = 0 (with the coefficient of
        g fixed to 1) are constructed and supplied to
        HalfspaceIntersection, which then finds the
boundaries of each pourbaix region using the intersection
points.
Args:
pourbaix_entries ([PourbaixEntry]): Pourbaix entries
with which to construct stable pourbaix domains
limits ([[float]]): limits in which to do the pourbaix
analysis
Returns:
Returns a dict of the form {entry: [boundary_points]}.
The list of boundary points are the sides of the N-1
dim polytope bounding the allowable ph-V range of each entry.
"""
if limits is None:
limits = [[-2, 16], [-4, 4]]
# Get hyperplanes
hyperplanes = [np.array([-PREFAC * entry.npH, -entry.nPhi,
0, -entry.energy]) * entry.normalization_factor
for entry in pourbaix_entries]
hyperplanes = np.array(hyperplanes)
hyperplanes[:, 2] = 1
max_contribs = np.max(np.abs(hyperplanes), axis=0)
g_max = np.dot(-max_contribs, [limits[0][1], limits[1][1], 0, 1])
# Add border hyperplanes and generate HalfspaceIntersection
border_hyperplanes = [[-1, 0, 0, limits[0][0]],
[1, 0, 0, -limits[0][1]],
[0, -1, 0, limits[1][0]],
[0, 1, 0, -limits[1][1]],
[0, 0, -1, 2 * g_max]]
hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
interior_point = np.average(limits, axis=1).tolist() + [g_max]
hs_int = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
# organize the boundary points by entry
pourbaix_domains = {entry: [] for entry in pourbaix_entries}
for intersection, facet in zip(hs_int.intersections,
hs_int.dual_facets):
for v in facet:
if v < len(pourbaix_entries):
this_entry = pourbaix_entries[v]
pourbaix_domains[this_entry].append(intersection)
# Remove entries with no pourbaix region
pourbaix_domains = {k: v for k, v in pourbaix_domains.items() if v}
pourbaix_domain_vertices = {}
for entry, points in pourbaix_domains.items():
points = np.array(points)[:, :2]
# Initial sort to ensure consistency
points = points[np.lexsort(np.transpose(points))]
center = np.average(points, axis=0)
points_centered = points - center
# Sort points by cross product of centered points,
# isn't strictly necessary but useful for plotting tools
points_centered = sorted(points_centered,
key=cmp_to_key(lambda x, y: x[0] * y[1] - x[1] * y[0]))
points = points_centered + center
# Create simplices corresponding to pourbaix boundary
simplices = [Simplex(points[indices])
for indices in ConvexHull(points).simplices]
pourbaix_domains[entry] = simplices
pourbaix_domain_vertices[entry] = points
return pourbaix_domains, pourbaix_domain_vertices
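    # Added note (descriptive sketch of the encoding above, values hypothetical):
    # each entry contributes a halfspace row [-0.0591*npH, -nPhi, 1, -energy],
    # so the region g >= energy + 0.0591*npH*pH + nPhi*V is cut away and the
    # feasible region lies under the lower envelope of the energy planes over
    # the bordered pH/V box. For example, an entry with npH = 2, nPhi = 2 and
    # energy = -1.0 yields (up to its normalization factor) the row
    # [-0.1182, -2, 1, 1.0].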
def find_stable_entry(self, pH, V):
"""
Finds stable entry at a pH,V condition
Args:
pH (float): pH to find stable entry
V (float): V to find stable entry
        Returns:
            (PourbaixEntry or MultiEntry): the entry with the lowest energy at the given pH, V
"""
energies_at_conditions = [e.normalized_energy_at_conditions(pH, V)
for e in self.stable_entries]
return self.stable_entries[np.argmin(energies_at_conditions)]
def get_decomposition_energy(self, entry, pH, V):
"""
Finds decomposition to most stable entries in eV/atom,
supports vectorized inputs for pH and V
Args:
entry (PourbaixEntry): PourbaixEntry corresponding to
compound to find the decomposition for
pH (float, [float]): pH at which to find the decomposition
V (float, [float]): voltage at which to find the decomposition
Returns:
Decomposition energy for the entry, i. e. the energy above
the "pourbaix hull" in eV/atom at the given conditions
"""
# Check composition consistency between entry and Pourbaix diagram:
pbx_comp = Composition(self._elt_comp).fractional_composition
entry_pbx_comp = Composition(
{elt: coeff for elt, coeff in entry.composition.items()
if elt not in ELEMENTS_HO}).fractional_composition
if entry_pbx_comp != pbx_comp:
raise ValueError("Composition of stability entry does not match "
"Pourbaix Diagram")
entry_normalized_energy = entry.normalized_energy_at_conditions(pH, V)
hull_energy = self.get_hull_energy(pH, V)
decomposition_energy = entry_normalized_energy - hull_energy
# Convert to eV/atom instead of eV/normalized formula unit
decomposition_energy /= entry.normalization_factor
decomposition_energy /= entry.composition.num_atoms
return decomposition_energy
def get_hull_energy(self, pH, V):
"""
Gets the minimum energy of the pourbaix "basin" that is formed
from the stable pourbaix planes. Vectorized.
Args:
pH (float or [float]): pH at which to find the hull energy
V (float or [float]): V at which to find the hull energy
Returns:
(float or [float]) minimum pourbaix energy at conditions
"""
all_gs = np.array([e.normalized_energy_at_conditions(
pH, V) for e in self.stable_entries])
base = np.min(all_gs, axis=0)
return base
def get_stable_entry(self, pH, V):
"""
Gets the stable entry at a given pH, V condition
Args:
pH (float): pH at a given condition
V (float): V at a given condition
Returns:
(PourbaixEntry or MultiEntry): pourbaix or multi-entry
corresponding ot the minimum energy entry at a given
pH, V condition
"""
all_gs = np.array([e.normalized_energy_at_conditions(
pH, V) for e in self.stable_entries])
return self.stable_entries[np.argmin(all_gs)]
@property
def stable_entries(self):
"""
Returns the stable entries in the Pourbaix diagram.
"""
return list(self._stable_domains.keys())
@property
def unstable_entries(self):
"""
Returns all unstable entries in the Pourbaix diagram
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def all_entries(self):
"""
Return all entries used to generate the pourbaix diagram
"""
return self._processed_entries
@property
def unprocessed_entries(self):
"""
Return unprocessed entries
"""
return self._unprocessed_entries
def as_dict(self, include_unprocessed_entries=False):
"""
Args:
include_unprocessed_entries (): Whether to include unprocessed entries.
Returns:
MSONable dict.
"""
if include_unprocessed_entries:
entries = [e.as_dict() for e in self._unprocessed_entries]
else:
entries = [e.as_dict() for e in self._processed_entries]
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": entries,
"comp_dict": self._elt_comp,
"conc_dict": self._conc_dict}
return d
@classmethod
def from_dict(cls, d):
"""
Args:
d (): Dict representation.
Returns:
PourbaixDiagram
"""
decoded_entries = MontyDecoder().process_decoded(d['entries'])
return cls(decoded_entries, d.get('comp_dict'),
d.get('conc_dict'))
class PourbaixPlotter:
"""
A plotter class for phase diagrams.
"""
def __init__(self, pourbaix_diagram):
"""
Args:
pourbaix_diagram (PourbaixDiagram): A PourbaixDiagram object.
"""
self._pbx = pourbaix_diagram
def show(self, *args, **kwargs):
"""
Shows the pourbaix plot
Args:
*args: args to get_pourbaix_plot
**kwargs: kwargs to get_pourbaix_plot
Returns:
None
"""
plt = self.get_pourbaix_plot(*args, **kwargs)
plt.show()
def get_pourbaix_plot(self, limits=None, title="",
label_domains=True, plt=None):
"""
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with pourbaix diagram
"""
if limits is None:
limits = [[-2, 16], [-3, 3]]
plt = plt or pretty_plot(16)
xlim = limits[0]
ylim = limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
for entry, vertices in self._pbx._stable_domain_vertices.items():
center = np.average(vertices, axis=0)
x, y = np.transpose(np.vstack([vertices, vertices[0]]))
plt.plot(x, y, 'k-', linewidth=lw)
if label_domains:
plt.annotate(generate_entry_label(entry), center, ha='center',
va='center', fontsize=20, color="b").draggable()
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt
def plot_entry_stability(self, entry, pH_range=None, pH_resolution=100,
V_range=None, V_resolution=100, e_hull_max=1,
cmap='RdYlBu_r', **kwargs):
"""
Args:
            entry (PourbaixEntry): entry whose stability (energy above the
                pourbaix hull) is mapped
            pH_range ([float]): pH limits of the map, defaults to [-2, 16]
            pH_resolution (int): number of pH grid points, defaults to 100
            V_range ([float]): voltage limits of the map, defaults to [-3, 3]
            V_resolution (int): number of voltage grid points, defaults to 100
            e_hull_max (float): upper limit of the stability color scale (eV/atom)
            cmap (str): matplotlib colormap name for the stability map
            **kwargs (): additional kwargs passed to get_pourbaix_plot
        Returns:
            plt (pyplot): pyplot object with the pourbaix plot and stability map
"""
if pH_range is None:
pH_range = [-2, 16]
if V_range is None:
V_range = [-3, 3]
# plot the Pourbaix diagram
plt = self.get_pourbaix_plot(**kwargs)
pH, V = np.mgrid[pH_range[0]:pH_range[1]:pH_resolution * 1j, V_range[0]:V_range[1]:V_resolution * 1j]
stability = self._pbx.get_decomposition_energy(entry, pH, V)
# Plot stability map
plt.pcolor(pH, V, stability, cmap=cmap, vmin=0, vmax=e_hull_max)
cbar = plt.colorbar()
cbar.set_label("Stability of {} (eV/atom)".format(
generate_entry_label(entry)))
# Set ticklabels
# ticklabels = [t.get_text() for t in cbar.ax.get_yticklabels()]
# ticklabels[-1] = '>={}'.format(ticklabels[-1])
# cbar.ax.set_yticklabels(ticklabels)
return plt
def domain_vertices(self, entry):
"""
Returns the vertices of the Pourbaix domain.
Args:
entry: Entry for which domain vertices are desired
Returns:
list of vertices
"""
return self._pbx._stable_domain_vertices[entry]
def generate_entry_label(entry):
"""
Generates a label for the pourbaix plotter
Args:
entry (PourbaixEntry or MultiEntry): entry to get a label for
"""
if isinstance(entry, MultiEntry):
return " + ".join([latexify_ion(latexify(e.name)) for e in entry.entry_list])
else:
return latexify_ion(latexify(entry.name))
def latexify_ion(formula):
"""
Convert a formula to latex format.
Args:
formula (str): Formula
Returns:
Latex string.
"""
return re.sub(r"()\[([^)]*)\]", r"\1$^{\2}$", formula)
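# Added usage note (illustrative): latexify_ion("Fe[3+]") returns "Fe$^{3+}$",
# and generate_entry_label applies latexify + latexify_ion to each sub-entry,
# joining MultiEntry names with " + ".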
| mit |
ThomasMiconi/htmresearch | projects/sequence_prediction/discrete_sequences/plot.py | 8 | 7504 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import json
from matplotlib import pyplot
import numpy
from expsuite import PyExperimentSuite
def readExperiment(experiment):
with open(experiment, "r") as file:
predictions = []
predictionsSDR = []
truths = []
iterations = []
resets = []
randoms = []
trains = []
killCell = []
sequenceCounter = []
counter = 0
for line in file.readlines():
dataRec = json.loads(line)
if 'iteration' in dataRec.keys():
iterations.append(dataRec['iteration'])
else:
iterations.append(counter)
if 'predictions' in dataRec.keys():
predictions.append(dataRec['predictions'])
else:
predictions.append(None)
if 'predictionsSDR' in dataRec.keys():
predictionsSDR.append(dataRec['predictionsSDR'])
else:
predictionsSDR.append(None)
if 'truth' in dataRec.keys():
truths.append(dataRec['truth'])
else:
truths.append(None)
if 'reset' in dataRec.keys():
resets.append(dataRec['reset'])
else:
resets.append(None)
if 'random' in dataRec.keys():
randoms.append(dataRec['random'])
else:
randoms.append(None)
if 'train' in dataRec.keys():
trains.append(dataRec['train'])
else:
trains.append(None)
if 'killCell' in dataRec.keys():
killCell.append(dataRec['killCell'])
else:
killCell.append(None)
if 'sequenceCounter' in dataRec.keys():
sequenceCounter.append(dataRec['sequenceCounter'])
else:
sequenceCounter.append(None)
counter += 1
return {'predictions': predictions,
'predictionsSDR': predictionsSDR,
'truths': truths,
'iterations': iterations,
'resets': resets,
'randoms': randoms,
'trains': trains,
'killCell': killCell,
'sequenceCounter': sequenceCounter}
def movingAverage(a, n):
movingAverage = []
for i in xrange(len(a)):
start = max(0, i - n)
values = a[start:i+1]
movingAverage.append(sum(values) / float(len(values)))
return movingAverage
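# Added note (worked example): movingAverage is a trailing mean whose window
# actually spans n+1 samples once i >= n, e.g. movingAverage([1, 2, 3, 4], 2)
# returns [1.0, 1.5, 2.0, 3.0].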
def plotMovingAverage(data, window, label=None):
movingData = movingAverage(data, min(len(data), window))
style = 'ro' if len(data) < window else ''
pyplot.plot(range(len(movingData)), movingData, style, label=label)
def plotAccuracy(results, train=None, window=100, type="sequences", label=None, hideTraining=True, lineSize=None):
pyplot.title("High-order prediction")
pyplot.xlabel("# of sequences seen")
pyplot.ylabel("High-order prediction accuracy over last {0} tested {1}".format(window, type))
accuracy = results[0]
x = results[1]
movingData = movingAverage(accuracy, min(len(accuracy), window))
pyplot.plot(x, movingData, label=label, linewidth=lineSize)
# dX = numpy.array([x[i+1] - x[i] for i in xrange(len(x) - 1)])
# testEnd = numpy.array(x)[dX > dX.mean()].tolist()
# testEnd = testEnd + [x[-1]]
# dX = numpy.insert(dX, 0, 0)
# testStart = numpy.array(x)[dX > dX.mean()].tolist()
# testStart = [0] + testStart
# for line in testStart:
# pyplot.axvline(line, color='orange')
# for i in xrange(len(testStart)):
# pyplot.axvspan(testStart[i], testEnd[i], alpha=0.15, facecolor='black')
if not hideTraining:
for i in xrange(len(train)):
if train[i]:
pyplot.axvline(i, color='orange')
pyplot.xlim(0, x[-1])
pyplot.ylim(0, 1.1)
def computeAccuracy(predictions, truths, iterations,
resets=None, randoms=None, num=None,
sequenceCounter=None):
accuracy = []
x = []
for i in xrange(len(predictions) - 1):
if num is not None and i > num:
continue
if truths[i] is None:
continue
# identify the end of sequence
if resets is not None or randoms is not None:
if not (resets[i+1] or randoms[i+1]):
continue
correct = truths[i] is None or truths[i] in predictions[i]
accuracy.append(correct)
if sequenceCounter is not None:
x.append(sequenceCounter[i])
else:
x.append(iterations[i])
return (accuracy, x)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('experiments', metavar='/path/to/experiment /path/...', nargs='+', type=str)
parser.add_argument('-w', '--window', type=int, default=100)
parser.add_argument('-n', '--num', type=int, default=None)
parser.add_argument('-t', '--training-hide', type=int, nargs='+')
parser.add_argument('-g', '--graph-labels', type=str, nargs='+')
parser.add_argument('-s', '--size-of-line', type=float, nargs='+')
parser.add_argument('-l', '--legend-position', type=int, default=4)
parser.add_argument('-f', '--full', action='store_true')
parser.add_argument('-o', '--output', type=str, default=None)
suite = PyExperimentSuite()
args = parser.parse_args()
from pylab import rcParams
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
rcParams.update({'ytick.labelsize': 8})
rcParams.update({'figure.figsize': (12, 6)})
rcParams.update({'pdf.fonttype': 42})
experiments = args.experiments
for i, experiment in enumerate(experiments):
iteration = suite.get_history(experiment, 0, 'iteration')
predictions = suite.get_history(experiment, 0, 'predictions')
truth = suite.get_history(experiment, 0, 'truth')
train = suite.get_history(experiment, 0, 'train')
resets = None if args.full else suite.get_history(experiment, 0, 'reset')
randoms = None if args.full else suite.get_history(experiment, 0, 'random')
type = "elements" if args.full else "sequences"
hideTraining = args.training_hide is not None and len(args.training_hide) > i and args.training_hide[i] > 0
lineSize = args.size_of_line[i] if args.size_of_line is not None and len(args.size_of_line) > i else 0.8
label = args.graph_labels[i] if args.graph_labels is not None and len(args.graph_labels) > i else experiment
plotAccuracy(computeAccuracy(predictions, truth, iteration, resets=resets, randoms=randoms, num=args.num),
train,
window=args.window,
type=type,
label=label,
hideTraining=hideTraining,
lineSize=lineSize)
if len(experiments) > 1:
pyplot.legend(loc=args.legend_position)
if args.output is not None:
pyplot.savefig(args.output)
else:
pyplot.show()
| agpl-3.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/io/tests/test_packers.py | 9 | 21638 | import nose
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, SparseSeries, SparseDataFrame,
SparsePanel)
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean, assert_index_equal
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas.sparse.tests.test_sparse import assert_sp_series_equal, assert_sp_frame_equal
from pandas import Timestamp, tslib
nan = np.nan
from pandas.io.packers import to_msgpack, read_msgpack
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
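    # Added note: encode_decode round-trips ``x`` through a temporary msgpack
    # file on disk (to_msgpack then read_msgpack), so each test exercises the
    # full file-based serialization path rather than an in-memory buffer.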
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10,2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result,df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result,df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result,df)
s = to_msgpack(None,df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p,'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [ DataFrame(np.random.randn(10,2)) for i in range(5) ]
s = to_msgpack(None,*dfs)
for i, result in enumerate(read_msgpack(s,iterator=True)):
tm.assert_frame_equal(result,dfs[i])
def test_invalid_arg(self):
#GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32','float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x,x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(
2013, 1, 1), datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1), np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), ('foo', 'two'),
('qux', 'one'), ('qux', 'two')], names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'),Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'),Timestamp('20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
# this currently fails
self.assertRaises(UnicodeEncodeError, self.encode_decode, i)
#i_rec = self.encode_decode(i)
#self.assertTrue(i.equals(i_rec))
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(dict([(k, data[k]) for k in ['A', 'B', 'C', 'D']]))}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'], ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple(
[self.frame['float'], self.frame['float'].A, self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
self.assertIsInstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1]*100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
        # currently these are not implemented
#i_rec = self.encode_decode(obj)
#comparator(obj, i_rec, **kwargs)
self.assertRaises(NotImplementedError, self.encode_decode, obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
class TestCompression(TestPackers):
"""See https://github.com/pydata/pandas/pull/9783
"""
def setUp(self):
super(TestCompression, self).setUp()
data = {
'A': np.arange(1000, dtype=np.float64),
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def test_compression_zlib(self):
i_rec = self.encode_decode(self.frame, compress='zlib')
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def test_compression_blosc(self):
try:
import blosc
except ImportError:
raise nose.SkipTest('no blosc')
i_rec = self.encode_decode(self.frame, compress='blosc')
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
class TestEncoding(TestPackers):
def setUp(self):
super(TestEncoding, self).setUp()
data = {
'A': [compat.u('\u2019')] * 1000,
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
'G': [400] * 1000
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
self.utf_encodings = ['utf8', 'utf16', 'utf32']
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in compat.itervalues(self.frame):
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
class TestMsgpack():
"""
How to add msgpack tests:
    1. Install the pandas version intended to output the msgpack.
    2. Execute "generate_legacy_storage_files.py" to create the msgpack.
    $ python generate_legacy_storage_files.py <output_dir> msgpack
    3. Move the created msgpack file to the "data/legacy_msgpack/<version>" directory.
    NOTE: TestMsgpack can't be a subclass of tm.TestCase because it uses a test generator.
http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
"""
def setUp(self):
from pandas.io.tests.generate_legacy_storage_files import (
create_msgpack_data, create_data)
self.data = create_msgpack_data()
self.all_data = create_data()
self.path = u('__%s__.msgpack' % tm.rands(10))
self.minimum_structure = {'series': ['float', 'int', 'mixed', 'ts', 'mi', 'dup'],
'frame': ['float', 'int', 'mixed', 'mi'],
'panel': ['float'],
'index': ['int', 'date', 'period'],
'mi': ['reg2']}
def check_min_structure(self, data):
for typ, v in self.minimum_structure.items():
assert typ in data, '"{0}" not found in unpacked data'.format(typ)
for kind in v:
assert kind in data[typ], '"{0}" not found in data["{1}"]'.format(kind, typ)
def compare(self, vf, version):
data = read_msgpack(vf)
self.check_min_structure(data)
for typ, dv in data.items():
assert typ in self.all_data, 'unpacked data contains extra key "{0}"'.format(typ)
for dt, result in dv.items():
assert dt in self.all_data[typ], 'data["{0}"] contains extra key "{1}"'.format(typ, dt)
try:
expected = self.data[typ][dt]
except KeyError:
continue
# use a specific comparator
# if available
comparator = getattr(self,"compare_{typ}_{dt}".format(typ=typ,dt=dt), None)
if comparator is not None:
comparator(result, expected, typ, version)
else:
check_arbitrary(result, expected)
return data
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def read_msgpacks(self, version):
pth = tm.get_data_path('legacy_msgpack/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
vf = os.path.join(pth, f)
self.compare(vf, version)
n += 1
assert n > 0, 'Msgpack files are not tested'
def test_msgpack(self):
msgpack_path = tm.get_data_path('legacy_msgpack')
n = 0
for v in os.listdir(msgpack_path):
pth = os.path.join(msgpack_path, v)
if os.path.isdir(pth):
yield self.read_msgpacks, v
n += 1
assert n > 0, 'Msgpack files are not tested'
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
moutai/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
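
    Passing ``cutoff`` limits the search depth, so only nodes within that
    many hops of the source are returned:
    >>> single_source_shortest_path_length(graph, 0, cutoff=1)
    {0: 0, 1: 1}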
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
    next_level = [source]      # list of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
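# A minimal illustrative check (a sketch, not used by the module itself): in
# the unnormalized case the graph Laplacian satisfies L = D - A, where D is
# the diagonal degree matrix and A the adjacency matrix.
if __name__ == '__main__':
    A = np.array([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])
    L = graph_laplacian(A)
    D = np.diag(A.sum(axis=0))
    print(np.allclose(L, D - A))  # expected: True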
| bsd-3-clause |
xwolf12/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 truly informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
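    For instance (numbers chosen purely for illustration), with
    ``n_features=10`` and ``step=3`` the attribute holds
    ceil((10 - 1) / 3) + 1 = 4 scores.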
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
jseabold/statsmodels | examples/python/statespace_varmax.py | 5 | 3423 | # DO NOT EDIT
# Autogenerated from the notebook statespace_varmax.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
#!/usr/bin/env python
# coding: utf-8
# # VARMAX models
#
# This is a brief introduction notebook to VARMAX models in statsmodels.
# The VARMAX model is generically specified as:
# $$
# y_t = \nu + A_1 y_{t-1} + \dots + A_p y_{t-p} + B x_t + \epsilon_t +
# M_1 \epsilon_{t-1} + \dots M_q \epsilon_{t-q}
# $$
#
# where $y_t$ is a $\text{k\_endog} \times 1$ vector.
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
dta = sm.datasets.webuse('lutkepohl2', 'https://www.stata-press.com/data/r12/')
dta.index = dta.qtr
endog = dta.loc['1960-04-01':'1978-10-01',
['dln_inv', 'dln_inc', 'dln_consump']]
# ## Model specification
#
# The `VARMAX` class in statsmodels allows estimation of VAR, VMA, and
# VARMA models (through the `order` argument), optionally with a constant
# term (via the `trend` argument). Exogenous regressors may also be included
# (as usual in statsmodels, by the `exog` argument), and in this way a time
# trend may be added. Finally, the class allows measurement error (via the
# `measurement_error` argument) and allows specifying either a diagonal or
# unstructured innovation covariance matrix (via the `error_cov_type`
# argument).
# ## Example 1: VAR
#
# Below is a simple VARX(2) model in two endogenous variables and an
# exogenous series, but no constant term. Notice that we needed to allow for
# more iterations than the default (which is `maxiter=50`) in order for the
# likelihood estimation to converge. This is not unusual in VAR models which
# have to estimate a large number of parameters, often on a relatively small
# number of time series: this model, for example, estimates 27 parameters
# off of 75 observations of 3 variables.
exog = endog['dln_consump']
mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']],
order=(2, 0),
trend='n',
exog=exog)
res = mod.fit(maxiter=1000, disp=False)
print(res.summary())
# From the estimated VAR model, we can plot the impulse response functions
# of the endogenous variables.
ax = res.impulse_responses(10, orthogonalized=True).plot(figsize=(13, 3))
ax.set(xlabel='t', title='Responses to a shock to `dln_inv`')
# ## Example 2: VMA
#
# A vector moving average model can also be formulated. Below we show a
# VMA(2) on the same data, but where the innovations to the process are
# uncorrelated. In this example we leave out the exogenous regressor but now
# include the constant term.
mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']],
order=(0, 2),
error_cov_type='diagonal')
res = mod.fit(maxiter=1000, disp=False)
print(res.summary())
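# As a brief illustration, the fitted results object also exposes the standard
# state space forecasting API; no future values of `exog` are needed here
# because this VMA model has no exogenous regressors.
print(res.forecast(steps=5))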
# ## Caution: VARMA(p,q) specifications
#
# Although the model allows estimating VARMA(p,q) specifications, these
# models are not identified without additional restrictions on the
# representation matrices, which are not built-in. For this reason, it is
# recommended that the user proceed with caution (and indeed a warning is
# issued when these models are specified). Nonetheless, they may in some
# circumstances provide useful information.
mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(1, 1))
res = mod.fit(maxiter=1000, disp=False)
print(res.summary())
| bsd-3-clause |
suku248/nest-simulator | pynest/examples/balancedneuron.py | 8 | 7344 | # -*- coding: utf-8 -*-
#
# balancedneuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Balanced neuron example
-----------------------
This script simulates a neuron driven by an excitatory and an
inhibitory population of neurons firing Poisson spike trains. The aim
is to find a firing rate for the inhibitory population that will make
the neuron fire at the same rate as the excitatory population.
Optimization is performed using the ``bisection`` method from Scipy,
simulating the network repeatedly.
This example is also shown in the article [1]_
References
~~~~~~~~~~
.. [1] Eppler JM, Helias M, Muller E, Diesmann M, Gewaltig MO (2009). PyNEST: A convenient interface to the NEST
simulator, Front. Neuroinform.
http://dx.doi.org/10.3389/neuro.11.012.2008
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting. Scipy should be imported before nest.
from scipy.optimize import bisect
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
###############################################################################
# Additionally, we set the verbosity using ``set_verbosity`` to
# suppress info messages.
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, the simulation parameters are assigned to variables.
t_sim = 25000.0 # how long we simulate
n_ex = 16000 # size of the excitatory population
n_in = 4000 # size of the inhibitory population
r_ex = 5.0 # mean rate of the excitatory population
r_in = 20.5 # initial rate of the inhibitory population
epsc = 45.0 # peak amplitude of excitatory synaptic currents
ipsc = -45.0 # peak amplitude of inhibitory synaptic currents
d = 1.0 # synaptic delay
lower = 15.0 # lower bound of the search interval
upper = 25.0 # upper bound of the search interval
prec = 0.01 # how close need the excitatory rates be
###############################################################################
# Third, the nodes are created using ``Create``. We store the returned
# handles in variables for later reference.
neuron = nest.Create("iaf_psc_alpha")
noise = nest.Create("poisson_generator", 2)
voltmeter = nest.Create("voltmeter")
spikerecorder = nest.Create("spike_recorder")
###################################################################################
# Fourth, the ``poisson_generator`` (`noise`) is configured.
# Note that we need not set parameters for the neuron, the spike recorder, and
# the voltmeter, since they have satisfactory defaults.
noise.rate = [n_ex * r_ex, n_in * r_in]
###############################################################################
# Fifth, the ``iaf_psc_alpha`` is connected to the ``spike_recorder`` and the
# ``voltmeter``, as are the two Poisson generators to the neuron. The command
# ``Connect`` has different variants. Plain `Connect` just takes the handles of
# pre- and postsynaptic nodes and uses the default values for weight and
# delay. It can also be called with a list of weights, as in the connection
# of the noise below.
# Note that the connection direction for the ``voltmeter`` is reversed compared
# to the ``spike_recorder``, because it observes the neuron instead of
# receiving events from it. Thus, ``Connect`` reflects the direction of signal
# flow in the simulation kernel rather than the physical process of inserting
# an electrode into the neuron. The latter semantics is presently not
# available in NEST.
nest.Connect(neuron, spikerecorder)
nest.Connect(voltmeter, neuron)
nest.Connect(noise, neuron, syn_spec={'weight': [[epsc, ipsc]], 'delay': 1.0})
###############################################################################
# To determine the optimal rate of the neurons in the inhibitory population,
# the network is simulated several times for different values of the
# inhibitory rate while measuring the rate of the target neuron. This is done
# by calling ``Simulate`` until the rate of the target neuron matches the rate
# of the neurons in the excitatory population with a certain accuracy. The
# algorithm is implemented in two steps:
#
# First, the function ``output_rate`` is defined to measure the firing rate
# of the target neuron for a given rate of the inhibitory neurons.
def output_rate(guess):
print("Inhibitory rate estimate: %5.2f Hz" % guess)
rate = float(abs(n_in * guess))
noise[1].rate = rate
spikerecorder.n_events = 0
nest.Simulate(t_sim)
out = spikerecorder.n_events * 1000.0 / t_sim
print(" -> Neuron rate: %6.2f Hz (goal: %4.2f Hz)" % (out, r_ex))
return out
###############################################################################
# The function takes the firing rate of the inhibitory neurons as an
# argument. It scales the rate with the size of the inhibitory population and
# configures the inhibitory Poisson generator (`noise[1]`) accordingly.
# Then, the spike counter of the ``spike_recorder`` is reset to zero. The
# network is simulated using ``Simulate``, which takes the desired simulation
# time in milliseconds and advances the network state by this amount of time.
# During simulation, the ``spike_recorder`` counts the spikes of the target
# neuron and the total number is read out at the end of the simulation
# period. The return value of ``output_rate()`` is the firing rate of the
# target neuron in Hz.
#
# Second, the scipy function ``bisect`` is used to determine the optimal
# firing rate of the neurons of the inhibitory population.
in_rate = bisect(lambda x: output_rate(x) - r_ex, lower, upper, xtol=prec)
print("Optimal rate for the inhibitory population: %.2f Hz" % in_rate)
###############################################################################
# The function ``bisect`` takes four arguments: first a function whose
# zero crossing is to be determined. Here, the firing rate of the target
# neuron should equal the firing rate of the neurons of the excitatory
# population. Thus we define an anonymous function (using `lambda`) that
# returns the difference between the actual rate of the target neuron and the
# rate of the excitatory Poisson generator, given a rate for the inhibitory
# neurons. The next two arguments are the lower and upper bound of the
# interval in which to search for the zero crossing. The fourth argument of
# ``bisect`` is the desired relative precision of the zero crossing.
#
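# As a small self-contained illustration of the same machinery (a toy example
# unrelated to the network above), ``bisect`` locates the zero crossing of any
# scalar function over a bracketing interval; here it approximates sqrt(2).
toy_root = bisect(lambda x: x ** 2 - 2.0, 1.0, 2.0, xtol=prec)
print("Toy bisection example: sqrt(2) is approximately %.3f" % toy_root)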
# Finally, we plot the target neuron's membrane potential as a function of
# time.
nest.voltage_trace.from_device(voltmeter)
plt.show()
| gpl-2.0 |
yask123/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find the weights u, v that maximize
    corr(Xk u, Yk v), subject to ``|u| = |v| = 1``.
Note that it maximizes only the correlations between the scores.
    The residual matrix of the X block (Xk+1) is obtained by deflation on the
    current X score: x_score.
    The residual matrix of the Y block (Yk+1) is obtained by deflation on the
    current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
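
    The scores returned by ``transform`` have one column per component:
    >>> X_c.shape, Y_c.shape
    ((4, 1), (4, 1))
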
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
francisco-dlp/hyperspy | hyperspy/drawing/_widgets/range.py | 1 | 22096 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from matplotlib.widgets import SpanSelector
import inspect
import logging
from hyperspy.drawing.widgets import ResizableDraggableWidgetBase
from hyperspy.events import Events, Event
_logger = logging.getLogger(__name__)
# Track if we have already warned when the widget is out of range
already_warn_out_of_range = False
def in_interval(number, interval):
if interval[0] <= number <= interval[1]:
return True
else:
return False
class RangeWidget(ResizableDraggableWidgetBase):
"""RangeWidget is a span-patch based widget, which can be
dragged and resized by mouse/keys. Basically a wrapper for
    ModifiableSpanSelector so that it conforms to the common widget interface.
For optimized changes of geometry, the class implements two methods
'set_bounds' and 'set_ibounds', to set the geometry of the rectangle by
    value and index space coordinates, respectively.
Implements the internal method _validate_geometry to make sure the patch
will always stay within bounds.
"""
def __init__(self, axes_manager, ax=None, alpha=0.5, **kwargs):
# Parse all kwargs for the matplotlib SpanSelector
self._SpanSelector_kwargs = {}
for key in inspect.signature(SpanSelector).parameters.keys():
if key in kwargs:
self._SpanSelector_kwargs[key] = kwargs.pop(key)
super(RangeWidget, self).__init__(axes_manager, alpha=alpha, **kwargs)
self.span = None
def set_on(self, value):
if value is not self.is_on() and self.ax is not None:
if value is True:
self._add_patch_to(self.ax)
self.connect(self.ax)
elif value is False:
self.disconnect()
try:
self.ax.figure.canvas.draw_idle()
except BaseException: # figure does not exist
pass
if value is False:
self.ax = None
self.__is_on = value
def _add_patch_to(self, ax):
self.span = ModifiableSpanSelector(ax, **self._SpanSelector_kwargs)
self.span.set_initial(self._get_range())
self.span.bounds_check = True
self.span.snap_position = self.snap_position
self.span.snap_size = self.snap_size
self.span.can_switch = True
self.span.events.changed.connect(self._span_changed, {'obj': 'widget'})
self.span.step_ax = self.axes[0]
self.span.tolerance = 5
self.patch = [self.span.rect]
self.patch[0].set_color(self.color)
self.patch[0].set_alpha(self.alpha)
def _span_changed(self, widget):
r = self._get_range()
pr = widget.range
if r != pr:
dx = self.axes[0].scale
x = pr[0] + 0.5 * dx
w = pr[1] + 0.5 * dx - x
old_position, old_size = self.position, self.size
self._pos = np.array([x])
self._size = np.array([w])
self._validate_geometry()
if self._pos != np.array([x]) or self._size != np.array([w]):
self._update_patch_size()
self._apply_changes(old_size=old_size, old_position=old_position)
def _get_range(self):
p = self._pos[0]
w = self._size[0]
offset = self.axes[0].scale
p -= 0.5 * offset
return (p, p + w)
def _parse_bounds_args(self, args, kwargs):
if len(args) == 1:
return args[0]
elif len(args) == 4:
return args
elif len(kwargs) == 1 and 'bounds' in kwargs:
return kwargs.values()[0]
else:
x = kwargs.pop('x', kwargs.pop('left', self._pos[0]))
if 'right' in kwargs:
w = kwargs.pop('right') - x
else:
w = kwargs.pop('w', kwargs.pop('width', self._size[0]))
return x, w
def set_ibounds(self, *args, **kwargs):
"""
Set bounds by indices. Bounds can either be specified in order left,
bottom, width, height; or by keywords:
* 'bounds': tuple (left, width)
OR
* 'x'/'left'
* 'w'/'width', alternatively 'right'
If specifying with keywords, any unspecified dimensions will be kept
constant (note: width will be kept, not right).
"""
ix, iw = self._parse_bounds_args(args, kwargs)
x = self.axes[0].index2value(ix)
w = self._i2v(self.axes[0], ix + iw) - x
old_position, old_size = self.position, self.size
self._pos = np.array([x])
self._size = np.array([w])
self._apply_changes(old_size=old_size, old_position=old_position)
def set_bounds(self, *args, **kwargs):
"""
Set bounds by values. Bounds can either be specified in order left,
bottom, width, height; or by keywords:
* 'bounds': tuple (left, width)
OR
* 'x'/'left'
* 'w'/'width', alternatively 'right' (x+w)
If specifying with keywords, any unspecified dimensions will be kept
constant (note: width will be kept, not right).
"""
global already_warn_out_of_range
def warn(obj, parameter, value):
global already_warn_out_of_range
if not already_warn_out_of_range:
_logger.info('{}: {} is out of range. It is therefore set '
'to the value of {}'.format(obj, parameter, value))
already_warn_out_of_range = True
x, w = self._parse_bounds_args(args, kwargs)
l0, h0 = self.axes[0].low_value, self.axes[0].high_value
scale = self.axes[0].scale
in_range = 0
if x < l0:
x = l0
warn(self, '`x` or `left`', x)
elif h0 <= x:
x = h0 - scale
warn(self, '`x` or `left`', x)
else:
in_range += 1
if w < scale:
w = scale
warn(self, '`width` or `right`', w)
elif not (l0 + scale <= x + w <= h0 + scale):
if self.size != np.array([w]): # resize
w = h0 + scale - self.position[0]
warn(self, '`width` or `right`', w)
if self.position != np.array([x]): # moved
x = h0 + scale - self.size[0]
warn(self, '`x` or `left`', x)
else:
in_range += 1
# if we are in range again, reset `already_warn_out_of_range` to False
if in_range == 2 and already_warn_out_of_range:
_logger.info('{} back in range.'.format(self.__class__.__name__))
already_warn_out_of_range = False
old_position, old_size = self.position, self.size
self._pos = np.array([x])
self._size = np.array([w])
self._apply_changes(old_size=old_size, old_position=old_position)
def _update_patch_position(self):
self._update_patch_geometry()
def _update_patch_size(self):
self._update_patch_geometry()
def _update_patch_geometry(self):
if self.is_on() and self.span is not None:
self.span.range = self._get_range()
def disconnect(self):
super(RangeWidget, self).disconnect()
if self.span:
self.span.turn_off()
self.span = None
def _set_snap_position(self, value):
super(RangeWidget, self)._set_snap_position(value)
self.span.snap_position = value
self._update_patch_geometry()
def _set_snap_size(self, value):
super(RangeWidget, self)._set_snap_size(value)
self.span.snap_size = value
self._update_patch_size()
def _validate_geometry(self, x1=None):
"""Make sure the entire patch always stays within bounds. First the
        position (either from the position property or from the x1 argument) is
        limited within the bounds. Then, if the right edge is out of
        bounds, the position is changed so that it will be at the limit.
The modified geometry is stored, but no change checks are performed.
Call _apply_changes after this in order to process any changes (the
size might change if it is set larger than the bounds size).
"""
xaxis = self.axes[0]
# Make sure widget size is not larger than axes
self._size[0] = min(self._size[0], xaxis.size * xaxis.scale)
# Make sure x1 is within bounds
if x1 is None:
x1 = self._pos[0] # Get it if not supplied
if x1 < xaxis.low_value:
x1 = xaxis.low_value
elif x1 > xaxis.high_value:
x1 = xaxis.high_value
# Make sure x2 is with upper bound.
# If not, keep dims, and change x1!
x2 = x1 + self._size[0]
if x2 > xaxis.high_value + xaxis.scale:
x2 = xaxis.high_value + xaxis.scale
x1 = x2 - self._size[0]
self._pos = np.array([x1])
# Apply snaps if appropriate
if self.snap_position:
self._do_snap_position()
if self.snap_size:
self._do_snap_size()
class ModifiableSpanSelector(SpanSelector):
def __init__(self, ax, **kwargs):
onselect = kwargs.pop('onselect', self.dummy)
direction = kwargs.pop('direction', 'horizontal')
useblit = kwargs.pop('useblit', ax.figure.canvas.supports_blit)
SpanSelector.__init__(self, ax, onselect, direction=direction,
useblit=useblit, span_stays=False, **kwargs)
# The tolerance in points to pick the rectangle sizes
self.tolerance = 1
self.on_move_cid = None
self._range = None
self.step_ax = None
self.bounds_check = False
self._button_down = False
self.snap_size = False
self.snap_position = False
self.events = Events()
self.events.changed = Event(doc="""
Event that triggers when the widget was changed.
Arguments:
----------
obj:
The widget that changed
""", arguments=['obj'])
self.events.moved = Event(doc="""
Event that triggers when the widget was moved.
Arguments:
----------
obj:
The widget that changed
""", arguments=['obj'])
self.events.resized = Event(doc="""
Event that triggers when the widget was resized.
Arguments:
----------
obj:
The widget that changed
""", arguments=['obj'])
self.can_switch = False
def dummy(self, *args, **kwargs):
pass
def _get_range(self):
self.update_range()
return self._range
def _set_range(self, value):
self.update_range()
if self._range != value:
resized = (
self._range[1] -
self._range[0]) != (
value[1] -
value[0])
moved = self._range[0] != value[0]
self._range = value
if moved:
self._set_span_x(value[0])
self.events.moved.trigger(self)
if resized:
self._set_span_width(value[1] - value[0])
self.events.resized.trigger(self)
if moved or resized:
self.draw_patch()
self.events.changed.trigger(self)
range = property(_get_range, _set_range)
def _set_span_x(self, value):
if self.direction == 'horizontal':
self.rect.set_x(value)
else:
self.rect.set_y(value)
def _set_span_width(self, value):
if self.direction == 'horizontal':
self.rect.set_width(value)
else:
self.rect.set_height(value)
def _get_span_x(self):
if self.direction == 'horizontal':
return self.rect.get_x()
else:
return self.rect.get_y()
def _get_span_width(self):
if self.direction == 'horizontal':
return self.rect.get_width()
else:
return self.rect.get_height()
def _get_mouse_position(self, event):
if self.direction == 'horizontal':
return event.xdata
else:
return event.ydata
def set_initial(self, initial_range=None):
"""
Remove selection events, set the spanner, and go to modify mode.
"""
if initial_range is not None:
self.range = initial_range
self.disconnect_events()
# And connect to the new ones
self.connect_event('button_press_event', self.mm_on_press)
self.connect_event('button_release_event', self.mm_on_release)
self.connect_event('draw_event', self.update_background)
self.rect.set_visible(True)
self.rect.contains = self.contains
def update(self, *args):
        # Override the SpanSelector `update` method to properly blit all
        # artists before we go to "modify mode" in `set_initial`.
self.draw_patch()
def draw_patch(self, *args):
"""Update the patch drawing.
"""
try:
if self.useblit and hasattr(self.ax, 'hspy_fig'):
self.ax.hspy_fig._update_animated()
elif self.ax.figure is not None:
self.ax.figure.canvas.draw_idle()
except AttributeError:
pass # When figure is None, typically when closing
def contains(self, mouseevent):
x, y = self.rect.get_transform().inverted().transform_point(
(mouseevent.x, mouseevent.y))
v = x if self.direction == 'vertical' else y
        # Check the cross-direction (axes-fraction) coordinate first
if not (0.0 <= v <= 1.0):
return False, {}
x_pt = self._get_point_size_in_data_units()
hit = self._range[0] - x_pt, self._range[1] + x_pt
        if hit[0] < self._get_mouse_position(mouseevent) < hit[1]:
return True, {}
return False, {}
def release(self, event):
"""When the button is released, the span stays in the screen and the
iteractivity machinery passes to modify mode"""
if self.pressv is None or (self.ignore(event) and not self._button_down):
return
self._button_down = False
self.update_range()
self.set_initial()
def _get_point_size_in_data_units(self):
# Calculate the point size in data units
invtrans = self.ax.transData.inverted()
(x, y) = (1, 0) if self.direction == 'horizontal' else (0, 1)
x_pt = self.tolerance * abs((invtrans.transform((x, y)) -
invtrans.transform((0, 0)))[y])
return x_pt
def mm_on_press(self, event):
if self.ignore(event) and not self._button_down:
return
self._button_down = True
x_pt = self._get_point_size_in_data_units()
# Determine the size of the regions for moving and stretching
self.update_range()
left_region = self._range[0] - x_pt, self._range[0] + x_pt
right_region = self._range[1] - x_pt, self._range[1] + x_pt
middle_region = self._range[0] + x_pt, self._range[1] - x_pt
if in_interval(self._get_mouse_position(event), left_region) is True:
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_left)
elif in_interval(self._get_mouse_position(event), right_region):
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_right)
elif in_interval(self._get_mouse_position(event), middle_region):
self.pressv = self._get_mouse_position(event)
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_rect)
else:
return
def update_range(self):
self._range = (self._get_span_x(),
self._get_span_x() + self._get_span_width())
def switch_left_right(self, x, left_to_right):
if left_to_right:
if self.step_ax is not None:
if x > self.step_ax.high_value + self.step_ax.scale:
return
w = self._range[1] - self._range[0]
r0 = self._range[1]
self._set_span_x(r0)
r1 = r0 + w
self.canvas.mpl_disconnect(self.on_move_cid)
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_right)
else:
if self.step_ax is not None:
if x < self.step_ax.low_value - self.step_ax.scale:
return
w = self._range[1] - self._range[0]
r1 = self._range[0]
r0 = r1 - w
self.canvas.mpl_disconnect(self.on_move_cid)
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_left)
self._range = (r0, r1)
def move_left(self, event):
if self._button_down is False or self.ignore(event):
return
x = self._get_mouse_position(event)
if self.step_ax is not None:
if (self.bounds_check and
x < self.step_ax.low_value - self.step_ax.scale):
return
if self.snap_position:
snap_offset = self.step_ax.offset - 0.5 * self.step_ax.scale
elif self.snap_size:
snap_offset = self._range[1]
if self.snap_position or self.snap_size:
rem = (x - snap_offset) % self.step_ax.scale
if rem / self.step_ax.scale < 0.5:
rem = -rem
else:
rem = self.step_ax.scale - rem
x += rem
# Do not move the left edge beyond the right one.
if x >= self._range[1]:
if self.can_switch and x > self._range[1]:
self.switch_left_right(x, True)
self.move_right(event)
return
width_increment = self._range[0] - x
if self._get_span_width() + width_increment <= 0:
return
self._set_span_x(x)
self._set_span_width(self._get_span_width() + width_increment)
self.update_range()
self.events.moved.trigger(self)
self.events.resized.trigger(self)
self.events.changed.trigger(self)
if self.onmove_callback is not None:
self.onmove_callback(*self._range)
self.draw_patch()
def move_right(self, event):
if self._button_down is False or self.ignore(event):
return
x = self._get_mouse_position(event)
if self.step_ax is not None:
if (self.bounds_check and
x > self.step_ax.high_value + self.step_ax.scale):
return
if self.snap_size:
snap_offset = self._range[0]
rem = (x - snap_offset) % self.step_ax.scale
if rem / self.step_ax.scale < 0.5:
rem = -rem
else:
rem = self.step_ax.scale - rem
x += rem
# Do not move the right edge beyond the left one.
if x <= self._range[0]:
if self.can_switch and x < self._range[0]:
self.switch_left_right(x, False)
self.move_left(event)
return
width_increment = x - self._range[1]
if self._get_span_width() + width_increment <= 0:
return
self._set_span_width(self._get_span_width() + width_increment)
self.update_range()
self.events.resized.trigger(self)
self.events.changed.trigger(self)
if self.onmove_callback is not None:
self.onmove_callback(*self._range)
self.draw_patch()
def move_rect(self, event):
if self._button_down is False or self.ignore(event):
return
x_increment = self._get_mouse_position(event) - self.pressv
if self.step_ax is not None:
if self.snap_position:
rem = x_increment % self.step_ax.scale
if rem / self.step_ax.scale < 0.5:
rem = -rem
else:
rem = self.step_ax.scale - rem
x_increment += rem
self._set_span_x(self._get_span_x() + x_increment)
self.update_range()
self.pressv += x_increment
self.events.moved.trigger(self)
self.events.changed.trigger(self)
if self.onmove_callback is not None:
self.onmove_callback(*self._range)
self.draw_patch()
def mm_on_release(self, event):
if self._button_down is False or self.ignore(event):
return
self._button_down = False
self.canvas.mpl_disconnect(self.on_move_cid)
self.on_move_cid = None
def turn_off(self):
self.disconnect_events()
if self.on_move_cid is not None:
self.canvas.mpl_disconnect(self.on_move_cid)
self.ax.patches.remove(self.rect)
self.ax.figure.canvas.draw_idle()
| gpl-3.0 |
andrebrener/crypto_predictor | get_market_cap.py | 1 | 3758 | # =============================================================================
# File:     get_market_cap.py
# Author: Andre Brener
# Created: 12 Jun 2017
# Last Modified: 18 Jun 2017
# Description: Scrape weekly historical market caps from coinmarketcap.com and save them as CSV files
# =============================================================================
import os
import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_links(url):
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
lists = soup.find_all("li", {"class": "text-center"})
links = []
for li in lists:
a_tags = li.find_all("a")
href = a_tags[0]['href']
day_link = href.split('/')[-2]
links.append('{}{}'.format(url, day_link))
return links
def get_market_value(week_date, url):
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
coin_symbols = [
tag.text.strip() for tag in soup.find_all("td", {"class": "text-left"})
if len(tag.text.strip()) > 0
]
market_caps = [
tag.text.strip()[1:].replace(',', '')
for tag in soup.find_all("td",
{"class": "no-wrap market-cap text-right"})
if len(tag.text.strip()) > 0
]
df = pd.DataFrame()
df['coin'] = coin_symbols
df['market_cap'] = market_caps
df['market_cap'] = df['market_cap'].apply(
lambda x: int(x) if x != '' else 0)
df['date'] = week_date
df['date'] = pd.to_datetime(df['date'])
return df
def get_data(main_url, current_df):
links = get_links(main_url)
df_list = []
for url in links:
week_date = url.split('/')[-1]
# if week_date in list(current_df['date'].dt.strftime('%Y%m%d')):
# continue
print(url)
df = get_market_value(week_date, url)
df_list.append(df)
if len(df_list) == 0:
result_df = current_df
else:
total_df = pd.concat(df_list)
total_df = total_df.pivot_table(
index='date', columns='coin', values='market_cap',
aggfunc='max').reset_index()
total_df['week_number'] = total_df['date'].dt.week
total_df['year'] = total_df['date'].dt.year
total_dates = list(total_df['date'])
new_df = pd.DataFrame()
new_df['date'] = pd.date_range(total_dates[0], total_dates[-1])
new_df['week_number'] = new_df['date'].dt.week
new_df['year'] = new_df['date'].dt.year
cols = [col for col in total_df.columns if 'date' not in col]
final_df = pd.merge(
total_df[cols], new_df,
on=['week_number', 'year']).sort_values('date')
final_cols = [
col for col in total_df.columns
if all(kw not in col for kw in ['week_number', 'year'])
]
final_df = final_df[final_cols].fillna(0)
if current_df.empty:
result_df = final_df
else:
result_df = pd.concat([current_df, final_df])
coin_list = [col for col in result_df.columns if 'date' not in col]
btc_market_caps = pd.DataFrame()
btc_market_caps['date'] = result_df['date']
for col in coin_list:
btc_market_caps[col] = result_df[col] / result_df['BTC']
return result_df, btc_market_caps
if __name__ == '__main__':
historical_url = 'https://coinmarketcap.com/historical/'
# current_df = pd.read_csv('data/historical_market_caps.csv')
# current_df['date'] = pd.to_datetime(current_df['date'])
current_df = pd.DataFrame()
df, btc_df = get_data(historical_url, current_df)
os.makedirs('data', exist_ok=True)
df.to_csv('data/historical_market_caps.csv', index=False)
btc_df.to_csv('data/historical_market_caps_btc.csv', index=False)
| mit |
dieterich-lab/rp-bp | rpbp/analysis/proteomics/create_orf_peptide_coverage_line_graph.py | 1 | 7303 | #! /usr/bin/env python3
import matplotlib
matplotlib.use('agg')
matplotlib.rc('text', usetex=True)
import argparse
import logging
import re
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import pbio.misc.parallel as parallel
import pbio.misc.utils as utils
default_num_cpus = 1
default_min_length = 0
default_title = ""
default_fontsize = 20
default_note_fontsize = 15
default_line_width = 4
def get_orf_coverage(orf):
""" This function calculates the percentage of an ORF covered by the
matching peptides.
It is adopted from a StackOverflow answer:
http://stackoverflow.com/questions/4173904/string-coverage-optimization-in-python
"""
peptides = orf['peptide_matches']
peptides = peptides.replace(";", "|")
pat = re.compile('(' + peptides + ')')
orf_sequence = orf['orf_sequence']
orf_id = orf['orf_id']
num_matches = orf['num_matches']
peptide_length = len(orf_sequence)
sout = list(orf_sequence)
i = 0
match = pat.search(orf_sequence)
while match:
span = match.span()
sout[span[0]:span[1]] = ['x']*(span[1]-span[0])
i = span[0]+1
match = pat.search(orf_sequence, i)
covered_orf_sequence = ''.join(sout)
num_covered_positions = covered_orf_sequence.count('x')
coverage = num_covered_positions / peptide_length
ret = {
'orf_id': orf_id,
'num_matches': num_matches,
'covered_orf_sequence': covered_orf_sequence,
'num_covered_positions': num_covered_positions,
'coverage': coverage,
'peptide_length': peptide_length
}
return pd.Series(ret)
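# The helper below is a minimal illustration of the covering logic in
# get_orf_coverage, using a toy ORF and two hypothetical peptides rather than
# pipeline data; it is only a sketch and is not called by this script.
def _example_orf_coverage():
    toy_orf = pd.Series({
        'orf_id': 'toy_orf',
        'num_matches': 2,
        'peptide_matches': 'AYI;KQR',
        'orf_sequence': 'MKTAYIAKQR',
    })
    covered = get_orf_coverage(toy_orf)
    # 'AYI' and 'KQR' together cover 6 of the 10 residues, so the masked
    # sequence is 'MKTxxxAxxx' and the reported coverage is 0.6.
    return covered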
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="This script creates a plot showing the fraction of predicted ORFs "
"which have a set amount of peptide coverage.")
parser.add_argument('rpbp_peptide_matches', help="The (csv) file containing the peptides "
"matching to each ORF predicted as translated using Rp-Bp (produced by "
"get-orf-peptide-matches)")
parser.add_argument('rpchi_peptide_matches', help="The (csv) file containing the peptides "
"matching to each ORF predicted as translated using Rp-chi (produced by "
"get-orf-peptide-matches)")
parser.add_argument('out', help="The output (image) file")
parser.add_argument('-l', '--min-length', help="The minimum length for ORFs (in "
"nucleotides) to consider in the analyis", type=int, default=default_min_length)
parser.add_argument('-p', '--num-cpus', help="The number of processors to use", type=int,
default=default_num_cpus)
parser.add_argument('--title', default=default_title)
parser.add_argument('--fontsize', type=int, default=default_fontsize)
parser.add_argument('--note-fontsize', type=int, default=default_note_fontsize)
parser.add_argument('--line-width', type=int, default=default_line_width)
utils.add_logging_options(parser)
args = parser.parse_args()
utils.update_logging(args)
msg = "Reading predictions"
logging.info(msg)
rpbp_peptide_matches = pd.read_csv(args.rpbp_peptide_matches)
rpchi_peptide_matches = pd.read_csv(args.rpchi_peptide_matches)
if args.min_length > 0:
msg = "Filtering predictions by: length > {}".format(args.min_length)
logging.warning(msg)
# multiply by 3 because the orf sequences are amino acid sequences
bf_lengths = rpbp_peptide_matches['orf_sequence'].str.len() * 3
m_bf_length = bf_lengths > args.min_length
rpbp_peptide_matches = rpbp_peptide_matches[m_bf_length]
chisq_lengths = rpchi_peptide_matches['orf_sequence'].str.len() * 3
m_chisq_length = chisq_lengths > args.min_length
rpchi_peptide_matches = rpchi_peptide_matches[m_chisq_length]
msg = "Calculating Rp-Bp coverage"
logging.info(msg)
bf_coverage = parallel.apply_parallel(rpbp_peptide_matches,
args.num_cpus,
get_orf_coverage,
progress_bar=True)
bf_coverage = pd.DataFrame(bf_coverage)
msg = "Calculating Rp-chi coverage"
logging.info(msg)
chisq_coverage = parallel.apply_parallel(rpchi_peptide_matches,
args.num_cpus,
get_orf_coverage,
progress_bar=True)
chisq_coverage = pd.DataFrame(chisq_coverage)
msg = "Creating image"
logging.info(msg)
    # plot the empirical distribution of ORF peptide coverage
hist_min = 0
hist_max = 1.1
hist_step = 0.05
hist_range = (hist_min, hist_max)
hist_bins = np.arange(hist_min, hist_max, hist_step)
bf_covered_hist, b = np.histogram(bf_coverage['coverage'],
bins=hist_bins,
range=hist_range,
density=True)
chisq_covered_hist, b = np.histogram(chisq_coverage['coverage'],
bins=hist_bins,
range=hist_range,
density=True)
# now, normalize the histograms
bf_covered_hist = bf_covered_hist / np.sum(bf_covered_hist)
chisq_covered_hist = chisq_covered_hist / np.sum(chisq_covered_hist)
# multiply by 100 to give actual percentages
bf_covered_hist = 100 * bf_covered_hist
chisq_covered_hist = 100 * chisq_covered_hist
hist_bins = 100*hist_bins
fig, ax = plt.subplots(figsize=(10,5))
cm = plt.cm.gist_earth
x = np.arange(len(bf_covered_hist))
bf_label = r'\textsc{Rp-Bp}'
ax.plot(x,
bf_covered_hist,
color=cm(0.1),
label=bf_label,
linewidth=args.line_width,
linestyle='--',
marker='^')
chisq_label = r'\textsc{Rp-$\chi^2$}'
ax.plot(x,
chisq_covered_hist,
color=cm(0.3),
label=chisq_label,
linewidth=args.line_width,
linestyle='-.',
marker='D')
    ax.set_xlabel(r'Peptide Coverage (\%)', fontsize=args.fontsize)
    ax.set_ylabel(r'\% of predicted ORFs', fontsize=args.fontsize)
if args.title is not None and len(args.title) > 0:
ax.set_title(args.title, fontsize=args.fontsize)
# only show every 20% on the x-axis
ax.set_xticks(x[::4])
ax.set_xticklabels(hist_bins[::4])
def my_formatter_fun(x, p):
return "${:d}$".format(20*p)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(my_formatter_fun))
# hide the "0" tick label
yticks = ax.yaxis.get_major_ticks()
yticks[0].label1.set_visible(False)
ax.set_xlim((0, len(bf_covered_hist)-1))
ax.set_ylim((0,10))
ax.legend(loc='upper right', fontsize=args.fontsize)
ax.tick_params(axis='both', which='major', labelsize=args.note_fontsize)
fig.savefig(args.out, bbox_inches='tight')
if __name__ == '__main__':
main()
| mit |
zorroblue/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 18 | 48928 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn import neighbors, datasets
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors.base import VALID_METRICS_SPARSE, VALID_METRICS
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
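# For example (hypothetical values, shown only as an illustration):
#     _weight_func(np.array([[0.5, 2.0]]))  ->  array([[4.  , 0.25]])
# i.e. weights fall off as 1 / d**2; a zero distance yields an infinite
# weight, which is why the tests below avoid this weight function whenever
# zero distances can occur.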
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99],
[0.98, 0.98], [2.01, 2.01]])
y = np.array([1, 2, 1, 1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([-1, 1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius-based neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
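    # (For instance, np.array([50], dtype=np.uint8) - np.array([200], dtype=np.uint8)
    # silently wraps around to [106] instead of -150, so squared differences
    # computed in uint8 can be wrong.)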
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
if (isinstance(cls, neighbors.KNeighborsClassifier) or
isinstance(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = {}
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results[algorithm] = neigh.kneighbors(test, return_distance=True)
assert_array_almost_equal(results['brute'][0], results['ball_tree'][0])
assert_array_almost_equal(results['brute'][1], results['ball_tree'][1])
if 'kd_tree' in results:
assert_array_almost_equal(results['brute'][0],
results['kd_tree'][0])
assert_array_almost_equal(results['brute'][1],
results['kd_tree'][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_valid_brute_metric_for_auto_algorithm():
X = rng.rand(12, 12)
Xcsr = csr_matrix(X)
# check that there is a metric that is valid for brute
# but not ball_tree (so we actually test something)
assert_in("cosine", VALID_METRICS['brute'])
assert_false("cosine" in VALID_METRICS['ball_tree'])
    # Metrics which don't require any additional parameters
require_params = ['mahalanobis', 'wminkowski', 'seuclidean']
for metric in VALID_METRICS['brute']:
if metric != 'precomputed' and metric not in require_params:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric=metric).fit(X)
nn.kneighbors(X)
elif metric == 'precomputed':
X_precomputed = rng.random_sample((10, 4))
Y_precomputed = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X_precomputed, metric='euclidean')
DYX = metrics.pairwise_distances(Y_precomputed, X_precomputed,
metric='euclidean')
nb_p = neighbors.NearestNeighbors(n_neighbors=3)
nb_p.fit(DXX)
nb_p.kneighbors(DYX)
for metric in VALID_METRICS_SPARSE['brute']:
if metric != 'precomputed' and metric not in require_params:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric=metric).fit(Xcsr)
nn.kneighbors(Xcsr)
# Metric with parameter
VI = np.dot(X, X.T)
list_metrics = [('seuclidean', dict(V=rng.rand(12))),
('wminkowski', dict(w=rng.rand(12))),
('mahalanobis', dict(VI=VI))]
for metric, params in list_metrics:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric=metric,
metric_params=params).fit(X)
nn.kneighbors(X)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
    # Raise an error when wrong parameters are supplied.
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# Non-regression test for #4523
# 'brute': uses scipy.spatial.distance through pairwise_distances
# 'ball_tree': uses sklearn.neighbors.dist_metrics
rng = np.random.RandomState(0)
X = rng.uniform(size=(6, 5))
NN = neighbors.NearestNeighbors
nn1 = NN(metric="jaccard", algorithm='brute').fit(X)
nn2 = NN(metric="jaccard", algorithm='ball_tree').fit(X)
assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
| bsd-3-clause |
MJuddBooth/pandas | asv_bench/benchmarks/replace.py | 5 | 1601 | import numpy as np
import pandas as pd
class FillNa(object):
params = [True, False]
param_names = ['inplace']
def setup(self, inplace):
N = 10**6
rng = pd.date_range('1/1/2000', periods=N, freq='min')
data = np.random.randn(N)
data[::2] = np.nan
self.ts = pd.Series(data, index=rng)
def time_fillna(self, inplace):
self.ts.fillna(0.0, inplace=inplace)
def time_replace(self, inplace):
self.ts.replace(np.nan, 0.0, inplace=inplace)
class ReplaceDict(object):
params = [True, False]
param_names = ['inplace']
def setup(self, inplace):
N = 10**5
start_value = 10**5
self.to_rep = dict(enumerate(np.arange(N) + start_value))
self.s = pd.Series(np.random.randint(N, size=10**3))
def time_replace_series(self, inplace):
self.s.replace(self.to_rep, inplace=inplace)
class Convert(object):
params = (['DataFrame', 'Series'], ['Timestamp', 'Timedelta'])
param_names = ['constructor', 'replace_data']
def setup(self, constructor, replace_data):
N = 10**3
data = {'Series': pd.Series(np.random.randint(N, size=N)),
'DataFrame': pd.DataFrame({'A': np.random.randint(N, size=N),
'B': np.random.randint(N, size=N)})}
self.to_replace = {i: getattr(pd, replace_data) for i in range(N)}
self.data = data[constructor]
def time_replace(self, constructor, replace_data):
self.data.replace(self.to_replace)
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |